/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef __DragonFly__
#include "opt_drm.h"	/* for VGA_SWITCHEROO */
#endif

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}

#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"
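/*
 * Note on the severity checks in __i915_printk() below: the KERN_* level
 * macros expand to an SOH byte followed by an ASCII digit (lower digit =
 * higher severity), so comparing level[1] against KERN_ERR[1] and
 * KERN_DEBUG[1] classifies a message without a string compare.
 */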
82 { 83 static bool shown_bug_once; 84 struct device *kdev = dev_priv->drm.dev; 85 bool is_error = level[1] <= KERN_ERR[1]; 86 bool is_debug = level[1] == KERN_DEBUG[1]; 87 struct va_format vaf; 88 va_list args; 89 90 if (is_debug && !(drm_debug & DRM_UT_DRIVER)) 91 return; 92 93 va_start(args, fmt); 94 95 vaf.fmt = fmt; 96 vaf.va = &args; 97 98 dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", 99 __builtin_return_address(0), &vaf); 100 101 if (is_error && !shown_bug_once) { 102 dev_notice(kdev, "%s", FDO_BUG_MSG); 103 shown_bug_once = true; 104 } 105 106 va_end(args); 107 } 108 109 static bool i915_error_injected(struct drm_i915_private *dev_priv) 110 { 111 return i915.inject_load_failure && 112 i915_load_fail_count == i915.inject_load_failure; 113 } 114 115 #define i915_load_error(dev_priv, fmt, ...) \ 116 __i915_printk(dev_priv, \ 117 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ 118 fmt, ##__VA_ARGS__) 119 120 121 static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) 122 { 123 enum intel_pch ret = PCH_NOP; 124 125 /* 126 * In a virtualized passthrough environment we can be in a 127 * setup where the ISA bridge is not able to be passed through. 128 * In this case, a south bridge can be emulated and we have to 129 * make an educated guess as to which PCH is really there. 130 */ 131 132 if (IS_GEN5(dev_priv)) { 133 ret = PCH_IBX; 134 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 135 } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { 136 ret = PCH_CPT; 137 DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); 138 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 139 ret = PCH_LPT; 140 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 141 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 142 ret = PCH_SPT; 143 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 144 } 145 146 return ret; 147 } 148 149 static void intel_detect_pch(struct drm_device *dev) 150 { 151 struct drm_i915_private *dev_priv = to_i915(dev); 152 device_t pch = NULL; 153 struct pci_devinfo *di = NULL; 154 155 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 156 * (which really amounts to a PCH but no South Display). 157 */ 158 if (INTEL_INFO(dev)->num_pipes == 0) { 159 dev_priv->pch_type = PCH_NOP; 160 return; 161 } 162 163 /* XXX The ISA bridge probe causes some old Core2 machines to hang */ 164 if (INTEL_INFO(dev)->gen < 5) 165 return; 166 167 /* 168 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 169 * make graphics device passthrough work easy for VMM, that only 170 * need to expose ISA bridge to let driver know the real hardware 171 * underneath. This is a requirement from virtualization team. 172 * 173 * In some virtualized environments (e.g. XEN), there is irrelevant 174 * ISA bridge in the system. To work reliably, we should scan trhough 175 * all the ISA bridge devices and check for the first match, instead 176 * of only checking the first one. 
	while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) {
		if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev_priv));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev_priv) ||
					IS_IVYBRIDGE(dev_priv)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(IS_HSW_ULT(dev_priv) ||
					IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev_priv) &&
					!IS_BROADWELL(dev_priv));
				WARN_ON(!IS_HSW_ULT(dev_priv) &&
					!IS_BDW_ULT(dev_priv));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev_priv) &&
					!IS_KABYLAKE(dev_priv));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev_priv));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    1)) {
				dev_priv->pch_type =
					intel_virt_detect_pch(dev_priv);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

#if 0
	pci_dev_put(pch);
#endif
}
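/*
 * i915_getparam() backs the I915_GETPARAM ioctl. A hypothetical userspace
 * query, using the usual libdrm pattern (sketch, not taken from this tree):
 *
 *	int has_blt = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BLT,
 *				   .value = &has_blt };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * Unknown params fail with -EINVAL, retired UMS/DRI1 params with -ENODEV.
 */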
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = pdev->revision;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_BSD:
		value = !!dev_priv->engine[VCS];
		break;
	case I915_PARAM_HAS_BLT:
		value = !!dev_priv->engine[BCS];
		break;
	case I915_PARAM_HAS_VEBOX:
		value = !!dev_priv->engine[VECS];
		break;
	case I915_PARAM_HAS_BSD2:
		value = !!dev_priv->engine[VCS2];
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_GEN(dev_priv) >= 4;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev_priv);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev_priv);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev_priv);
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915.semaphores;
		break;
#if 0
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
#endif
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version(dev_priv);
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev_priv)->sseu.eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev_priv);
		break;
	case I915_PARAM_HAS_POOLED_EU:
		value = HAS_POOLED_EU(dev_priv);
		break;
	case I915_PARAM_MIN_EU_IN_POOL:
		value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
		break;
	case I915_PARAM_MMAP_GTT_VERSION:
		/* Though we've started our numbering from 1, and so class all
		 * earlier versions as 0, in effect their value is undefined as
		 * the ioctl will report EINVAL for the unknown param!
		 */
		value = i915_gem_mmap_gtt_version();
		break;
#if 0
	case I915_PARAM_MMAP_VERSION:
		/* Remember to bump this if the version changes! */
#endif
	case I915_PARAM_HAS_GEM:
	case I915_PARAM_HAS_PAGEFLIPPING:
	case I915_PARAM_HAS_EXECBUF2: /* depends on GEM */
	case I915_PARAM_HAS_RELAXED_FENCING:
	case I915_PARAM_HAS_COHERENT_RINGS:
	case I915_PARAM_HAS_RELAXED_DELTA:
	case I915_PARAM_HAS_GEN7_SOL_RESET:
	case I915_PARAM_HAS_WAIT_TIMEOUT:
#if 0
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
#endif
	case I915_PARAM_HAS_PINNED_BATCHES:
	case I915_PARAM_HAS_EXEC_NO_RELOC:
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		/* For the time being all of these are always true;
		 * if some supported hardware does not have one of these
		 * features this value needs to be provided from
		 * INTEL_INFO(), a feature macro, or similar.
		 */
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (put_user(value, param->value))
		return -EFAULT;

	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	static struct pci_dev i915_bridge_dev;

	i915_bridge_dev.dev.bsddev = pci_find_dbsf(0, 0, 0, 0);
	if (!i915_bridge_dev.dev.bsddev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}

	dev_priv->bridge_dev = &i915_bridge_dev;
	return 0;
}

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	device_t vga;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	vga = device_get_parent(dev->dev->bsddev);
	dev_priv->mch_res_rid = 0x100;
	dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga),
	    dev->dev->bsddev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL,
	    MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1);
	if (dev_priv->mch_res == NULL) {
		DRM_ERROR("failed mchbar resource alloc\n");
		return (-ENOMEM);
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(rman_get_start(dev_priv->mch_res)));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(rman_get_start(dev_priv->mch_res)));
	return 0;
}

/* Setup MCHBAR if possible; note in mchbar_need_disable whether we should
 * disable it again on teardown. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
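/*
 * For reference (bit positions assumed from the register definitions): on
 * i915G/GM the MCHBAR enable is the DEVEN_MCHBAR_EN flag in the DEVEN
 * config register, while on the other generations it is bit 0 of the
 * MCHBAR register itself; those are the bits the read-modify-write
 * sequences above and below toggle.
 */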
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	device_t vga;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev->bsddev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

#if 0
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
#endif

static void i915_gem_fini(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
#if 0
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;
#endif

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

#ifdef __DragonFly__
	/*
	 * If we are dealing with dual GPU machines the vga_switcheroo module
	 * has been loaded. Machines with dual GPUs have an integrated graphics
	 * device (IGD), which we assume is an Intel device. The other, the
	 * discrete device (DIS), is either an NVidia or a Radeon device. For
	 * now we will force switch the gmux so the intel driver outputs
	 * both to the laptop panel and the external monitor.
	 *
	 * DragonFly does not have an nvidia native driver yet. In the future,
	 * we will check for the radeon device: if present, we will leave
	 * the gmux switch as it is, so the user can choose between the IGD and
	 * the DIS using the /dev/vga_switcheroo device.
	 */
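	/*
	 * vga_switcheroo_force_migd() is a DragonFly-specific helper; the
	 * switch is best-effort, so on failure we only log a message and
	 * continue loading with whatever output the gmux currently drives.
	 */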
	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
		ret = vga_switcheroo_force_migd();
		if (ret) {
			DRM_INFO("could not switch gmux to IGD\n");
		}
	}
#endif

	return 0;

cleanup_gem:
	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
	i915_gem_fini(dev);
cleanup_irq:
	intel_guc_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	intel_power_domains_fini(dev_priv);
#if 0
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
#endif
	return ret;
}

#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->mappable_base;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
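	/*
	 * An ordered workqueue runs at most one work item at a time, in
	 * queueing order, which is what makes the single-instance
	 * assumption above safe.
	 */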
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}

/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	if (IS_HSW_EARLY_SDV(dev_priv) ||
	    IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
}

/**
 * i915_driver_init_early - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_init_early(struct drm_i915_private *dev_priv,
				  const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	int ret = 0;

	if (i915_inject_load_failure())
		return -ENODEV;

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(dev_priv);
	memcpy(device_info, match_info, sizeof(*device_info));
	device_info->device_id = dev_priv->drm.pdev->device;

	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
	device_info->gen_mask = BIT(device_info->gen - 1);

	lockinit(&dev_priv->irq_lock, "userirq", 0, 0);
	lockinit(&dev_priv->gpu_error.lock, "915err", 0, 0);
	lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->uncore.lock, "915gt", 0, 0);
	lockinit(&dev_priv->mm.object_stat_lock, "i915osl", 0, 0);
	lockinit(&dev_priv->mmio_flip_lock, "i915mfl", 0, 0);
	lockinit(&dev_priv->sb_lock, "i915sbl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE);
	lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE);
	lockinit(&dev_priv->wm.wm_mutex, "i915wm", 0, LK_CANRECURSE);
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);

	i915_memcpy_init_early(dev_priv);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = intel_gvt_init(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(&dev_priv->drm);

	intel_pm_setup(&dev_priv->drm);
	intel_init_dpio(dev_priv);
	intel_power_domains_init(dev_priv);
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
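	/*
	 * Everything up to this point touches software state only (plus PCI
	 * config space for the PCH probe); the first MMIO access against the
	 * GPU itself happens later, in i915_driver_init_mmio().
	 */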
	i915_gem_load_init(&dev_priv->drm);

	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}

/**
 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_load_cleanup(&dev_priv->drm);
	i915_workqueues_cleanup(dev_priv);
}

static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");

		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
#if 0
	struct drm_i915_private *dev_priv = to_i915(dev);
#endif

	intel_teardown_mchbar(dev);
#if 0
	pci_iounmap(pdev, dev_priv->regs);
#endif
}

/**
 * i915_driver_init_mmio - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	if (i915_get_bridge_dev(dev))
		return -EIO;

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	intel_uncore_init(dev_priv);

	return 0;

put_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}

/**
 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_uncore_fini(dev_priv);
	i915_mmio_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	i915.enable_execlists =
		intel_sanitize_enable_execlists(dev_priv,
						i915.enable_execlists);

	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities. We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
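	/*
	 * The sanitized value is a mode, not a boolean; the convention
	 * (assumed from intel_sanitize_enable_ppgtt() in this era of the
	 * driver) is 0 = disabled, 1 = aliasing ppgtt, 2 = full ppgtt,
	 * 3 = full ppgtt with 48bit address space.
	 */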
	i915.enable_ppgtt =
		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
}

/**
 * i915_driver_init_hw - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	intel_sanitize_options(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		return ret;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_ggtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		return ret;

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto out_ggtt;
	}

	pci_set_master(pdev);

#if 0
	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");

			goto out_ggtt;
		}
	}
#endif

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_uncore_sanitize(dev_priv);

	intel_opregion_setup(dev_priv);

	i915_gem_load_init_fences(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */
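	/* The MSI setup is compiled out in the DragonFly port for now, so
	 * the device keeps using legacy line-based interrupts. */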
#if 0
	if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}
#endif

	return 0;

out_ggtt:
	i915_ggtt_cleanup_hw(dev_priv);

	return ret;
}

/**
 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 * @dev_priv: device private
 */
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{

#if 0
	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);
#endif

	pm_qos_remove_request(&dev_priv->pm_qos);
	i915_ggtt_cleanup_hw(dev_priv);
}

/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_shrinker_init(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (INTEL_INFO(dev_priv)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	if (IS_GEN5(dev_priv))
		intel_gpu_ips_init(dev_priv);

	i915_audio_component_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching pci_device_id entry
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

#if 0
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;
#endif

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	/* Everything is in place, we can now relax! */
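	/*
	 * The banner below then reads, for example (illustrative values):
	 * "[drm] Initialized i915 1.6.0 20160919 for 0000:00:02.0 on minor 0"
	 */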
	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver.name, driver.major, driver.minor, driver.patchlevel,
		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
#if 0
	pci_disable_device(pdev);
out_free_priv:
#endif
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}

void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

#if 0
	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);
#endif

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
#if 0
	vga_switcheroo_process_delayed_switch();
#endif
}

static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

#if 0
	pci_save_state(pdev);
#endif

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(&pdev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);
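	/*
	 * Tell the ACPI OpRegion which power state we are headed for.
	 * PCI_D1 is (apparently) used here only as a token for
	 * suspend-to-idle; the device is not actually put into D1.
	 */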
	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_forcewake_reset(dev_priv, false);
	intel_opregion_unregister(dev_priv);

#if 0
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
#endif

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	intel_display_set_init_power(dev_priv, false);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

#if 0
	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6.
	 * The platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
#endif

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(device_t kdev)
{
	struct drm_softc *softc = device_get_softc(kdev);
	struct drm_device *dev = softc->drm_driver_data;
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

#if 0
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;
#endif

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);
	intel_sanitize_gt_powersave(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev);

	i915_restore_state(dev);
	intel_pps_unlock_regs_wa(dev_priv);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(dev_priv);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	intel_autoenable_gt_powersave(dev_priv);
	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
#if 0
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}
#endif

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev_priv, true);

	if (IS_BROXTON(dev_priv)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev_priv);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

#if 0
out:
#endif
	dev_priv->suspended_to_idle = false;

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

static void disable_engines_irq(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Ensure the irq handler finishes, and is not run again. */
	disable_irq(dev_priv->drm.irq);
	for_each_engine(engine, dev_priv, id)
		tasklet_kill(&engine->irq_tasklet);
}

static void enable_engines_irq(struct drm_i915_private *dev_priv)
{
	enable_irq(dev_priv->drm.irq);
}

/**
 * i915_reset - reset chip after a hang
 * @dev_priv: device private
 *
 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Caller must hold the struct_mutex.
 *
 * Procedure is fairly simple:
 * - reset the chip using the reset reg
 * - re-init context state
 * - re-init hardware status page
 * - re-init ring buffer
 * - re-init interrupt state
 * - re-init display
 */
void i915_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	int ret;

	lockdep_assert_held(&dev->struct_mutex);

	if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
		return;

	/* Clear any previous failed attempts at recovery. Time to try again. */
	__clear_bit(I915_WEDGED, &error->flags);
	error->reset_count++;

	pr_notice("drm/i915: Resetting chip after gpu hang\n");

	disable_engines_irq(dev_priv);
	ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
	enable_engines_irq(dev_priv);

	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	i915_gem_reset(dev_priv);
	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
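	/* If hw init fails below we fall through to the error label, which
	 * marks the GPU wedged before waking any reset waiters. */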
1866 * 1867 * Ring buffer needs to be re-initialized in the KMS case, or if X 1868 * was running at the time of the reset (i.e. we weren't VT 1869 * switched away). 1870 */ 1871 ret = i915_gem_init_hw(dev); 1872 if (ret) { 1873 DRM_ERROR("Failed hw init on reset %d\n", ret); 1874 goto error; 1875 } 1876 1877 wakeup: 1878 wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); 1879 return; 1880 1881 error: 1882 i915_gem_set_wedged(dev_priv); 1883 goto wakeup; 1884 } 1885 1886 #if 0 1887 static int i915_pm_suspend(struct device *kdev) 1888 { 1889 struct pci_dev *pdev = to_pci_dev(kdev); 1890 struct drm_device *dev = pci_get_drvdata(pdev); 1891 1892 if (!dev) { 1893 dev_err(kdev, "DRM not initialized, aborting suspend.\n"); 1894 return -ENODEV; 1895 } 1896 1897 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1898 return 0; 1899 1900 return i915_drm_suspend(dev); 1901 } 1902 1903 static int i915_pm_suspend_late(struct device *kdev) 1904 { 1905 struct drm_device *dev = &kdev_to_i915(kdev)->drm; 1906 1907 1908 /* 1909 * We have a suspend ordering issue with the snd-hda driver also 1910 * requiring our device to be power up. Due to the lack of a 1911 * parent/child relationship we currently solve this with an late 1912 * suspend hook. 1913 * 1914 * FIXME: This should be solved with a special hdmi sink device or 1915 * similar so that power domains can be employed. 1916 */ 1917 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1918 return 0; 1919 1920 return i915_drm_suspend_late(dev, false); 1921 } 1922 1923 static int i915_pm_poweroff_late(struct device *kdev) 1924 { 1925 struct drm_device *dev = &kdev_to_i915(kdev)->drm; 1926 1927 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1928 return 0; 1929 1930 return i915_drm_suspend_late(dev, true); 1931 } 1932 1933 static int i915_pm_resume_early(struct device *kdev) 1934 { 1935 struct drm_device *dev = &kdev_to_i915(kdev)->drm; 1936 1937 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1938 return 0; 1939 1940 return i915_drm_resume_early(dev); 1941 } 1942 1943 static int i915_pm_resume(struct device *kdev) 1944 { 1945 struct drm_device *dev = &kdev_to_i915(kdev)->drm; 1946 1947 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1948 return 0; 1949 1950 return i915_drm_resume(dev); 1951 } 1952 1953 /* freeze: before creating the hibernation_image */ 1954 static int i915_pm_freeze(struct device *kdev) 1955 { 1956 int ret; 1957 1958 ret = i915_pm_suspend(kdev); 1959 if (ret) 1960 return ret; 1961 1962 ret = i915_gem_freeze(kdev_to_i915(kdev)); 1963 if (ret) 1964 return ret; 1965 1966 return 0; 1967 } 1968 1969 static int i915_pm_freeze_late(struct device *kdev) 1970 { 1971 int ret; 1972 1973 ret = i915_pm_suspend_late(kdev); 1974 if (ret) 1975 return ret; 1976 1977 ret = i915_gem_freeze_late(kdev_to_i915(kdev)); 1978 if (ret) 1979 return ret; 1980 1981 return 0; 1982 } 1983 1984 /* thaw: called after creating the hibernation image, but before turning off. */ 1985 static int i915_pm_thaw_early(struct device *kdev) 1986 { 1987 return i915_pm_resume_early(kdev); 1988 } 1989 1990 static int i915_pm_thaw(struct device *kdev) 1991 { 1992 return i915_pm_resume(kdev); 1993 } 1994 1995 /* restore: called after loading the hibernation image. 
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}
#endif

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *   used internally by the HW in a way that doesn't depend on keeping the
 *   content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same
 * 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
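	/*
	 * Note: the GT FIFO control register below is restored verbatim;
	 * unlike the wake control and survivability registers that follow,
	 * none of its bits are driven by the s0ix sequence itself.
	 */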
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_PW_STATUS,
				      VLV_GTLC_ALLOWWAKEACK,
				      allow,
				      1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
	if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = intel_wait_for_register(dev_priv,
				      VLV_GTLC_PW_STATUS, mask, val,
				      3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug-only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = &dev_priv->drm;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

#if 0
static int intel_runtime_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(kdev);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
2405 */ 2406 i915_gem_release_all_mmaps(dev_priv); 2407 mutex_unlock(&dev->struct_mutex); 2408 2409 intel_guc_suspend(dev); 2410 2411 intel_runtime_pm_disable_interrupts(dev_priv); 2412 2413 ret = 0; 2414 if (IS_BROXTON(dev_priv)) { 2415 bxt_display_core_uninit(dev_priv); 2416 bxt_enable_dc9(dev_priv); 2417 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2418 hsw_enable_pc8(dev_priv); 2419 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2420 ret = vlv_suspend_complete(dev_priv); 2421 } 2422 2423 if (ret) { 2424 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 2425 intel_runtime_pm_enable_interrupts(dev_priv); 2426 2427 enable_rpm_wakeref_asserts(dev_priv); 2428 2429 return ret; 2430 } 2431 2432 intel_uncore_forcewake_reset(dev_priv, false); 2433 2434 enable_rpm_wakeref_asserts(dev_priv); 2435 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2436 2437 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 2438 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 2439 2440 dev_priv->pm.suspended = true; 2441 2442 /* 2443 * FIXME: We really should find a document that references the arguments 2444 * used below! 2445 */ 2446 if (IS_BROADWELL(dev_priv)) { 2447 /* 2448 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 2449 * being detected, and the call we do at intel_runtime_resume() 2450 * won't be able to restore them. Since PCI_D3hot matches the 2451 * actual specification and appears to be working, use it. 2452 */ 2453 intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 2454 } else { 2455 /* 2456 * current versions of firmware which depend on this opregion 2457 * notification have repurposed the D1 definition to mean 2458 * "runtime suspended" vs. what you would normally expect (D3) 2459 * to distinguish it from notifications that might be sent via 2460 * the suspend path. 
2461 */ 2462 intel_opregion_notify_adapter(dev_priv, PCI_D1); 2463 } 2464 2465 assert_forcewakes_inactive(dev_priv); 2466 2467 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2468 intel_hpd_poll_init(dev_priv); 2469 2470 DRM_DEBUG_KMS("Device suspended\n"); 2471 return 0; 2472 } 2473 2474 static int intel_runtime_resume(struct device *kdev) 2475 { 2476 struct pci_dev *pdev = to_pci_dev(kdev); 2477 struct drm_device *dev = pci_get_drvdata(pdev); 2478 struct drm_i915_private *dev_priv = to_i915(dev); 2479 int ret = 0; 2480 2481 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) 2482 return -ENODEV; 2483 2484 DRM_DEBUG_KMS("Resuming device\n"); 2485 2486 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2487 disable_rpm_wakeref_asserts(dev_priv); 2488 2489 intel_opregion_notify_adapter(dev_priv, PCI_D0); 2490 dev_priv->pm.suspended = false; 2491 if (intel_uncore_unclaimed_mmio(dev_priv)) 2492 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 2493 2494 intel_guc_resume(dev); 2495 2496 if (IS_GEN6(dev_priv)) 2497 intel_init_pch_refclk(dev); 2498 2499 if (IS_BROXTON(dev_priv)) { 2500 bxt_disable_dc9(dev_priv); 2501 bxt_display_core_init(dev_priv, true); 2502 if (dev_priv->csr.dmc_payload && 2503 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2504 gen9_enable_dc5(dev_priv); 2505 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2506 hsw_disable_pc8(dev_priv); 2507 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2508 ret = vlv_resume_prepare(dev_priv, true); 2509 } 2510 2511 /* 2512 * No point of rolling back things in case of an error, as the best 2513 * we can do is to hope that things will still work (and disable RPM). 2514 */ 2515 i915_gem_init_swizzling(dev); 2516 2517 intel_runtime_pm_enable_interrupts(dev_priv); 2518 2519 /* 2520 * On VLV/CHV display interrupts are part of the display 2521 * power well, so hpd is reinitialized from there. For 2522 * everyone else do it here. 
2523 */ 2524 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2525 intel_hpd_init(dev_priv); 2526 2527 enable_rpm_wakeref_asserts(dev_priv); 2528 2529 if (ret) 2530 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 2531 else 2532 DRM_DEBUG_KMS("Device resumed\n"); 2533 2534 return ret; 2535 } 2536 2537 const struct dev_pm_ops i915_pm_ops = { 2538 /* 2539 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2540 * PMSG_RESUME] 2541 */ 2542 .suspend = i915_pm_suspend, 2543 .suspend_late = i915_pm_suspend_late, 2544 .resume_early = i915_pm_resume_early, 2545 .resume = i915_pm_resume, 2546 2547 /* 2548 * S4 event handlers 2549 * @freeze, @freeze_late : called (1) before creating the 2550 * hibernation image [PMSG_FREEZE] and 2551 * (2) after rebooting, before restoring 2552 * the image [PMSG_QUIESCE] 2553 * @thaw, @thaw_early : called (1) after creating the hibernation 2554 * image, before writing it [PMSG_THAW] 2555 * and (2) after failing to create or 2556 * restore the image [PMSG_RECOVER] 2557 * @poweroff, @poweroff_late: called after writing the hibernation 2558 * image, before rebooting [PMSG_HIBERNATE] 2559 * @restore, @restore_early : called after rebooting and restoring the 2560 * hibernation image [PMSG_RESTORE] 2561 */ 2562 .freeze = i915_pm_freeze, 2563 .freeze_late = i915_pm_freeze_late, 2564 .thaw_early = i915_pm_thaw_early, 2565 .thaw = i915_pm_thaw, 2566 .poweroff = i915_pm_suspend, 2567 .poweroff_late = i915_pm_poweroff_late, 2568 .restore_early = i915_pm_restore_early, 2569 .restore = i915_pm_restore, 2570 2571 /* S0ix (via runtime suspend) event handlers */ 2572 .runtime_suspend = intel_runtime_suspend, 2573 .runtime_resume = intel_runtime_resume, 2574 }; 2575 2576 static const struct vm_operations_struct i915_gem_vm_ops = { 2577 .fault = i915_gem_fault, 2578 .open = drm_gem_vm_open, 2579 .close = drm_gem_vm_close, 2580 }; 2581 #endif 2582 2583 static struct cdev_pager_ops i915_gem_vm_ops = { 2584 .cdev_pg_fault = i915_gem_fault, 2585 .cdev_pg_ctor = i915_gem_pager_ctor, 2586 .cdev_pg_dtor = i915_gem_pager_dtor 2587 }; 2588 2589 static const struct file_operations i915_driver_fops = { 2590 .owner = THIS_MODULE, 2591 #if 0 2592 .open = drm_open, 2593 .release = drm_release, 2594 .unlocked_ioctl = drm_ioctl, 2595 .mmap = drm_gem_mmap, 2596 .poll = drm_poll, 2597 .read = drm_read, 2598 #ifdef CONFIG_COMPAT 2599 .compat_ioctl = i915_compat_ioctl, 2600 #endif 2601 .llseek = noop_llseek, 2602 #endif 2603 }; 2604 2605 static int 2606 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 2607 struct drm_file *file) 2608 { 2609 return -ENODEV; 2610 } 2611 2612 static const struct drm_ioctl_desc i915_ioctls[] = { 2613 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2614 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 2615 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 2616 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 2617 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 2618 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 2619 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 2620 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2621 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2622 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2623 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2624 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 2625 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, 
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
#if 0
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
#endif
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

static int
i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
		 struct sysctl_oid *top)
{
	return drm_add_busid_modesetting(dev, ctx, top);
}

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
	.set_busid = drm_pci_set_busid,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
#ifdef __DragonFly__
	.sysctl_init = i915_sysctl_init,
#endif
};
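#if 0
/*
 * Illustrative sketch only, not part of the driver: the masked
 * read-modify-write pattern used by vlv_restore_gunit_s0ix_state() for
 * VLV_GTLC_WAKE_CTRL and VLV_GTLC_SURVIVABILITY_REG. Bits set in live_mask
 * (e.g. VLV_GTLC_ALLOWWAKEREQ) keep their current hardware value, since the
 * s0ix suspend/resume sequence itself drives them; all other bits are taken
 * from the saved copy. The helper name is hypothetical.
 */
static inline u32 vlv_masked_restore(u32 cur, u32 saved, u32 live_mask)
{
	/* Keep the live_mask bits as-is, take the remaining bits from the save. */
	return (cur & live_mask) | (saved & ~live_mask);
}
#endif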