1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifdef __DragonFly__ 31 #include "opt_drm.h" /* for VGA_SWITCHEROO */ 32 #endif 33 34 #include <linux/acpi.h> 35 #include <linux/device.h> 36 #include <linux/module.h> 37 #include <linux/pci.h> 38 #include <linux/pm.h> 39 #include <linux/pm_runtime.h> 40 #include <linux/slab.h> 41 #include <linux/vgaarb.h> 42 #include <linux/vga_switcheroo.h> 43 #include <acpi/video.h> 44 45 #include <drm/drmP.h> 46 #include <drm/drm_crtc_helper.h> 47 #include <drm/i915_drm.h> 48 49 #include "i915_drv.h" 50 #include "i915_trace.h" 51 #include "i915_vgpu.h" 52 #include "intel_drv.h" 53 54 static struct drm_driver driver; 55 56 static unsigned int i915_load_fail_count; 57 58 bool __i915_inject_load_failure(const char *func, int line) 59 { 60 if (i915_load_fail_count >= i915.inject_load_failure) 61 return false; 62 63 if (++i915_load_fail_count == i915.inject_load_failure) { 64 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", 65 i915.inject_load_failure, func, line); 66 return true; 67 } 68 69 return false; 70 } 71 72 #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" 73 #define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ 74 "providing the dmesg log by booting with drm.debug=0xf" 75 76 void 77 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 78 const char *fmt, ...) 79 { 80 static bool shown_bug_once; 81 struct device *dev = dev_priv->drm.dev; 82 bool is_error = level[1] <= KERN_ERR[1]; 83 bool is_debug = level[1] == KERN_DEBUG[1]; 84 struct va_format vaf; 85 va_list args; 86 87 if (is_debug && !(drm_debug & DRM_UT_DRIVER)) 88 return; 89 90 va_start(args, fmt); 91 92 vaf.fmt = fmt; 93 vaf.va = &args; 94 95 dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", 96 __builtin_return_address(0), &vaf); 97 98 if (is_error && !shown_bug_once) { 99 #if 0 100 dev_notice(dev, "%s", FDO_BUG_MSG); 101 #endif 102 shown_bug_once = true; 103 } 104 105 va_end(args); 106 } 107 108 static bool i915_error_injected(struct drm_i915_private *dev_priv) 109 { 110 return i915.inject_load_failure && 111 i915_load_fail_count == i915.inject_load_failure; 112 } 113 114 #define i915_load_error(dev_priv, fmt, ...) \ 115 __i915_printk(dev_priv, \ 116 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ 117 fmt, ##__VA_ARGS__) 118 119 120 static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) 121 { 122 enum intel_pch ret = PCH_NOP; 123 124 /* 125 * In a virtualized passthrough environment we can be in a 126 * setup where the ISA bridge is not able to be passed through. 127 * In this case, a south bridge can be emulated and we have to 128 * make an educated guess as to which PCH is really there. 129 */ 130 131 if (IS_GEN5(dev)) { 132 ret = PCH_IBX; 133 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 134 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 135 ret = PCH_CPT; 136 DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); 137 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 138 ret = PCH_LPT; 139 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 140 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 141 ret = PCH_SPT; 142 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 143 } 144 145 return ret; 146 } 147 148 static void intel_detect_pch(struct drm_device *dev) 149 { 150 struct drm_i915_private *dev_priv = to_i915(dev); 151 device_t pch = NULL; 152 struct pci_devinfo *di = NULL; 153 154 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 155 * (which really amounts to a PCH but no South Display). 156 */ 157 if (INTEL_INFO(dev)->num_pipes == 0) { 158 dev_priv->pch_type = PCH_NOP; 159 return; 160 } 161 162 /* XXX The ISA bridge probe causes some old Core2 machines to hang */ 163 if (INTEL_INFO(dev)->gen < 5) 164 return; 165 166 /* 167 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 168 * make graphics device passthrough work easy for VMM, that only 169 * need to expose ISA bridge to let driver know the real hardware 170 * underneath. This is a requirement from virtualization team. 171 * 172 * In some virtualized environments (e.g. XEN), there is irrelevant 173 * ISA bridge in the system. To work reliably, we should scan trhough 174 * all the ISA bridge devices and check for the first match, instead 175 * of only checking the first one. 176 */ 177 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) { 178 if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) { 179 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 180 dev_priv->pch_id = id; 181 182 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 183 dev_priv->pch_type = PCH_IBX; 184 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 185 WARN_ON(!IS_GEN5(dev)); 186 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 187 dev_priv->pch_type = PCH_CPT; 188 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 189 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 190 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 191 /* PantherPoint is CPT compatible */ 192 dev_priv->pch_type = PCH_CPT; 193 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 194 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 195 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 196 dev_priv->pch_type = PCH_LPT; 197 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 198 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 199 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); 200 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 201 dev_priv->pch_type = PCH_LPT; 202 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 203 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 204 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); 205 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 206 dev_priv->pch_type = PCH_SPT; 207 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 208 WARN_ON(!IS_SKYLAKE(dev) && 209 !IS_KABYLAKE(dev)); 210 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 211 dev_priv->pch_type = PCH_SPT; 212 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 213 WARN_ON(!IS_SKYLAKE(dev) && 214 !IS_KABYLAKE(dev)); 215 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 216 dev_priv->pch_type = PCH_KBP; 217 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 218 WARN_ON(!IS_KABYLAKE(dev)); 219 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 220 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 221 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 222 1)) { 223 dev_priv->pch_type = intel_virt_detect_pch(dev); 224 } else 225 continue; 226 227 break; 228 } 229 } 230 if (!pch) 231 DRM_DEBUG_KMS("No PCH found.\n"); 232 233 #if 0 234 pci_dev_put(pch); 235 #endif 236 } 237 238 static int i915_getparam(struct drm_device *dev, void *data, 239 struct drm_file *file_priv) 240 { 241 struct drm_i915_private *dev_priv = to_i915(dev); 242 drm_i915_getparam_t *param = data; 243 int value; 244 245 switch (param->param) { 246 case I915_PARAM_IRQ_ACTIVE: 247 case I915_PARAM_ALLOW_BATCHBUFFER: 248 case I915_PARAM_LAST_DISPATCH: 249 /* Reject all old ums/dri params. */ 250 return -ENODEV; 251 case I915_PARAM_CHIPSET_ID: 252 value = dev->pdev->device; 253 break; 254 case I915_PARAM_REVISION: 255 value = dev->pdev->revision; 256 break; 257 case I915_PARAM_HAS_GEM: 258 value = 1; 259 break; 260 case I915_PARAM_NUM_FENCES_AVAIL: 261 value = dev_priv->num_fence_regs; 262 break; 263 case I915_PARAM_HAS_OVERLAY: 264 value = dev_priv->overlay ? 1 : 0; 265 break; 266 case I915_PARAM_HAS_PAGEFLIPPING: 267 value = 1; 268 break; 269 case I915_PARAM_HAS_EXECBUF2: 270 /* depends on GEM */ 271 value = 1; 272 break; 273 case I915_PARAM_HAS_BSD: 274 value = intel_engine_initialized(&dev_priv->engine[VCS]); 275 break; 276 case I915_PARAM_HAS_BLT: 277 value = intel_engine_initialized(&dev_priv->engine[BCS]); 278 break; 279 case I915_PARAM_HAS_VEBOX: 280 value = intel_engine_initialized(&dev_priv->engine[VECS]); 281 break; 282 case I915_PARAM_HAS_BSD2: 283 value = intel_engine_initialized(&dev_priv->engine[VCS2]); 284 break; 285 case I915_PARAM_HAS_RELAXED_FENCING: 286 value = 1; 287 break; 288 case I915_PARAM_HAS_COHERENT_RINGS: 289 value = 1; 290 break; 291 case I915_PARAM_HAS_EXEC_CONSTANTS: 292 value = INTEL_INFO(dev)->gen >= 4; 293 break; 294 case I915_PARAM_HAS_RELAXED_DELTA: 295 value = 1; 296 break; 297 case I915_PARAM_HAS_GEN7_SOL_RESET: 298 value = 1; 299 break; 300 case I915_PARAM_HAS_LLC: 301 value = HAS_LLC(dev); 302 break; 303 case I915_PARAM_HAS_WT: 304 value = HAS_WT(dev); 305 break; 306 case I915_PARAM_HAS_ALIASING_PPGTT: 307 value = USES_PPGTT(dev); 308 break; 309 case I915_PARAM_HAS_WAIT_TIMEOUT: 310 value = 1; 311 break; 312 case I915_PARAM_HAS_SEMAPHORES: 313 value = i915.semaphores; 314 break; 315 case I915_PARAM_HAS_PINNED_BATCHES: 316 value = 1; 317 break; 318 case I915_PARAM_HAS_EXEC_NO_RELOC: 319 value = 1; 320 break; 321 case I915_PARAM_HAS_EXEC_HANDLE_LUT: 322 value = 1; 323 break; 324 case I915_PARAM_CMD_PARSER_VERSION: 325 value = i915_cmd_parser_get_version(dev_priv); 326 break; 327 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 328 value = 1; 329 break; 330 case I915_PARAM_SUBSLICE_TOTAL: 331 value = INTEL_INFO(dev)->subslice_total; 332 if (!value) 333 return -ENODEV; 334 break; 335 case I915_PARAM_EU_TOTAL: 336 value = INTEL_INFO(dev)->eu_total; 337 if (!value) 338 return -ENODEV; 339 break; 340 case I915_PARAM_HAS_GPU_RESET: 341 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); 342 break; 343 case I915_PARAM_HAS_RESOURCE_STREAMER: 344 value = HAS_RESOURCE_STREAMER(dev); 345 break; 346 case I915_PARAM_HAS_EXEC_SOFTPIN: 347 value = 1; 348 break; 349 case I915_PARAM_HAS_POOLED_EU: 350 value = HAS_POOLED_EU(dev); 351 break; 352 case I915_PARAM_MIN_EU_IN_POOL: 353 value = INTEL_INFO(dev)->min_eu_in_pool; 354 break; 355 default: 356 DRM_DEBUG("Unknown parameter %d\n", param->param); 357 return -EINVAL; 358 } 359 360 if (put_user(value, param->value)) 361 return -EFAULT; 362 363 return 0; 364 } 365 366 static int i915_get_bridge_dev(struct drm_device *dev) 367 { 368 struct drm_i915_private *dev_priv = to_i915(dev); 369 static struct pci_dev i915_bridge_dev; 370 371 i915_bridge_dev.dev.bsddev = pci_find_dbsf(0, 0, 0, 0); 372 if (!i915_bridge_dev.dev.bsddev) { 373 DRM_ERROR("bridge device not found\n"); 374 return -1; 375 } 376 377 dev_priv->bridge_dev = &i915_bridge_dev; 378 return 0; 379 } 380 381 /* Allocate space for the MCH regs if needed, return nonzero on error */ 382 static int 383 intel_alloc_mchbar_resource(struct drm_device *dev) 384 { 385 struct drm_i915_private *dev_priv = to_i915(dev); 386 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 387 u32 temp_lo, temp_hi = 0; 388 u64 mchbar_addr; 389 device_t vga; 390 391 if (INTEL_INFO(dev)->gen >= 4) 392 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 393 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 394 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 395 396 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 397 #ifdef CONFIG_PNP 398 if (mchbar_addr && 399 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 400 return 0; 401 #endif 402 403 /* Get some space for it */ 404 vga = device_get_parent(dev->dev->bsddev); 405 dev_priv->mch_res_rid = 0x100; 406 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), 407 dev->dev->bsddev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL, 408 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1); 409 if (dev_priv->mch_res == NULL) { 410 DRM_ERROR("failed mchbar resource alloc\n"); 411 return (-ENOMEM); 412 } 413 414 if (INTEL_INFO(dev)->gen >= 4) 415 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 416 upper_32_bits(rman_get_start(dev_priv->mch_res))); 417 418 pci_write_config_dword(dev_priv->bridge_dev, reg, 419 lower_32_bits(rman_get_start(dev_priv->mch_res))); 420 return 0; 421 } 422 423 /* Setup MCHBAR if possible, return true if we should disable it again */ 424 static void 425 intel_setup_mchbar(struct drm_device *dev) 426 { 427 struct drm_i915_private *dev_priv = to_i915(dev); 428 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 429 u32 temp; 430 bool enabled; 431 432 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 433 return; 434 435 dev_priv->mchbar_need_disable = false; 436 437 if (IS_I915G(dev) || IS_I915GM(dev)) { 438 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); 439 enabled = !!(temp & DEVEN_MCHBAR_EN); 440 } else { 441 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 442 enabled = temp & 1; 443 } 444 445 /* If it's already enabled, don't have to do anything */ 446 if (enabled) 447 return; 448 449 if (intel_alloc_mchbar_resource(dev)) 450 return; 451 452 dev_priv->mchbar_need_disable = true; 453 454 /* Space is allocated or reserved, so enable it. */ 455 if (IS_I915G(dev) || IS_I915GM(dev)) { 456 pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 457 temp | DEVEN_MCHBAR_EN); 458 } else { 459 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 460 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 461 } 462 } 463 464 static void 465 intel_teardown_mchbar(struct drm_device *dev) 466 { 467 struct drm_i915_private *dev_priv = to_i915(dev); 468 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 469 device_t vga; 470 471 if (dev_priv->mchbar_need_disable) { 472 if (IS_I915G(dev) || IS_I915GM(dev)) { 473 u32 deven_val; 474 475 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, 476 &deven_val); 477 deven_val &= ~DEVEN_MCHBAR_EN; 478 pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 479 deven_val); 480 } else { 481 u32 mchbar_val; 482 483 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, 484 &mchbar_val); 485 mchbar_val &= ~1; 486 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, 487 mchbar_val); 488 } 489 } 490 491 if (dev_priv->mch_res != NULL) { 492 vga = device_get_parent(dev->dev->bsddev); 493 BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev->bsddev, 494 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 495 BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev->bsddev, 496 SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res); 497 dev_priv->mch_res = NULL; 498 } 499 } 500 501 #if 0 502 /* true = enable decode, false = disable decoder */ 503 static unsigned int i915_vga_set_decode(void *cookie, bool state) 504 { 505 struct drm_device *dev = cookie; 506 507 intel_modeset_vga_set_state(dev, state); 508 if (state) 509 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | 510 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 511 else 512 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 513 } 514 515 static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 516 { 517 struct drm_device *dev = pci_get_drvdata(pdev); 518 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 519 520 if (state == VGA_SWITCHEROO_ON) { 521 pr_info("switched on\n"); 522 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 523 /* i915 resume handler doesn't set to D0 */ 524 pci_set_power_state(dev->pdev, PCI_D0); 525 i915_resume_switcheroo(dev); 526 dev->switch_power_state = DRM_SWITCH_POWER_ON; 527 } else { 528 pr_info("switched off\n"); 529 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 530 i915_suspend_switcheroo(dev, pmm); 531 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 532 } 533 } 534 535 static bool i915_switcheroo_can_switch(struct pci_dev *pdev) 536 { 537 struct drm_device *dev = pci_get_drvdata(pdev); 538 539 /* 540 * FIXME: open_count is protected by drm_global_mutex but that would lead to 541 * locking inversion with the driver load path. And the access here is 542 * completely racy anyway. So don't bother with locking for now. 543 */ 544 return dev->open_count == 0; 545 } 546 547 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { 548 .set_gpu_state = i915_switcheroo_set_state, 549 .reprobe = NULL, 550 .can_switch = i915_switcheroo_can_switch, 551 }; 552 #endif 553 554 static void i915_gem_fini(struct drm_device *dev) 555 { 556 struct drm_i915_private *dev_priv = to_i915(dev); 557 558 /* 559 * Neither the BIOS, ourselves or any other kernel 560 * expects the system to be in execlists mode on startup, 561 * so we need to reset the GPU back to legacy mode. And the only 562 * known way to disable logical contexts is through a GPU reset. 563 * 564 * So in order to leave the system in a known default configuration, 565 * always reset the GPU upon unload. Afterwards we then clean up the 566 * GEM state tracking, flushing off the requests and leaving the 567 * system in a known idle state. 568 * 569 * Note that is of the upmost importance that the GPU is idle and 570 * all stray writes are flushed *before* we dismantle the backing 571 * storage for the pinned objects. 572 * 573 * However, since we are uncertain that reseting the GPU on older 574 * machines is a good idea, we don't - just in case it leaves the 575 * machine in an unusable condition. 576 */ 577 if (HAS_HW_CONTEXTS(dev)) { 578 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); 579 WARN_ON(reset && reset != -ENODEV); 580 } 581 582 mutex_lock(&dev->struct_mutex); 583 i915_gem_reset(dev); 584 i915_gem_cleanup_engines(dev); 585 i915_gem_context_fini(dev); 586 mutex_unlock(&dev->struct_mutex); 587 588 WARN_ON(!list_empty(&to_i915(dev)->context_list)); 589 } 590 591 static int i915_load_modeset_init(struct drm_device *dev) 592 { 593 struct drm_i915_private *dev_priv = to_i915(dev); 594 int ret; 595 596 if (i915_inject_load_failure()) 597 return -ENODEV; 598 599 ret = intel_bios_init(dev_priv); 600 if (ret) 601 DRM_INFO("failed to find VBIOS tables\n"); 602 603 /* If we have > 1 VGA cards, then we need to arbitrate access 604 * to the common VGA resources. 605 * 606 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), 607 * then we do not take part in VGA arbitration and the 608 * vga_client_register() fails with -ENODEV. 609 */ 610 #if 0 611 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 612 if (ret && ret != -ENODEV) 613 goto out; 614 615 intel_register_dsm_handler(); 616 617 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); 618 if (ret) 619 goto cleanup_vga_client; 620 #endif 621 622 /* must happen before intel_power_domains_init_hw() on VLV/CHV */ 623 intel_update_rawclk(dev_priv); 624 625 intel_power_domains_init_hw(dev_priv, false); 626 627 intel_csr_ucode_init(dev_priv); 628 629 ret = intel_irq_install(dev_priv); 630 if (ret) 631 goto cleanup_csr; 632 633 intel_setup_gmbus(dev); 634 635 /* Important: The output setup functions called by modeset_init need 636 * working irqs for e.g. gmbus and dp aux transfers. */ 637 intel_modeset_init(dev); 638 639 intel_guc_init(dev); 640 641 ret = i915_gem_init(dev); 642 if (ret) 643 goto cleanup_irq; 644 645 intel_modeset_gem_init(dev); 646 647 if (INTEL_INFO(dev)->num_pipes == 0) 648 return 0; 649 650 ret = intel_fbdev_init(dev); 651 if (ret) 652 goto cleanup_gem; 653 654 /* Only enable hotplug handling once the fbdev is fully set up. */ 655 intel_hpd_init(dev_priv); 656 657 drm_kms_helper_poll_init(dev); 658 659 #ifdef __DragonFly__ 660 /* 661 * If we are dealing with dual GPU machines the vga_switcheroo module 662 * has been loaded. Machines with dual GPUs have an integrated graphics 663 * device (IGD), which we assume is an Intel device. The other, the 664 * discrete device (DIS), is either an NVidia or a Radeon device. For 665 * now we will force switch the gmux so the intel driver outputs 666 * both to the laptop panel and the external monitor. 667 * 668 * DragonFly does not have an nvidia native driver yet. In the future, 669 * we will check for the radeon device: if present, we will leave 670 * the gmux switch as it is, so the user can choose between the IGD and 671 * the DIS using the /dev/vga_switcheroo device. 672 */ 673 if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { 674 ret = vga_switcheroo_force_migd(); 675 if (ret) { 676 DRM_INFO("could not switch gmux to IGD\n"); 677 } 678 } 679 #endif 680 681 return 0; 682 683 cleanup_gem: 684 i915_gem_fini(dev); 685 cleanup_irq: 686 intel_guc_fini(dev); 687 drm_irq_uninstall(dev); 688 intel_teardown_gmbus(dev); 689 cleanup_csr: 690 intel_csr_ucode_fini(dev_priv); 691 intel_power_domains_fini(dev_priv); 692 #if 0 693 vga_switcheroo_unregister_client(dev->pdev); 694 cleanup_vga_client: 695 vga_client_register(dev->pdev, NULL, NULL, NULL); 696 out: 697 #endif 698 return ret; 699 } 700 701 #if IS_ENABLED(CONFIG_FB) 702 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 703 { 704 struct apertures_struct *ap; 705 struct pci_dev *pdev = dev_priv->drm.pdev; 706 struct i915_ggtt *ggtt = &dev_priv->ggtt; 707 bool primary; 708 int ret; 709 710 ap = alloc_apertures(1); 711 if (!ap) 712 return -ENOMEM; 713 714 ap->ranges[0].base = ggtt->mappable_base; 715 ap->ranges[0].size = ggtt->mappable_end; 716 717 primary = 718 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 719 720 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); 721 722 kfree(ap); 723 724 return ret; 725 } 726 #else 727 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 728 { 729 return 0; 730 } 731 #endif 732 733 #if !defined(CONFIG_VGA_CONSOLE) 734 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 735 { 736 return 0; 737 } 738 #elif !defined(CONFIG_DUMMY_CONSOLE) 739 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 740 { 741 return -ENODEV; 742 } 743 #else 744 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 745 { 746 int ret = 0; 747 748 DRM_INFO("Replacing VGA console driver\n"); 749 750 console_lock(); 751 if (con_is_bound(&vga_con)) 752 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 753 if (ret == 0) { 754 ret = do_unregister_con_driver(&vga_con); 755 756 /* Ignore "already unregistered". */ 757 if (ret == -ENODEV) 758 ret = 0; 759 } 760 console_unlock(); 761 762 return ret; 763 } 764 #endif 765 766 static void intel_init_dpio(struct drm_i915_private *dev_priv) 767 { 768 /* 769 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 770 * CHV x1 PHY (DP/HDMI D) 771 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 772 */ 773 if (IS_CHERRYVIEW(dev_priv)) { 774 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 775 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 776 } else if (IS_VALLEYVIEW(dev_priv)) { 777 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 778 } 779 } 780 781 static int i915_workqueues_init(struct drm_i915_private *dev_priv) 782 { 783 /* 784 * The i915 workqueue is primarily used for batched retirement of 785 * requests (and thus managing bo) once the task has been completed 786 * by the GPU. i915_gem_retire_requests() is called directly when we 787 * need high-priority retirement, such as waiting for an explicit 788 * bo. 789 * 790 * It is also used for periodic low-priority events, such as 791 * idle-timers and recording error state. 792 * 793 * All tasks on the workqueue are expected to acquire the dev mutex 794 * so there is no point in running more than one instance of the 795 * workqueue at any time. Use an ordered one. 796 */ 797 dev_priv->wq = alloc_ordered_workqueue("i915", 0); 798 if (dev_priv->wq == NULL) 799 goto out_err; 800 801 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); 802 if (dev_priv->hotplug.dp_wq == NULL) 803 goto out_free_wq; 804 805 return 0; 806 807 out_free_wq: 808 destroy_workqueue(dev_priv->wq); 809 out_err: 810 DRM_ERROR("Failed to allocate workqueues.\n"); 811 812 return -ENOMEM; 813 } 814 815 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) 816 { 817 destroy_workqueue(dev_priv->hotplug.dp_wq); 818 destroy_workqueue(dev_priv->wq); 819 } 820 821 /** 822 * i915_driver_init_early - setup state not requiring device access 823 * @dev_priv: device private 824 * 825 * Initialize everything that is a "SW-only" state, that is state not 826 * requiring accessing the device or exposing the driver via kernel internal 827 * or userspace interfaces. Example steps belonging here: lock initialization, 828 * system memory allocation, setting up device specific attributes and 829 * function hooks not requiring accessing the device. 830 */ 831 static int i915_driver_init_early(struct drm_i915_private *dev_priv, 832 const struct pci_device_id *ent) 833 { 834 const struct intel_device_info *match_info = 835 (struct intel_device_info *)ent->driver_data; 836 struct intel_device_info *device_info; 837 int ret = 0; 838 839 if (i915_inject_load_failure()) 840 return -ENODEV; 841 842 /* Setup the write-once "constant" device info */ 843 device_info = mkwrite_device_info(dev_priv); 844 memcpy(device_info, match_info, sizeof(*device_info)); 845 device_info->device_id = dev_priv->drm.pdev->device; 846 847 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); 848 device_info->gen_mask = BIT(device_info->gen - 1); 849 850 lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE); 851 lockinit(&dev_priv->gpu_error.lock, "915err", 0, LK_CANRECURSE); 852 lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE); 853 lockinit(&dev_priv->uncore.lock, "915gt", 0, LK_CANRECURSE); 854 lockinit(&dev_priv->mm.object_stat_lock, "i915osl", 0, 0); 855 lockinit(&dev_priv->mmio_flip_lock, "i915mfl", 0, 0); 856 lockinit(&dev_priv->sb_lock, "i915sbl", 0, LK_CANRECURSE); 857 lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE); 858 lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE); 859 lockinit(&dev_priv->wm.wm_mutex, "i915wm", 0, LK_CANRECURSE); 860 lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE); 861 862 ret = i915_workqueues_init(dev_priv); 863 if (ret < 0) 864 return ret; 865 866 ret = intel_gvt_init(dev_priv); 867 if (ret < 0) 868 goto err_workqueues; 869 870 /* This must be called before any calls to HAS_PCH_* */ 871 intel_detect_pch(&dev_priv->drm); 872 873 intel_pm_setup(&dev_priv->drm); 874 intel_init_dpio(dev_priv); 875 intel_power_domains_init(dev_priv); 876 intel_irq_init(dev_priv); 877 intel_init_display_hooks(dev_priv); 878 intel_init_clock_gating_hooks(dev_priv); 879 intel_init_audio_hooks(dev_priv); 880 i915_gem_load_init(&dev_priv->drm); 881 882 intel_display_crc_init(&dev_priv->drm); 883 884 intel_device_info_dump(dev_priv); 885 886 /* Not all pre-production machines fall into this category, only the 887 * very first ones. Almost everything should work, except for maybe 888 * suspend/resume. And we don't implement workarounds that affect only 889 * pre-production machines. */ 890 if (IS_HSW_EARLY_SDV(dev_priv)) 891 DRM_INFO("This is an early pre-production Haswell machine. " 892 "It may not be fully functional.\n"); 893 894 return 0; 895 896 err_workqueues: 897 i915_workqueues_cleanup(dev_priv); 898 return ret; 899 } 900 901 /** 902 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() 903 * @dev_priv: device private 904 */ 905 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) 906 { 907 i915_gem_load_cleanup(&dev_priv->drm); 908 i915_workqueues_cleanup(dev_priv); 909 } 910 911 static int i915_mmio_setup(struct drm_device *dev) 912 { 913 struct drm_i915_private *dev_priv = to_i915(dev); 914 int mmio_bar; 915 int mmio_size; 916 917 mmio_bar = IS_GEN2(dev) ? 1 : 0; 918 /* 919 * Before gen4, the registers and the GTT are behind different BARs. 920 * However, from gen4 onwards, the registers and the GTT are shared 921 * in the same BAR, so we want to restrict this ioremap from 922 * clobbering the GTT which we want ioremap_wc instead. Fortunately, 923 * the register BAR remains the same size for all the earlier 924 * generations up to Ironlake. 925 */ 926 if (INTEL_INFO(dev)->gen < 5) 927 mmio_size = 512 * 1024; 928 else 929 mmio_size = 2 * 1024 * 1024; 930 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); 931 if (dev_priv->regs == NULL) { 932 DRM_ERROR("failed to map registers\n"); 933 934 return -EIO; 935 } 936 937 /* Try to make sure MCHBAR is enabled before poking at it */ 938 intel_setup_mchbar(dev); 939 940 return 0; 941 } 942 943 static void i915_mmio_cleanup(struct drm_device *dev) 944 { 945 #if 0 946 struct drm_i915_private *dev_priv = to_i915(dev); 947 #endif 948 949 intel_teardown_mchbar(dev); 950 #if 0 951 pci_iounmap(dev->pdev, dev_priv->regs); 952 #endif 953 } 954 955 /** 956 * i915_driver_init_mmio - setup device MMIO 957 * @dev_priv: device private 958 * 959 * Setup minimal device state necessary for MMIO accesses later in the 960 * initialization sequence. The setup here should avoid any other device-wide 961 * side effects or exposing the driver via kernel internal or user space 962 * interfaces. 963 */ 964 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) 965 { 966 struct drm_device *dev = &dev_priv->drm; 967 int ret; 968 969 if (i915_inject_load_failure()) 970 return -ENODEV; 971 972 if (i915_get_bridge_dev(dev)) 973 return -EIO; 974 975 ret = i915_mmio_setup(dev); 976 if (ret < 0) 977 goto put_bridge; 978 979 intel_uncore_init(dev_priv); 980 981 return 0; 982 983 put_bridge: 984 pci_dev_put(dev_priv->bridge_dev); 985 986 return ret; 987 } 988 989 /** 990 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() 991 * @dev_priv: device private 992 */ 993 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) 994 { 995 struct drm_device *dev = &dev_priv->drm; 996 997 intel_uncore_fini(dev_priv); 998 i915_mmio_cleanup(dev); 999 pci_dev_put(dev_priv->bridge_dev); 1000 } 1001 1002 static void intel_sanitize_options(struct drm_i915_private *dev_priv) 1003 { 1004 i915.enable_execlists = 1005 intel_sanitize_enable_execlists(dev_priv, 1006 i915.enable_execlists); 1007 1008 /* 1009 * i915.enable_ppgtt is read-only, so do an early pass to validate the 1010 * user's requested state against the hardware/driver capabilities. We 1011 * do this now so that we can print out any log messages once rather 1012 * than every time we check intel_enable_ppgtt(). 1013 */ 1014 i915.enable_ppgtt = 1015 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); 1016 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 1017 1018 i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); 1019 DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores)); 1020 } 1021 1022 /** 1023 * i915_driver_init_hw - setup state requiring device access 1024 * @dev_priv: device private 1025 * 1026 * Setup state that requires accessing the device, but doesn't require 1027 * exposing the driver via kernel internal or userspace interfaces. 1028 */ 1029 static int i915_driver_init_hw(struct drm_i915_private *dev_priv) 1030 { 1031 struct drm_device *dev = &dev_priv->drm; 1032 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1033 uint32_t aperture_size; 1034 int ret; 1035 1036 if (i915_inject_load_failure()) 1037 return -ENODEV; 1038 1039 intel_device_info_runtime_init(dev_priv); 1040 1041 intel_sanitize_options(dev_priv); 1042 1043 ret = i915_ggtt_init_hw(dev); 1044 if (ret) 1045 return ret; 1046 1047 ret = i915_ggtt_enable_hw(dev); 1048 if (ret) { 1049 DRM_ERROR("failed to enable GGTT\n"); 1050 goto out_ggtt; 1051 } 1052 1053 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1054 * otherwise the vga fbdev driver falls over. */ 1055 ret = i915_kick_out_firmware_fb(dev_priv); 1056 if (ret) { 1057 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1058 goto out_ggtt; 1059 } 1060 1061 ret = i915_kick_out_vgacon(dev_priv); 1062 if (ret) { 1063 DRM_ERROR("failed to remove conflicting VGA console\n"); 1064 goto out_ggtt; 1065 } 1066 1067 #if 0 1068 pci_set_master(dev->pdev); 1069 1070 /* overlay on gen2 is broken and can't address above 1G */ 1071 if (IS_GEN2(dev)) { 1072 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1073 if (ret) { 1074 DRM_ERROR("failed to set DMA mask\n"); 1075 1076 goto out_ggtt; 1077 } 1078 } 1079 1080 1081 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1082 * using 32bit addressing, overwriting memory if HWS is located 1083 * above 4GB. 1084 * 1085 * The documentation also mentions an issue with undefined 1086 * behaviour if any general state is accessed within a page above 4GB, 1087 * which also needs to be handled carefully. 1088 */ 1089 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { 1090 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1091 1092 if (ret) { 1093 DRM_ERROR("failed to set DMA mask\n"); 1094 1095 goto out_ggtt; 1096 } 1097 } 1098 #endif 1099 1100 aperture_size = ggtt->mappable_end; 1101 1102 ggtt->mappable = 1103 io_mapping_create_wc(ggtt->mappable_base, 1104 aperture_size); 1105 if (!ggtt->mappable) { 1106 ret = -EIO; 1107 goto out_ggtt; 1108 } 1109 1110 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, 1111 aperture_size); 1112 1113 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1114 PM_QOS_DEFAULT_VALUE); 1115 1116 intel_uncore_sanitize(dev_priv); 1117 1118 intel_opregion_setup(dev_priv); 1119 1120 i915_gem_load_init_fences(dev_priv); 1121 1122 /* On the 945G/GM, the chipset reports the MSI capability on the 1123 * integrated graphics even though the support isn't actually there 1124 * according to the published specs. It doesn't appear to function 1125 * correctly in testing on 945G. 1126 * This may be a side effect of MSI having been made available for PEG 1127 * and the registers being closely associated. 1128 * 1129 * According to chipset errata, on the 965GM, MSI interrupts may 1130 * be lost or delayed, but we use them anyways to avoid 1131 * stuck interrupts on some machines. 1132 */ 1133 #if 0 1134 if (!IS_I945G(dev) && !IS_I945GM(dev)) { 1135 if (pci_enable_msi(dev->pdev) < 0) 1136 DRM_DEBUG_DRIVER("can't enable MSI"); 1137 } 1138 #endif 1139 1140 return 0; 1141 1142 out_ggtt: 1143 i915_ggtt_cleanup_hw(dev); 1144 1145 return ret; 1146 } 1147 1148 /** 1149 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() 1150 * @dev_priv: device private 1151 */ 1152 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) 1153 { 1154 struct drm_device *dev = &dev_priv->drm; 1155 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1156 1157 #if 0 1158 if (dev->pdev->msi_enabled) 1159 pci_disable_msi(dev->pdev); 1160 #endif 1161 1162 pm_qos_remove_request(&dev_priv->pm_qos); 1163 arch_phys_wc_del(ggtt->mtrr); 1164 io_mapping_free(ggtt->mappable); 1165 i915_ggtt_cleanup_hw(dev); 1166 } 1167 1168 /** 1169 * i915_driver_register - register the driver with the rest of the system 1170 * @dev_priv: device private 1171 * 1172 * Perform any steps necessary to make the driver available via kernel 1173 * internal or userspace interfaces. 1174 */ 1175 static void i915_driver_register(struct drm_i915_private *dev_priv) 1176 { 1177 struct drm_device *dev = &dev_priv->drm; 1178 1179 i915_gem_shrinker_init(dev_priv); 1180 1181 /* 1182 * Notify a valid surface after modesetting, 1183 * when running inside a VM. 1184 */ 1185 if (intel_vgpu_active(dev_priv)) 1186 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1187 1188 /* Reveal our presence to userspace */ 1189 if (drm_dev_register(dev, 0) == 0) { 1190 i915_debugfs_register(dev_priv); 1191 i915_setup_sysfs(dev); 1192 } else 1193 DRM_ERROR("Failed to register driver for userspace access!\n"); 1194 1195 if (INTEL_INFO(dev_priv)->num_pipes) { 1196 /* Must be done after probing outputs */ 1197 intel_opregion_register(dev_priv); 1198 acpi_video_register(); 1199 } 1200 1201 if (IS_GEN5(dev_priv)) 1202 intel_gpu_ips_init(dev_priv); 1203 1204 i915_audio_component_init(dev_priv); 1205 1206 /* 1207 * Some ports require correctly set-up hpd registers for detection to 1208 * work properly (leading to ghost connected connector status), e.g. VGA 1209 * on gm45. Hence we can only set up the initial fbdev config after hpd 1210 * irqs are fully enabled. We do it last so that the async config 1211 * cannot run before the connectors are registered. 1212 */ 1213 intel_fbdev_initial_config_async(dev); 1214 } 1215 1216 /** 1217 * i915_driver_unregister - cleanup the registration done in i915_driver_regiser() 1218 * @dev_priv: device private 1219 */ 1220 static void i915_driver_unregister(struct drm_i915_private *dev_priv) 1221 { 1222 i915_audio_component_cleanup(dev_priv); 1223 1224 intel_gpu_ips_teardown(); 1225 acpi_video_unregister(); 1226 intel_opregion_unregister(dev_priv); 1227 1228 i915_teardown_sysfs(&dev_priv->drm); 1229 i915_debugfs_unregister(dev_priv); 1230 drm_dev_unregister(&dev_priv->drm); 1231 1232 i915_gem_shrinker_cleanup(dev_priv); 1233 } 1234 1235 /** 1236 * i915_driver_load - setup chip and create an initial config 1237 * @dev: DRM device 1238 * @flags: startup flags 1239 * 1240 * The driver load routine has to do several things: 1241 * - drive output discovery via intel_modeset_init() 1242 * - initialize the memory manager 1243 * - allocate initial config memory 1244 * - setup the DRM framebuffer with the allocated memory 1245 */ 1246 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); 1247 int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) 1248 { 1249 struct drm_i915_private *dev_priv; 1250 int ret; 1251 1252 if (i915.nuclear_pageflip) 1253 driver.driver_features |= DRIVER_ATOMIC; 1254 1255 ret = -ENOMEM; 1256 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 1257 if (dev_priv) 1258 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev); 1259 if (ret) { 1260 dev_printk(KERN_ERR, &pdev->dev, 1261 "[" DRM_NAME ":%s] allocation failed\n", __func__); 1262 kfree(dev_priv); 1263 return ret; 1264 } 1265 1266 dev_priv->drm.pdev = pdev; 1267 dev_priv->drm.dev_private = dev_priv; 1268 1269 #if 0 1270 ret = pci_enable_device(pdev); 1271 if (ret) 1272 goto out_free_priv; 1273 #endif 1274 1275 pci_set_drvdata(pdev, &dev_priv->drm); 1276 1277 ret = i915_driver_init_early(dev_priv, ent); 1278 if (ret < 0) 1279 goto out_pci_disable; 1280 1281 intel_runtime_pm_get(dev_priv); 1282 1283 ret = i915_driver_init_mmio(dev_priv); 1284 if (ret < 0) 1285 goto out_runtime_pm_put; 1286 1287 ret = i915_driver_init_hw(dev_priv); 1288 if (ret < 0) 1289 goto out_cleanup_mmio; 1290 1291 /* 1292 * TODO: move the vblank init and parts of modeset init steps into one 1293 * of the i915_driver_init_/i915_driver_register functions according 1294 * to the role/effect of the given init step. 1295 */ 1296 if (INTEL_INFO(dev_priv)->num_pipes) { 1297 ret = drm_vblank_init(&dev_priv->drm, 1298 INTEL_INFO(dev_priv)->num_pipes); 1299 if (ret) 1300 goto out_cleanup_hw; 1301 } 1302 1303 ret = i915_load_modeset_init(&dev_priv->drm); 1304 if (ret < 0) 1305 goto out_cleanup_vblank; 1306 1307 i915_driver_register(dev_priv); 1308 1309 intel_runtime_pm_enable(dev_priv); 1310 1311 /* Everything is in place, we can now relax! */ 1312 DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 1313 driver.name, driver.major, driver.minor, driver.patchlevel, 1314 driver.date, pci_name(pdev), dev_priv->drm.primary->index); 1315 1316 intel_runtime_pm_put(dev_priv); 1317 1318 return 0; 1319 1320 out_cleanup_vblank: 1321 drm_vblank_cleanup(&dev_priv->drm); 1322 out_cleanup_hw: 1323 i915_driver_cleanup_hw(dev_priv); 1324 out_cleanup_mmio: 1325 i915_driver_cleanup_mmio(dev_priv); 1326 out_runtime_pm_put: 1327 intel_runtime_pm_put(dev_priv); 1328 i915_driver_cleanup_early(dev_priv); 1329 out_pci_disable: 1330 #if 0 1331 pci_disable_device(pdev); 1332 out_free_priv: 1333 #endif 1334 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); 1335 drm_dev_unref(&dev_priv->drm); 1336 return ret; 1337 } 1338 1339 void i915_driver_unload(struct drm_device *dev); 1340 void i915_driver_unload(struct drm_device *dev) 1341 { 1342 struct drm_i915_private *dev_priv = to_i915(dev); 1343 1344 intel_fbdev_fini(dev); 1345 1346 if (i915_gem_suspend(dev)) 1347 DRM_ERROR("failed to idle hardware; continuing to unload!\n"); 1348 1349 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 1350 1351 i915_driver_unregister(dev_priv); 1352 1353 drm_vblank_cleanup(dev); 1354 1355 intel_modeset_cleanup(dev); 1356 1357 /* 1358 * free the memory space allocated for the child device 1359 * config parsed from VBT 1360 */ 1361 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { 1362 kfree(dev_priv->vbt.child_dev); 1363 dev_priv->vbt.child_dev = NULL; 1364 dev_priv->vbt.child_dev_num = 0; 1365 } 1366 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); 1367 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; 1368 kfree(dev_priv->vbt.lfp_lvds_vbt_mode); 1369 dev_priv->vbt.lfp_lvds_vbt_mode = NULL; 1370 1371 #if 0 1372 vga_switcheroo_unregister_client(dev->pdev); 1373 vga_client_register(dev->pdev, NULL, NULL, NULL); 1374 #endif 1375 1376 intel_csr_ucode_fini(dev_priv); 1377 1378 /* Free error state after interrupts are fully disabled. */ 1379 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 1380 i915_destroy_error_state(dev); 1381 1382 /* Flush any outstanding unpin_work. */ 1383 drain_workqueue(dev_priv->wq); 1384 1385 intel_guc_fini(dev); 1386 i915_gem_fini(dev); 1387 intel_fbc_cleanup_cfb(dev_priv); 1388 1389 intel_power_domains_fini(dev_priv); 1390 1391 i915_driver_cleanup_hw(dev_priv); 1392 i915_driver_cleanup_mmio(dev_priv); 1393 1394 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 1395 1396 i915_driver_cleanup_early(dev_priv); 1397 } 1398 1399 static int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1400 { 1401 int ret; 1402 1403 ret = i915_gem_open(dev, file); 1404 if (ret) 1405 return ret; 1406 1407 return 0; 1408 } 1409 1410 /** 1411 * i915_driver_lastclose - clean up after all DRM clients have exited 1412 * @dev: DRM device 1413 * 1414 * Take care of cleaning up after all DRM clients have exited. In the 1415 * mode setting case, we want to restore the kernel's initial mode (just 1416 * in case the last client left us in a bad state). 1417 * 1418 * Additionally, in the non-mode setting case, we'll tear down the GTT 1419 * and DMA structures, since the kernel won't be using them, and clea 1420 * up any GEM state. 1421 */ 1422 static void i915_driver_lastclose(struct drm_device *dev) 1423 { 1424 intel_fbdev_restore_mode(dev); 1425 #if 0 1426 vga_switcheroo_process_delayed_switch(); 1427 #endif 1428 } 1429 1430 static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) 1431 { 1432 mutex_lock(&dev->struct_mutex); 1433 i915_gem_context_close(dev, file); 1434 i915_gem_release(dev, file); 1435 mutex_unlock(&dev->struct_mutex); 1436 } 1437 1438 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1439 { 1440 struct drm_i915_file_private *file_priv = file->driver_priv; 1441 1442 kfree(file_priv); 1443 } 1444 1445 static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 1446 { 1447 struct drm_device *dev = &dev_priv->drm; 1448 struct intel_encoder *encoder; 1449 1450 drm_modeset_lock_all(dev); 1451 for_each_intel_encoder(dev, encoder) 1452 if (encoder->suspend) 1453 encoder->suspend(encoder); 1454 drm_modeset_unlock_all(dev); 1455 } 1456 1457 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 1458 bool rpm_resume); 1459 static int vlv_suspend_complete(struct drm_i915_private *dev_priv); 1460 1461 static bool suspend_to_idle(struct drm_i915_private *dev_priv) 1462 { 1463 #if IS_ENABLED(CONFIG_ACPI_SLEEP) 1464 if (acpi_target_system_state() < ACPI_STATE_S3) 1465 return true; 1466 #endif 1467 return false; 1468 } 1469 1470 static int i915_drm_suspend(struct drm_device *dev) 1471 { 1472 struct drm_i915_private *dev_priv = to_i915(dev); 1473 pci_power_t opregion_target_state; 1474 int error; 1475 1476 /* ignore lid events during suspend */ 1477 mutex_lock(&dev_priv->modeset_restore_lock); 1478 dev_priv->modeset_restore = MODESET_SUSPENDED; 1479 mutex_unlock(&dev_priv->modeset_restore_lock); 1480 1481 disable_rpm_wakeref_asserts(dev_priv); 1482 1483 /* We do a lot of poking in a lot of registers, make sure they work 1484 * properly. */ 1485 intel_display_set_init_power(dev_priv, true); 1486 1487 drm_kms_helper_poll_disable(dev); 1488 1489 #if 0 1490 pci_save_state(dev->pdev); 1491 #endif 1492 1493 error = i915_gem_suspend(dev); 1494 if (error) { 1495 dev_err(&dev->pdev->dev, 1496 "GEM idle failed, resume might fail\n"); 1497 goto out; 1498 } 1499 1500 intel_guc_suspend(dev); 1501 1502 intel_display_suspend(dev); 1503 1504 intel_dp_mst_suspend(dev); 1505 1506 intel_runtime_pm_disable_interrupts(dev_priv); 1507 intel_hpd_cancel_work(dev_priv); 1508 1509 intel_suspend_encoders(dev_priv); 1510 1511 intel_suspend_hw(dev); 1512 1513 i915_gem_suspend_gtt_mappings(dev); 1514 1515 i915_save_state(dev); 1516 1517 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 1518 intel_opregion_notify_adapter(dev_priv, opregion_target_state); 1519 1520 intel_uncore_forcewake_reset(dev_priv, false); 1521 intel_opregion_unregister(dev_priv); 1522 1523 #if 0 1524 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 1525 #endif 1526 1527 dev_priv->suspend_count++; 1528 1529 intel_csr_ucode_suspend(dev_priv); 1530 1531 out: 1532 enable_rpm_wakeref_asserts(dev_priv); 1533 1534 return error; 1535 } 1536 1537 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 1538 { 1539 struct drm_i915_private *dev_priv = to_i915(drm_dev); 1540 bool fw_csr; 1541 int ret; 1542 1543 disable_rpm_wakeref_asserts(dev_priv); 1544 1545 intel_display_set_init_power(dev_priv, false); 1546 1547 fw_csr = !IS_BROXTON(dev_priv) && 1548 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; 1549 /* 1550 * In case of firmware assisted context save/restore don't manually 1551 * deinit the power domains. This also means the CSR/DMC firmware will 1552 * stay active, it will power down any HW resources as required and 1553 * also enable deeper system power states that would be blocked if the 1554 * firmware was inactive. 1555 */ 1556 if (!fw_csr) 1557 intel_power_domains_suspend(dev_priv); 1558 1559 ret = 0; 1560 if (IS_BROXTON(dev_priv)) 1561 bxt_enable_dc9(dev_priv); 1562 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1563 hsw_enable_pc8(dev_priv); 1564 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1565 ret = vlv_suspend_complete(dev_priv); 1566 1567 if (ret) { 1568 DRM_ERROR("Suspend complete failed: %d\n", ret); 1569 if (!fw_csr) 1570 intel_power_domains_init_hw(dev_priv, true); 1571 1572 goto out; 1573 } 1574 1575 #if 0 1576 pci_disable_device(drm_dev->pdev); 1577 /* 1578 * During hibernation on some platforms the BIOS may try to access 1579 * the device even though it's already in D3 and hang the machine. So 1580 * leave the device in D0 on those platforms and hope the BIOS will 1581 * power down the device properly. The issue was seen on multiple old 1582 * GENs with different BIOS vendors, so having an explicit blacklist 1583 * is inpractical; apply the workaround on everything pre GEN6. The 1584 * platforms where the issue was seen: 1585 * Lenovo Thinkpad X301, X61s, X60, T60, X41 1586 * Fujitsu FSC S7110 1587 * Acer Aspire 1830T 1588 */ 1589 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) 1590 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 1591 #endif 1592 1593 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); 1594 1595 out: 1596 enable_rpm_wakeref_asserts(dev_priv); 1597 1598 return ret; 1599 } 1600 1601 int i915_suspend_switcheroo(device_t kdev) 1602 { 1603 struct drm_softc *softc = device_get_softc(kdev); 1604 struct drm_device *dev = softc->drm_driver_data; 1605 int error; 1606 1607 if (!dev) { 1608 DRM_ERROR("dev: %p\n", dev); 1609 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 1610 return -ENODEV; 1611 } 1612 1613 #if 0 1614 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && 1615 state.event != PM_EVENT_FREEZE)) 1616 return -EINVAL; 1617 #endif 1618 1619 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1620 return 0; 1621 1622 error = i915_drm_suspend(dev); 1623 if (error) 1624 return error; 1625 1626 return i915_drm_suspend_late(dev, false); 1627 } 1628 1629 static int i915_drm_resume(struct drm_device *dev) 1630 { 1631 struct drm_i915_private *dev_priv = to_i915(dev); 1632 int ret; 1633 1634 disable_rpm_wakeref_asserts(dev_priv); 1635 1636 ret = i915_ggtt_enable_hw(dev); 1637 if (ret) 1638 DRM_ERROR("failed to re-enable GGTT\n"); 1639 1640 intel_csr_ucode_resume(dev_priv); 1641 1642 i915_gem_resume(dev); 1643 1644 i915_restore_state(dev); 1645 intel_opregion_setup(dev_priv); 1646 1647 intel_init_pch_refclk(dev); 1648 drm_mode_config_reset(dev); 1649 1650 /* 1651 * Interrupts have to be enabled before any batches are run. If not the 1652 * GPU will hang. i915_gem_init_hw() will initiate batches to 1653 * update/restore the context. 1654 * 1655 * Modeset enabling in intel_modeset_init_hw() also needs working 1656 * interrupts. 1657 */ 1658 intel_runtime_pm_enable_interrupts(dev_priv); 1659 1660 mutex_lock(&dev->struct_mutex); 1661 if (i915_gem_init_hw(dev)) { 1662 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 1663 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 1664 } 1665 mutex_unlock(&dev->struct_mutex); 1666 1667 intel_guc_resume(dev); 1668 1669 intel_modeset_init_hw(dev); 1670 1671 spin_lock_irq(&dev_priv->irq_lock); 1672 if (dev_priv->display.hpd_irq_setup) 1673 dev_priv->display.hpd_irq_setup(dev_priv); 1674 spin_unlock_irq(&dev_priv->irq_lock); 1675 1676 intel_dp_mst_resume(dev); 1677 1678 intel_display_resume(dev); 1679 1680 /* 1681 * ... but also need to make sure that hotplug processing 1682 * doesn't cause havoc. Like in the driver load code we don't 1683 * bother with the tiny race here where we might loose hotplug 1684 * notifications. 1685 * */ 1686 intel_hpd_init(dev_priv); 1687 /* Config may have changed between suspend and resume */ 1688 drm_helper_hpd_irq_event(dev); 1689 1690 intel_opregion_register(dev_priv); 1691 1692 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 1693 1694 mutex_lock(&dev_priv->modeset_restore_lock); 1695 dev_priv->modeset_restore = MODESET_DONE; 1696 mutex_unlock(&dev_priv->modeset_restore_lock); 1697 1698 intel_opregion_notify_adapter(dev_priv, PCI_D0); 1699 1700 intel_autoenable_gt_powersave(dev_priv); 1701 drm_kms_helper_poll_enable(dev); 1702 1703 enable_rpm_wakeref_asserts(dev_priv); 1704 1705 return 0; 1706 } 1707 1708 static int i915_drm_resume_early(struct drm_device *dev) 1709 { 1710 struct drm_i915_private *dev_priv = to_i915(dev); 1711 int ret = 0; 1712 1713 /* 1714 * We have a resume ordering issue with the snd-hda driver also 1715 * requiring our device to be power up. Due to the lack of a 1716 * parent/child relationship we currently solve this with an early 1717 * resume hook. 1718 * 1719 * FIXME: This should be solved with a special hdmi sink device or 1720 * similar so that power domains can be employed. 1721 */ 1722 1723 /* 1724 * Note that we need to set the power state explicitly, since we 1725 * powered off the device during freeze and the PCI core won't power 1726 * it back up for us during thaw. Powering off the device during 1727 * freeze is not a hard requirement though, and during the 1728 * suspend/resume phases the PCI core makes sure we get here with the 1729 * device powered on. So in case we change our freeze logic and keep 1730 * the device powered we can also remove the following set power state 1731 * call. 1732 */ 1733 #if 0 1734 ret = pci_set_power_state(dev->pdev, PCI_D0); 1735 if (ret) { 1736 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); 1737 goto out; 1738 } 1739 1740 /* 1741 * Note that pci_enable_device() first enables any parent bridge 1742 * device and only then sets the power state for this device. The 1743 * bridge enabling is a nop though, since bridge devices are resumed 1744 * first. The order of enabling power and enabling the device is 1745 * imposed by the PCI core as described above, so here we preserve the 1746 * same order for the freeze/thaw phases. 1747 * 1748 * TODO: eventually we should remove pci_disable_device() / 1749 * pci_enable_enable_device() from suspend/resume. Due to how they 1750 * depend on the device enable refcount we can't anyway depend on them 1751 * disabling/enabling the device. 1752 */ 1753 if (pci_enable_device(dev->pdev)) { 1754 ret = -EIO; 1755 goto out; 1756 } 1757 1758 pci_set_master(dev->pdev); 1759 #endif 1760 1761 disable_rpm_wakeref_asserts(dev_priv); 1762 1763 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1764 ret = vlv_resume_prepare(dev_priv, false); 1765 if (ret) 1766 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 1767 ret); 1768 1769 intel_uncore_early_sanitize(dev_priv, true); 1770 1771 if (IS_BROXTON(dev_priv)) { 1772 if (!dev_priv->suspended_to_idle) 1773 gen9_sanitize_dc_state(dev_priv); 1774 bxt_disable_dc9(dev_priv); 1775 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1776 hsw_disable_pc8(dev_priv); 1777 } 1778 1779 intel_uncore_sanitize(dev_priv); 1780 1781 if (IS_BROXTON(dev_priv) || 1782 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 1783 intel_power_domains_init_hw(dev_priv, true); 1784 1785 enable_rpm_wakeref_asserts(dev_priv); 1786 1787 #if 0 1788 out: 1789 #endif 1790 dev_priv->suspended_to_idle = false; 1791 1792 return ret; 1793 } 1794 1795 int i915_resume_switcheroo(struct drm_device *dev) 1796 { 1797 int ret; 1798 1799 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1800 return 0; 1801 1802 ret = i915_drm_resume_early(dev); 1803 if (ret) 1804 return ret; 1805 1806 return i915_drm_resume(dev); 1807 } 1808 1809 /** 1810 * i915_reset - reset chip after a hang 1811 * @dev: drm device to reset 1812 * 1813 * Reset the chip. Useful if a hang is detected. Returns zero on successful 1814 * reset or otherwise an error code. 1815 * 1816 * Procedure is fairly simple: 1817 * - reset the chip using the reset reg 1818 * - re-init context state 1819 * - re-init hardware status page 1820 * - re-init ring buffer 1821 * - re-init interrupt state 1822 * - re-init display 1823 */ 1824 int i915_reset(struct drm_i915_private *dev_priv) 1825 { 1826 struct drm_device *dev = &dev_priv->drm; 1827 struct i915_gpu_error *error = &dev_priv->gpu_error; 1828 unsigned reset_counter; 1829 int ret; 1830 1831 mutex_lock(&dev->struct_mutex); 1832 1833 /* Clear any previous failed attempts at recovery. Time to try again. */ 1834 atomic_andnot(I915_WEDGED, &error->reset_counter); 1835 1836 /* Clear the reset-in-progress flag and increment the reset epoch. */ 1837 reset_counter = atomic_inc_return(&error->reset_counter); 1838 if (WARN_ON(__i915_reset_in_progress(reset_counter))) { 1839 ret = -EIO; 1840 goto error; 1841 } 1842 1843 pr_notice("drm/i915: Resetting chip after gpu hang\n"); 1844 1845 i915_gem_reset(dev); 1846 1847 ret = intel_gpu_reset(dev_priv, ALL_ENGINES); 1848 if (ret) { 1849 if (ret != -ENODEV) 1850 DRM_ERROR("Failed to reset chip: %i\n", ret); 1851 else 1852 DRM_DEBUG_DRIVER("GPU reset disabled\n"); 1853 goto error; 1854 } 1855 1856 intel_overlay_reset(dev_priv); 1857 1858 /* Ok, now get things going again... */ 1859 1860 /* 1861 * Everything depends on having the GTT running, so we need to start 1862 * there. Fortunately we don't need to do this unless we reset the 1863 * chip at a PCI level. 1864 * 1865 * Next we need to restore the context, but we don't use those 1866 * yet either... 1867 * 1868 * Ring buffer needs to be re-initialized in the KMS case, or if X 1869 * was running at the time of the reset (i.e. we weren't VT 1870 * switched away). 1871 */ 1872 ret = i915_gem_init_hw(dev); 1873 if (ret) { 1874 DRM_ERROR("Failed hw init on reset %d\n", ret); 1875 goto error; 1876 } 1877 1878 mutex_unlock(&dev->struct_mutex); 1879 1880 /* 1881 * rps/rc6 re-init is necessary to restore state lost after the 1882 * reset and the re-install of gt irqs. Skip for ironlake per 1883 * previous concerns that it doesn't respond well to some forms 1884 * of re-init after reset. 1885 */ 1886 intel_autoenable_gt_powersave(dev_priv); 1887 1888 return 0; 1889 1890 error: 1891 atomic_or(I915_WEDGED, &error->reset_counter); 1892 mutex_unlock(&dev->struct_mutex); 1893 return ret; 1894 } 1895 1896 #if 0 1897 static int i915_pm_suspend(struct device *dev) 1898 { 1899 struct pci_dev *pdev = to_pci_dev(dev); 1900 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1901 1902 if (!drm_dev) { 1903 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 1904 return -ENODEV; 1905 } 1906 1907 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1908 return 0; 1909 1910 return i915_drm_suspend(drm_dev); 1911 } 1912 1913 static int i915_pm_suspend_late(struct device *dev) 1914 { 1915 struct drm_device *drm_dev = &dev_to_i915(dev)->drm; 1916 1917 /* 1918 * We have a suspend ordering issue with the snd-hda driver also 1919 * requiring our device to be power up. Due to the lack of a 1920 * parent/child relationship we currently solve this with an late 1921 * suspend hook. 1922 * 1923 * FIXME: This should be solved with a special hdmi sink device or 1924 * similar so that power domains can be employed. 1925 */ 1926 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1927 return 0; 1928 1929 return i915_drm_suspend_late(drm_dev, false); 1930 } 1931 1932 static int i915_pm_poweroff_late(struct device *dev) 1933 { 1934 struct drm_device *drm_dev = &dev_to_i915(dev)->drm; 1935 1936 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1937 return 0; 1938 1939 return i915_drm_suspend_late(drm_dev, true); 1940 } 1941 1942 static int i915_pm_resume_early(struct device *dev) 1943 { 1944 struct drm_device *drm_dev = &dev_to_i915(dev)->drm; 1945 1946 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1947 return 0; 1948 1949 return i915_drm_resume_early(drm_dev); 1950 } 1951 1952 static int i915_pm_resume(struct device *dev) 1953 { 1954 struct drm_device *drm_dev = &dev_to_i915(dev)->drm; 1955 1956 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1957 return 0; 1958 1959 return i915_drm_resume(drm_dev); 1960 } 1961 1962 /* freeze: before creating the hibernation_image */ 1963 static int i915_pm_freeze(struct device *dev) 1964 { 1965 return i915_pm_suspend(dev); 1966 } 1967 1968 static int i915_pm_freeze_late(struct device *dev) 1969 { 1970 int ret; 1971 1972 ret = i915_pm_suspend_late(dev); 1973 if (ret) 1974 return ret; 1975 1976 ret = i915_gem_freeze_late(dev_to_i915(dev)); 1977 if (ret) 1978 return ret; 1979 1980 return 0; 1981 } 1982 1983 /* thaw: called after creating the hibernation image, but before turning off. */ 1984 static int i915_pm_thaw_early(struct device *dev) 1985 { 1986 return i915_pm_resume_early(dev); 1987 } 1988 1989 static int i915_pm_thaw(struct device *dev) 1990 { 1991 return i915_pm_resume(dev); 1992 } 1993 1994 /* restore: called after loading the hibernation image. */ 1995 static int i915_pm_restore_early(struct device *dev) 1996 { 1997 return i915_pm_resume_early(dev); 1998 } 1999 2000 static int i915_pm_restore(struct device *dev) 2001 { 2002 return i915_pm_resume(dev); 2003 } 2004 #endif 2005 2006 /* 2007 * Save all Gunit registers that may be lost after a D3 and a subsequent 2008 * S0i[R123] transition. The list of registers needing a save/restore is 2009 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit 2010 * registers in the following way: 2011 * - Driver: saved/restored by the driver 2012 * - Punit : saved/restored by the Punit firmware 2013 * - No, w/o marking: no need to save/restore, since the register is R/O or 2014 * used internally by the HW in a way that doesn't depend 2015 * keeping the content across a suspend/resume. 2016 * - Debug : used for debugging 2017 * 2018 * We save/restore all registers marked with 'Driver', with the following 2019 * exceptions: 2020 * - Registers out of use, including also registers marked with 'Debug'. 2021 * These have no effect on the driver's operation, so we don't save/restore 2022 * them to reduce the overhead. 2023 * - Registers that are fully setup by an initialization function called from 2024 * the resume path. For example many clock gating and RPS/RC6 registers. 2025 * - Registers that provide the right functionality with their reset defaults. 2026 * 2027 * TODO: Except for registers that based on the above 3 criteria can be safely 2028 * ignored, we save/restore all others, practically treating the HW context as 2029 * a black-box for the driver. Further investigation is needed to reduce the 2030 * saved/restored registers even further, by following the same 3 criteria. 2031 */ 2032 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2033 { 2034 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2035 int i; 2036 2037 /* GAM 0x4000-0x4770 */ 2038 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); 2039 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); 2040 s->arb_mode = I915_READ(ARB_MODE); 2041 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); 2042 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 2043 2044 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2045 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); 2046 2047 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 2048 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 2049 2050 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); 2051 s->ecochk = I915_READ(GAM_ECOCHK); 2052 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); 2053 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); 2054 2055 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); 2056 2057 /* MBC 0x9024-0x91D0, 0x8500 */ 2058 s->g3dctl = I915_READ(VLV_G3DCTL); 2059 s->gsckgctl = I915_READ(VLV_GSCKGCTL); 2060 s->mbctl = I915_READ(GEN6_MBCTL); 2061 2062 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2063 s->ucgctl1 = I915_READ(GEN6_UCGCTL1); 2064 s->ucgctl3 = I915_READ(GEN6_UCGCTL3); 2065 s->rcgctl1 = I915_READ(GEN6_RCGCTL1); 2066 s->rcgctl2 = I915_READ(GEN6_RCGCTL2); 2067 s->rstctl = I915_READ(GEN6_RSTCTL); 2068 s->misccpctl = I915_READ(GEN7_MISCCPCTL); 2069 2070 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2071 s->gfxpause = I915_READ(GEN6_GFXPAUSE); 2072 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); 2073 s->rpdeuc = I915_READ(GEN6_RPDEUC); 2074 s->ecobus = I915_READ(ECOBUS); 2075 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); 2076 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); 2077 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); 2078 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); 2079 s->rcedata = I915_READ(VLV_RCEDATA); 2080 s->spare2gh = I915_READ(VLV_SPAREG2H); 2081 2082 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 2083 s->gt_imr = I915_READ(GTIMR); 2084 s->gt_ier = I915_READ(GTIER); 2085 s->pm_imr = I915_READ(GEN6_PMIMR); 2086 s->pm_ier = I915_READ(GEN6_PMIER); 2087 2088 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2089 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); 2090 2091 /* GT SA CZ domain, 0x100000-0x138124 */ 2092 s->tilectl = I915_READ(TILECTL); 2093 s->gt_fifoctl = I915_READ(GTFIFOCTL); 2094 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); 2095 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2096 s->pmwgicz = I915_READ(VLV_PMWGICZ); 2097 2098 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2099 s->gu_ctl0 = I915_READ(VLV_GU_CTL0); 2100 s->gu_ctl1 = I915_READ(VLV_GU_CTL1); 2101 s->pcbr = I915_READ(VLV_PCBR); 2102 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); 2103 2104 /* 2105 * Not saving any of: 2106 * DFT, 0x9800-0x9EC0 2107 * SARB, 0xB000-0xB1FC 2108 * GAC, 0x5208-0x524C, 0x14000-0x14C000 2109 * PCI CFG 2110 */ 2111 } 2112 2113 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2114 { 2115 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2116 u32 val; 2117 int i; 2118 2119 /* GAM 0x4000-0x4770 */ 2120 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); 2121 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); 2122 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); 2123 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); 2124 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 2125 2126 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2127 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); 2128 2129 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); 2130 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); 2131 2132 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); 2133 I915_WRITE(GAM_ECOCHK, s->ecochk); 2134 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); 2135 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); 2136 2137 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); 2138 2139 /* MBC 0x9024-0x91D0, 0x8500 */ 2140 I915_WRITE(VLV_G3DCTL, s->g3dctl); 2141 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); 2142 I915_WRITE(GEN6_MBCTL, s->mbctl); 2143 2144 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2145 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); 2146 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); 2147 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); 2148 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); 2149 I915_WRITE(GEN6_RSTCTL, s->rstctl); 2150 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); 2151 2152 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2153 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); 2154 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); 2155 I915_WRITE(GEN6_RPDEUC, s->rpdeuc); 2156 I915_WRITE(ECOBUS, s->ecobus); 2157 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); 2158 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); 2159 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); 2160 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); 2161 I915_WRITE(VLV_RCEDATA, s->rcedata); 2162 I915_WRITE(VLV_SPAREG2H, s->spare2gh); 2163 2164 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 2165 I915_WRITE(GTIMR, s->gt_imr); 2166 I915_WRITE(GTIER, s->gt_ier); 2167 I915_WRITE(GEN6_PMIMR, s->pm_imr); 2168 I915_WRITE(GEN6_PMIER, s->pm_ier); 2169 2170 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2171 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); 2172 2173 /* GT SA CZ domain, 0x100000-0x138124 */ 2174 I915_WRITE(TILECTL, s->tilectl); 2175 I915_WRITE(GTFIFOCTL, s->gt_fifoctl); 2176 /* 2177 * Preserve the GT allow wake and GFX force clock bit, they are not 2178 * be restored, as they are used to control the s0ix suspend/resume 2179 * sequence by the caller. 2180 */ 2181 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2182 val &= VLV_GTLC_ALLOWWAKEREQ; 2183 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; 2184 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2185 2186 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2187 val &= VLV_GFX_CLK_FORCE_ON_BIT; 2188 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; 2189 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2190 2191 I915_WRITE(VLV_PMWGICZ, s->pmwgicz); 2192 2193 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2194 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); 2195 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); 2196 I915_WRITE(VLV_PCBR, s->pcbr); 2197 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); 2198 } 2199 2200 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) 2201 { 2202 u32 val; 2203 int err; 2204 2205 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2206 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 2207 if (force_on) 2208 val |= VLV_GFX_CLK_FORCE_ON_BIT; 2209 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2210 2211 if (!force_on) 2212 return 0; 2213 2214 err = intel_wait_for_register(dev_priv, 2215 VLV_GTLC_SURVIVABILITY_REG, 2216 VLV_GFX_CLK_STATUS_BIT, 2217 VLV_GFX_CLK_STATUS_BIT, 2218 20); 2219 if (err) 2220 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 2221 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 2222 2223 return err; 2224 } 2225 2226 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 2227 { 2228 u32 val; 2229 int err = 0; 2230 2231 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2232 val &= ~VLV_GTLC_ALLOWWAKEREQ; 2233 if (allow) 2234 val |= VLV_GTLC_ALLOWWAKEREQ; 2235 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2236 POSTING_READ(VLV_GTLC_WAKE_CTRL); 2237 2238 err = intel_wait_for_register(dev_priv, 2239 VLV_GTLC_PW_STATUS, 2240 VLV_GTLC_ALLOWWAKEACK, 2241 allow, 2242 1); 2243 if (err) 2244 DRM_ERROR("timeout disabling GT waking\n"); 2245 2246 return err; 2247 } 2248 2249 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 2250 bool wait_for_on) 2251 { 2252 u32 mask; 2253 u32 val; 2254 int err; 2255 2256 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 2257 val = wait_for_on ? mask : 0; 2258 if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 2259 return 0; 2260 2261 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 2262 onoff(wait_for_on), 2263 I915_READ(VLV_GTLC_PW_STATUS)); 2264 2265 /* 2266 * RC6 transitioning can be delayed up to 2 msec (see 2267 * valleyview_enable_rps), use 3 msec for safety. 2268 */ 2269 err = intel_wait_for_register(dev_priv, 2270 VLV_GTLC_PW_STATUS, mask, val, 2271 3); 2272 if (err) 2273 DRM_ERROR("timeout waiting for GT wells to go %s\n", 2274 onoff(wait_for_on)); 2275 2276 return err; 2277 } 2278 2279 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 2280 { 2281 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) 2282 return; 2283 2284 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); 2285 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 2286 } 2287 2288 static int vlv_suspend_complete(struct drm_i915_private *dev_priv) 2289 { 2290 u32 mask; 2291 int err; 2292 2293 /* 2294 * Bspec defines the following GT well on flags as debug only, so 2295 * don't treat them as hard failures. 2296 */ 2297 (void)vlv_wait_for_gt_wells(dev_priv, false); 2298 2299 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; 2300 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); 2301 2302 vlv_check_no_gt_access(dev_priv); 2303 2304 err = vlv_force_gfx_clock(dev_priv, true); 2305 if (err) 2306 goto err1; 2307 2308 err = vlv_allow_gt_wake(dev_priv, false); 2309 if (err) 2310 goto err2; 2311 2312 if (!IS_CHERRYVIEW(dev_priv)) 2313 vlv_save_gunit_s0ix_state(dev_priv); 2314 2315 err = vlv_force_gfx_clock(dev_priv, false); 2316 if (err) 2317 goto err2; 2318 2319 return 0; 2320 2321 err2: 2322 /* For safety always re-enable waking and disable gfx clock forcing */ 2323 vlv_allow_gt_wake(dev_priv, true); 2324 err1: 2325 vlv_force_gfx_clock(dev_priv, false); 2326 2327 return err; 2328 } 2329 2330 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 2331 bool rpm_resume) 2332 { 2333 struct drm_device *dev = &dev_priv->drm; 2334 int err; 2335 int ret; 2336 2337 /* 2338 * If any of the steps fail just try to continue, that's the best we 2339 * can do at this point. Return the first error code (which will also 2340 * leave RPM permanently disabled). 2341 */ 2342 ret = vlv_force_gfx_clock(dev_priv, true); 2343 2344 if (!IS_CHERRYVIEW(dev_priv)) 2345 vlv_restore_gunit_s0ix_state(dev_priv); 2346 2347 err = vlv_allow_gt_wake(dev_priv, true); 2348 if (!ret) 2349 ret = err; 2350 2351 err = vlv_force_gfx_clock(dev_priv, false); 2352 if (!ret) 2353 ret = err; 2354 2355 vlv_check_no_gt_access(dev_priv); 2356 2357 if (rpm_resume) { 2358 intel_init_clock_gating(dev); 2359 i915_gem_restore_fences(dev); 2360 } 2361 2362 return ret; 2363 } 2364 2365 #if 0 2366 static int intel_runtime_suspend(struct device *device) 2367 { 2368 struct pci_dev *pdev = to_pci_dev(device); 2369 struct drm_device *dev = pci_get_drvdata(pdev); 2370 struct drm_i915_private *dev_priv = to_i915(dev); 2371 int ret; 2372 2373 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) 2374 return -ENODEV; 2375 2376 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2377 return -ENODEV; 2378 2379 DRM_DEBUG_KMS("Suspending device\n"); 2380 2381 /* 2382 * We could deadlock here in case another thread holding struct_mutex 2383 * calls RPM suspend concurrently, since the RPM suspend will wait 2384 * first for this RPM suspend to finish. In this case the concurrent 2385 * RPM resume will be followed by its RPM suspend counterpart. Still 2386 * for consistency return -EAGAIN, which will reschedule this suspend. 2387 */ 2388 if (!mutex_trylock(&dev->struct_mutex)) { 2389 DRM_DEBUG_KMS("device lock contention, deffering suspend\n"); 2390 /* 2391 * Bump the expiration timestamp, otherwise the suspend won't 2392 * be rescheduled. 2393 */ 2394 pm_runtime_mark_last_busy(device); 2395 2396 return -EAGAIN; 2397 } 2398 2399 disable_rpm_wakeref_asserts(dev_priv); 2400 2401 /* 2402 * We are safe here against re-faults, since the fault handler takes 2403 * an RPM reference. 2404 */ 2405 i915_gem_release_all_mmaps(dev_priv); 2406 mutex_unlock(&dev->struct_mutex); 2407 2408 intel_guc_suspend(dev); 2409 2410 intel_runtime_pm_disable_interrupts(dev_priv); 2411 2412 ret = 0; 2413 if (IS_BROXTON(dev_priv)) { 2414 bxt_display_core_uninit(dev_priv); 2415 bxt_enable_dc9(dev_priv); 2416 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2417 hsw_enable_pc8(dev_priv); 2418 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2419 ret = vlv_suspend_complete(dev_priv); 2420 } 2421 2422 if (ret) { 2423 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 2424 intel_runtime_pm_enable_interrupts(dev_priv); 2425 2426 enable_rpm_wakeref_asserts(dev_priv); 2427 2428 return ret; 2429 } 2430 2431 intel_uncore_forcewake_reset(dev_priv, false); 2432 2433 enable_rpm_wakeref_asserts(dev_priv); 2434 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2435 2436 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 2437 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 2438 2439 dev_priv->pm.suspended = true; 2440 2441 /* 2442 * FIXME: We really should find a document that references the arguments 2443 * used below! 2444 */ 2445 if (IS_BROADWELL(dev_priv)) { 2446 /* 2447 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 2448 * being detected, and the call we do at intel_runtime_resume() 2449 * won't be able to restore them. Since PCI_D3hot matches the 2450 * actual specification and appears to be working, use it. 2451 */ 2452 intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 2453 } else { 2454 /* 2455 * current versions of firmware which depend on this opregion 2456 * notification have repurposed the D1 definition to mean 2457 * "runtime suspended" vs. what you would normally expect (D3) 2458 * to distinguish it from notifications that might be sent via 2459 * the suspend path. 2460 */ 2461 intel_opregion_notify_adapter(dev_priv, PCI_D1); 2462 } 2463 2464 assert_forcewakes_inactive(dev_priv); 2465 2466 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2467 intel_hpd_poll_init(dev_priv); 2468 2469 DRM_DEBUG_KMS("Device suspended\n"); 2470 return 0; 2471 } 2472 2473 static int intel_runtime_resume(struct device *device) 2474 { 2475 struct pci_dev *pdev = to_pci_dev(device); 2476 struct drm_device *dev = pci_get_drvdata(pdev); 2477 struct drm_i915_private *dev_priv = to_i915(dev); 2478 int ret = 0; 2479 2480 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2481 return -ENODEV; 2482 2483 DRM_DEBUG_KMS("Resuming device\n"); 2484 2485 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2486 disable_rpm_wakeref_asserts(dev_priv); 2487 2488 intel_opregion_notify_adapter(dev_priv, PCI_D0); 2489 dev_priv->pm.suspended = false; 2490 if (intel_uncore_unclaimed_mmio(dev_priv)) 2491 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 2492 2493 intel_guc_resume(dev); 2494 2495 if (IS_GEN6(dev_priv)) 2496 intel_init_pch_refclk(dev); 2497 2498 if (IS_BROXTON(dev)) { 2499 bxt_disable_dc9(dev_priv); 2500 bxt_display_core_init(dev_priv, true); 2501 if (dev_priv->csr.dmc_payload && 2502 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2503 gen9_enable_dc5(dev_priv); 2504 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2505 hsw_disable_pc8(dev_priv); 2506 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2507 ret = vlv_resume_prepare(dev_priv, true); 2508 } 2509 2510 /* 2511 * No point of rolling back things in case of an error, as the best 2512 * we can do is to hope that things will still work (and disable RPM). 2513 */ 2514 i915_gem_init_swizzling(dev); 2515 2516 intel_runtime_pm_enable_interrupts(dev_priv); 2517 2518 /* 2519 * On VLV/CHV display interrupts are part of the display 2520 * power well, so hpd is reinitialized from there. For 2521 * everyone else do it here. 2522 */ 2523 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2524 intel_hpd_init(dev_priv); 2525 2526 enable_rpm_wakeref_asserts(dev_priv); 2527 2528 if (ret) 2529 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 2530 else 2531 DRM_DEBUG_KMS("Device resumed\n"); 2532 2533 return ret; 2534 } 2535 2536 const struct dev_pm_ops i915_pm_ops = { 2537 /* 2538 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2539 * PMSG_RESUME] 2540 */ 2541 .suspend = i915_pm_suspend, 2542 .suspend_late = i915_pm_suspend_late, 2543 .resume_early = i915_pm_resume_early, 2544 .resume = i915_pm_resume, 2545 2546 /* 2547 * S4 event handlers 2548 * @freeze, @freeze_late : called (1) before creating the 2549 * hibernation image [PMSG_FREEZE] and 2550 * (2) after rebooting, before restoring 2551 * the image [PMSG_QUIESCE] 2552 * @thaw, @thaw_early : called (1) after creating the hibernation 2553 * image, before writing it [PMSG_THAW] 2554 * and (2) after failing to create or 2555 * restore the image [PMSG_RECOVER] 2556 * @poweroff, @poweroff_late: called after writing the hibernation 2557 * image, before rebooting [PMSG_HIBERNATE] 2558 * @restore, @restore_early : called after rebooting and restoring the 2559 * hibernation image [PMSG_RESTORE] 2560 */ 2561 .freeze = i915_pm_freeze, 2562 .freeze_late = i915_pm_freeze_late, 2563 .thaw_early = i915_pm_thaw_early, 2564 .thaw = i915_pm_thaw, 2565 .poweroff = i915_pm_suspend, 2566 .poweroff_late = i915_pm_poweroff_late, 2567 .restore_early = i915_pm_restore_early, 2568 .restore = i915_pm_restore, 2569 2570 /* S0ix (via runtime suspend) event handlers */ 2571 .runtime_suspend = intel_runtime_suspend, 2572 .runtime_resume = intel_runtime_resume, 2573 }; 2574 2575 static const struct vm_operations_struct i915_gem_vm_ops = { 2576 .fault = i915_gem_fault, 2577 .open = drm_gem_vm_open, 2578 .close = drm_gem_vm_close, 2579 }; 2580 #endif 2581 2582 static struct cdev_pager_ops i915_gem_vm_ops = { 2583 .cdev_pg_fault = i915_gem_fault, 2584 .cdev_pg_ctor = i915_gem_pager_ctor, 2585 .cdev_pg_dtor = i915_gem_pager_dtor 2586 }; 2587 2588 static const struct file_operations i915_driver_fops = { 2589 .owner = THIS_MODULE, 2590 #if 0 2591 .open = drm_open, 2592 .release = drm_release, 2593 .unlocked_ioctl = drm_ioctl, 2594 .mmap = drm_gem_mmap, 2595 .poll = drm_poll, 2596 .read = drm_read, 2597 #ifdef CONFIG_COMPAT 2598 .compat_ioctl = i915_compat_ioctl, 2599 #endif 2600 .llseek = noop_llseek, 2601 #endif 2602 }; 2603 2604 static int 2605 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 2606 struct drm_file *file) 2607 { 2608 return -ENODEV; 2609 } 2610 2611 static const struct drm_ioctl_desc i915_ioctls[] = { 2612 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2613 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 2614 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 2615 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 2616 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 2617 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 2618 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 2619 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2620 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2621 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2622 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2623 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 2624 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2625 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2626 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 2627 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 2628 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2629 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2630 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 2631 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), 2632 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2633 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2634 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2635 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 2636 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 2637 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2638 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2639 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2640 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 2641 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 2642 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 2643 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 2644 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), 2645 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 2646 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 2647 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), 2648 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), 2649 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 2650 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 2651 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 2652 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2653 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2654 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 2655 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 2656 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2657 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 2658 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 2659 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 2660 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 2661 #if 0 2662 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 2663 #endif 2664 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 2665 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 2666 }; 2667 2668 static int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 2669 struct sysctl_oid *top) 2670 { 2671 return drm_add_busid_modesetting(dev, ctx, top); 2672 } 2673 2674 static struct drm_driver driver = { 2675 /* Don't use MTRRs here; the Xserver or userspace app should 2676 * deal with them for Intel hardware. 2677 */ 2678 .driver_features = 2679 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 2680 DRIVER_RENDER | DRIVER_MODESET, 2681 .open = i915_driver_open, 2682 .lastclose = i915_driver_lastclose, 2683 .preclose = i915_driver_preclose, 2684 .postclose = i915_driver_postclose, 2685 .set_busid = drm_pci_set_busid, 2686 2687 .gem_free_object = i915_gem_free_object, 2688 .gem_vm_ops = &i915_gem_vm_ops, 2689 2690 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 2691 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 2692 .gem_prime_export = i915_gem_prime_export, 2693 .gem_prime_import = i915_gem_prime_import, 2694 2695 .dumb_create = i915_gem_dumb_create, 2696 .dumb_map_offset = i915_gem_mmap_gtt, 2697 .dumb_destroy = drm_gem_dumb_destroy, 2698 .ioctls = i915_ioctls, 2699 .num_ioctls = ARRAY_SIZE(i915_ioctls), 2700 .fops = &i915_driver_fops, 2701 .name = DRIVER_NAME, 2702 .desc = DRIVER_DESC, 2703 .date = DRIVER_DATE, 2704 .major = DRIVER_MAJOR, 2705 .minor = DRIVER_MINOR, 2706 .patchlevel = DRIVER_PATCHLEVEL, 2707 #ifdef __DragonFly__ 2708 .sysctl_init = i915_sysctl_init, 2709 #endif 2710 }; 2711