1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #include <linux/device.h> 31 #include <linux/acpi.h> 32 #include <drm/drmP.h> 33 #include <drm/i915_drm.h> 34 #include "i915_drv.h" 35 #include "i915_trace.h" 36 #include "intel_drv.h" 37 38 #include <linux/apple-gmux.h> 39 #include <linux/console.h> 40 #include <linux/module.h> 41 #include <linux/pm_runtime.h> 42 #include <linux/vgaarb.h> 43 #include <linux/vga_switcheroo.h> 44 #include <drm/drm_crtc_helper.h> 45 46 static struct drm_driver driver; 47 48 #define GEN_DEFAULT_PIPEOFFSETS \ 49 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 50 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ 51 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ 52 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ 53 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } 54 55 #define GEN_CHV_PIPEOFFSETS \ 56 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 57 CHV_PIPE_C_OFFSET }, \ 58 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ 59 CHV_TRANSCODER_C_OFFSET, }, \ 60 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ 61 CHV_PALETTE_C_OFFSET } 62 63 #define CURSOR_OFFSETS \ 64 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } 65 66 #define IVB_CURSOR_OFFSETS \ 67 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } 68 69 #define BDW_COLORS \ 70 .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } 71 #define CHV_COLORS \ 72 .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } 73 74 static const struct intel_device_info intel_i830_info = { 75 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 76 .has_overlay = 1, .overlay_needs_physical = 1, 77 .ring_mask = RENDER_RING, 78 GEN_DEFAULT_PIPEOFFSETS, 79 CURSOR_OFFSETS, 80 }; 81 82 static const struct intel_device_info intel_845g_info = { 83 .gen = 2, .num_pipes = 1, 84 .has_overlay = 1, .overlay_needs_physical = 1, 85 .ring_mask = RENDER_RING, 86 GEN_DEFAULT_PIPEOFFSETS, 87 CURSOR_OFFSETS, 88 }; 89 90 static const struct intel_device_info intel_i85x_info = { 91 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, 92 .cursor_needs_physical = 1, 93 .has_overlay = 1, .overlay_needs_physical = 1, 94 .has_fbc = 1, 95 .ring_mask = 
RENDER_RING, 96 GEN_DEFAULT_PIPEOFFSETS, 97 CURSOR_OFFSETS, 98 }; 99 100 static const struct intel_device_info intel_i865g_info = { 101 .gen = 2, .num_pipes = 1, 102 .has_overlay = 1, .overlay_needs_physical = 1, 103 .ring_mask = RENDER_RING, 104 GEN_DEFAULT_PIPEOFFSETS, 105 CURSOR_OFFSETS, 106 }; 107 108 static const struct intel_device_info intel_i915g_info = { 109 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, 110 .has_overlay = 1, .overlay_needs_physical = 1, 111 .ring_mask = RENDER_RING, 112 GEN_DEFAULT_PIPEOFFSETS, 113 CURSOR_OFFSETS, 114 }; 115 static const struct intel_device_info intel_i915gm_info = { 116 .gen = 3, .is_mobile = 1, .num_pipes = 2, 117 .cursor_needs_physical = 1, 118 .has_overlay = 1, .overlay_needs_physical = 1, 119 .supports_tv = 1, 120 .has_fbc = 1, 121 .ring_mask = RENDER_RING, 122 GEN_DEFAULT_PIPEOFFSETS, 123 CURSOR_OFFSETS, 124 }; 125 static const struct intel_device_info intel_i945g_info = { 126 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 127 .has_overlay = 1, .overlay_needs_physical = 1, 128 .ring_mask = RENDER_RING, 129 GEN_DEFAULT_PIPEOFFSETS, 130 CURSOR_OFFSETS, 131 }; 132 static const struct intel_device_info intel_i945gm_info = { 133 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 134 .has_hotplug = 1, .cursor_needs_physical = 1, 135 .has_overlay = 1, .overlay_needs_physical = 1, 136 .supports_tv = 1, 137 .has_fbc = 1, 138 .ring_mask = RENDER_RING, 139 GEN_DEFAULT_PIPEOFFSETS, 140 CURSOR_OFFSETS, 141 }; 142 143 static const struct intel_device_info intel_i965g_info = { 144 .gen = 4, .is_broadwater = 1, .num_pipes = 2, 145 .has_hotplug = 1, 146 .has_overlay = 1, 147 .ring_mask = RENDER_RING, 148 GEN_DEFAULT_PIPEOFFSETS, 149 CURSOR_OFFSETS, 150 }; 151 152 static const struct intel_device_info intel_i965gm_info = { 153 .gen = 4, .is_crestline = 1, .num_pipes = 2, 154 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 155 .has_overlay = 1, 156 .supports_tv = 1, 157 .ring_mask = RENDER_RING, 158 GEN_DEFAULT_PIPEOFFSETS, 159 CURSOR_OFFSETS, 160 }; 161 162 static const struct intel_device_info intel_g33_info = { 163 .gen = 3, .is_g33 = 1, .num_pipes = 2, 164 .need_gfx_hws = 1, .has_hotplug = 1, 165 .has_overlay = 1, 166 .ring_mask = RENDER_RING, 167 GEN_DEFAULT_PIPEOFFSETS, 168 CURSOR_OFFSETS, 169 }; 170 171 static const struct intel_device_info intel_g45_info = { 172 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, 173 .has_pipe_cxsr = 1, .has_hotplug = 1, 174 .ring_mask = RENDER_RING | BSD_RING, 175 GEN_DEFAULT_PIPEOFFSETS, 176 CURSOR_OFFSETS, 177 }; 178 179 static const struct intel_device_info intel_gm45_info = { 180 .gen = 4, .is_g4x = 1, .num_pipes = 2, 181 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 182 .has_pipe_cxsr = 1, .has_hotplug = 1, 183 .supports_tv = 1, 184 .ring_mask = RENDER_RING | BSD_RING, 185 GEN_DEFAULT_PIPEOFFSETS, 186 CURSOR_OFFSETS, 187 }; 188 189 static const struct intel_device_info intel_pineview_info = { 190 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, 191 .need_gfx_hws = 1, .has_hotplug = 1, 192 .has_overlay = 1, 193 GEN_DEFAULT_PIPEOFFSETS, 194 CURSOR_OFFSETS, 195 }; 196 197 static const struct intel_device_info intel_ironlake_d_info = { 198 .gen = 5, .num_pipes = 2, 199 .need_gfx_hws = 1, .has_hotplug = 1, 200 .ring_mask = RENDER_RING | BSD_RING, 201 GEN_DEFAULT_PIPEOFFSETS, 202 CURSOR_OFFSETS, 203 }; 204 205 static const struct intel_device_info intel_ironlake_m_info = { 206 .gen = 5, .is_mobile = 1, .num_pipes = 2, 207 
.need_gfx_hws = 1, .has_hotplug = 1, 208 .has_fbc = 1, 209 .ring_mask = RENDER_RING | BSD_RING, 210 GEN_DEFAULT_PIPEOFFSETS, 211 CURSOR_OFFSETS, 212 }; 213 214 static const struct intel_device_info intel_sandybridge_d_info = { 215 .gen = 6, .num_pipes = 2, 216 .need_gfx_hws = 1, .has_hotplug = 1, 217 .has_fbc = 1, 218 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 219 .has_llc = 1, 220 GEN_DEFAULT_PIPEOFFSETS, 221 CURSOR_OFFSETS, 222 }; 223 224 static const struct intel_device_info intel_sandybridge_m_info = { 225 .gen = 6, .is_mobile = 1, .num_pipes = 2, 226 .need_gfx_hws = 1, .has_hotplug = 1, 227 .has_fbc = 1, 228 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 229 .has_llc = 1, 230 GEN_DEFAULT_PIPEOFFSETS, 231 CURSOR_OFFSETS, 232 }; 233 234 #define GEN7_FEATURES \ 235 .gen = 7, .num_pipes = 3, \ 236 .need_gfx_hws = 1, .has_hotplug = 1, \ 237 .has_fbc = 1, \ 238 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 239 .has_llc = 1, \ 240 GEN_DEFAULT_PIPEOFFSETS, \ 241 IVB_CURSOR_OFFSETS 242 243 static const struct intel_device_info intel_ivybridge_d_info = { 244 GEN7_FEATURES, 245 .is_ivybridge = 1, 246 }; 247 248 static const struct intel_device_info intel_ivybridge_m_info = { 249 GEN7_FEATURES, 250 .is_ivybridge = 1, 251 .is_mobile = 1, 252 }; 253 254 static const struct intel_device_info intel_ivybridge_q_info = { 255 GEN7_FEATURES, 256 .is_ivybridge = 1, 257 .num_pipes = 0, /* legal, last one wins */ 258 }; 259 260 #define VLV_FEATURES \ 261 .gen = 7, .num_pipes = 2, \ 262 .need_gfx_hws = 1, .has_hotplug = 1, \ 263 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 264 .display_mmio_offset = VLV_DISPLAY_BASE, \ 265 GEN_DEFAULT_PIPEOFFSETS, \ 266 CURSOR_OFFSETS 267 268 static const struct intel_device_info intel_valleyview_m_info = { 269 VLV_FEATURES, 270 .is_valleyview = 1, 271 .is_mobile = 1, 272 }; 273 274 static const struct intel_device_info intel_valleyview_d_info = { 275 VLV_FEATURES, 276 .is_valleyview = 1, 277 }; 278 279 #define HSW_FEATURES \ 280 GEN7_FEATURES, \ 281 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ 282 .has_ddi = 1, \ 283 .has_fpga_dbg = 1 284 285 static const struct intel_device_info intel_haswell_d_info = { 286 HSW_FEATURES, 287 .is_haswell = 1, 288 }; 289 290 static const struct intel_device_info intel_haswell_m_info = { 291 HSW_FEATURES, 292 .is_haswell = 1, 293 .is_mobile = 1, 294 }; 295 296 #define BDW_FEATURES \ 297 HSW_FEATURES, \ 298 BDW_COLORS 299 300 static const struct intel_device_info intel_broadwell_d_info = { 301 BDW_FEATURES, 302 .gen = 8, 303 }; 304 305 static const struct intel_device_info intel_broadwell_m_info = { 306 BDW_FEATURES, 307 .gen = 8, .is_mobile = 1, 308 }; 309 310 static const struct intel_device_info intel_broadwell_gt3d_info = { 311 BDW_FEATURES, 312 .gen = 8, 313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 314 }; 315 316 static const struct intel_device_info intel_broadwell_gt3m_info = { 317 BDW_FEATURES, 318 .gen = 8, .is_mobile = 1, 319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 320 }; 321 322 static const struct intel_device_info intel_cherryview_info = { 323 .gen = 8, .num_pipes = 3, 324 .need_gfx_hws = 1, .has_hotplug = 1, 325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 326 .is_cherryview = 1, 327 .display_mmio_offset = VLV_DISPLAY_BASE, 328 GEN_CHV_PIPEOFFSETS, 329 CURSOR_OFFSETS, 330 CHV_COLORS, 331 }; 332 333 static const struct intel_device_info intel_skylake_info = { 334 BDW_FEATURES, 335 .is_skylake = 1, 336 .gen = 9, 337 }; 
338 339 static const struct intel_device_info intel_skylake_gt3_info = { 340 BDW_FEATURES, 341 .is_skylake = 1, 342 .gen = 9, 343 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 344 }; 345 346 static const struct intel_device_info intel_broxton_info = { 347 .is_preliminary = 1, 348 .is_broxton = 1, 349 .gen = 9, 350 .need_gfx_hws = 1, .has_hotplug = 1, 351 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 352 .num_pipes = 3, 353 .has_ddi = 1, 354 .has_fpga_dbg = 1, 355 .has_fbc = 1, 356 GEN_DEFAULT_PIPEOFFSETS, 357 IVB_CURSOR_OFFSETS, 358 BDW_COLORS, 359 }; 360 361 static const struct intel_device_info intel_kabylake_info = { 362 BDW_FEATURES, 363 .is_kabylake = 1, 364 .gen = 9, 365 }; 366 367 static const struct intel_device_info intel_kabylake_gt2_info = { 368 BDW_FEATURES, 369 .is_kabylake = 1, 370 .gen = 9, 371 }; 372 373 static const struct intel_device_info intel_kabylake_gt3_info = { 374 BDW_FEATURES, 375 .is_kabylake = 1, 376 .gen = 9, 377 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 378 }; 379 380 static const struct intel_device_info intel_coffeelake_gt1_info = { 381 BDW_FEATURES, 382 .is_kabylake = 1, 383 .gen = 9, 384 }; 385 386 static const struct intel_device_info intel_coffeelake_gt2_info = { 387 BDW_FEATURES, 388 .is_kabylake = 1, 389 .gen = 9, 390 }; 391 392 static const struct intel_device_info intel_coffeelake_gt3_info = { 393 BDW_FEATURES, 394 .is_kabylake = 1, 395 .gen = 9, 396 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 397 }; 398 399 /* 400 * Make sure any device matches here are from most specific to most 401 * general. For example, since the Quanta match is based on the subsystem 402 * and subvendor IDs, we need it to come before the more general IVB 403 * PCI ID matches, otherwise we'll use the wrong info struct above.
404 */ 405 406 static const struct pci_device_id pciidlist[] = { 407 INTEL_I830_IDS(&intel_i830_info), 408 INTEL_I845G_IDS(&intel_845g_info), 409 INTEL_I85X_IDS(&intel_i85x_info), 410 INTEL_I865G_IDS(&intel_i865g_info), 411 INTEL_I915G_IDS(&intel_i915g_info), 412 INTEL_I915GM_IDS(&intel_i915gm_info), 413 INTEL_I945G_IDS(&intel_i945g_info), 414 INTEL_I945GM_IDS(&intel_i945gm_info), 415 INTEL_I965G_IDS(&intel_i965g_info), 416 INTEL_G33_IDS(&intel_g33_info), 417 INTEL_I965GM_IDS(&intel_i965gm_info), 418 INTEL_GM45_IDS(&intel_gm45_info), 419 INTEL_G45_IDS(&intel_g45_info), 420 INTEL_PINEVIEW_IDS(&intel_pineview_info), 421 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), 422 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), 423 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), 424 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), 425 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ 426 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), 427 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), 428 INTEL_HSW_D_IDS(&intel_haswell_d_info), 429 INTEL_HSW_M_IDS(&intel_haswell_m_info), 430 INTEL_VLV_M_IDS(&intel_valleyview_m_info), 431 INTEL_VLV_D_IDS(&intel_valleyview_d_info), 432 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), 433 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), 434 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), 435 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), 436 INTEL_CHV_IDS(&intel_cherryview_info), 437 INTEL_SKL_GT1_IDS(&intel_skylake_info), 438 INTEL_SKL_GT2_IDS(&intel_skylake_info), 439 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), 440 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), 441 INTEL_BXT_IDS(&intel_broxton_info), 442 INTEL_KBL_GT1_IDS(&intel_kabylake_info), 443 INTEL_KBL_GT2_IDS(&intel_kabylake_info), 444 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), 445 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), 446 INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info), 447 INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), 448 INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), 449 INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), 450 INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), 451 INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), 452 INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), 453 INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), 454 INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), 455 {0, 0, 0} 456 }; 457 458 #define PCI_VENDOR_INTEL 0x8086 459 460 static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) 461 { 462 enum intel_pch ret = PCH_NOP; 463 464 /* 465 * In a virtualized passthrough environment we can be in a 466 * setup where the ISA bridge is not able to be passed through. 467 * In this case, a south bridge can be emulated and we have to 468 * make an educated guess as to which PCH is really there. 469 */ 470 471 if (IS_GEN5(dev)) { 472 ret = PCH_IBX; 473 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 474 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 475 ret = PCH_CPT; 476 DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); 477 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 478 ret = PCH_LPT; 479 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 480 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 481 ret = PCH_SPT; 482 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 483 } 484 485 return ret; 486 } 487 488 void intel_detect_pch(struct drm_device *dev) 489 { 490 struct drm_i915_private *dev_priv = dev->dev_private; 491 device_t pch = NULL; 492 struct pci_devinfo *di; 493 494 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 495 * (which really amounts to a PCH but no South Display.
496 */ 497 if (INTEL_INFO(dev)->num_pipes == 0) { 498 dev_priv->pch_type = PCH_NOP; 499 return; 500 } 501 502 /* XXX The ISA bridge probe causes some old Core2 machines to hang */ 503 if (INTEL_INFO(dev)->gen < 5) 504 return; 505 506 /* 507 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to 508 * make graphics device passthrough easier for the VMM, which then only 509 * needs to expose the ISA bridge to let the driver know the real hardware 510 * underneath. This is a requirement from the virtualization team. 511 * 512 * In some virtualized environments (e.g. XEN), there may be an irrelevant 513 * ISA bridge in the system. To work reliably, we should scan through 514 * all the ISA bridge devices and check for the first match, instead 515 * of only checking the first one. 516 */ 517 di = NULL; 518 519 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) { 520 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) { 521 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 522 dev_priv->pch_id = id; 523 524 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 525 dev_priv->pch_type = PCH_IBX; 526 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 527 WARN_ON(!IS_GEN5(dev)); 528 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 529 dev_priv->pch_type = PCH_CPT; 530 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 531 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 532 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 533 /* PantherPoint is CPT compatible */ 534 dev_priv->pch_type = PCH_CPT; 535 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 536 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 537 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 538 dev_priv->pch_type = PCH_LPT; 539 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 540 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 541 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); 542 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 543 dev_priv->pch_type = PCH_LPT; 544 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 545 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 546 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); 547 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 548 dev_priv->pch_type = PCH_SPT; 549 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 550 WARN_ON(!IS_SKYLAKE(dev) && 551 !IS_KABYLAKE(dev)); 552 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 553 dev_priv->pch_type = PCH_SPT; 554 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 555 WARN_ON(!IS_SKYLAKE(dev) && 556 !IS_KABYLAKE(dev)); 557 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 558 dev_priv->pch_type = PCH_KBP; 559 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 560 WARN_ON(!IS_KABYLAKE(dev)); 561 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 562 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 563 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 564 1)) { 565 dev_priv->pch_type = intel_virt_detect_pch(dev); 566 } else 567 continue; 568 569 break; 570 } 571 } 572 if (!pch) 573 DRM_DEBUG_KMS("No PCH found.\n"); 574 575 #if 0 576 pci_dev_put(pch); 577 #endif 578 } 579 580 bool i915_semaphore_is_enabled(struct drm_device *dev) 581 { 582 if (INTEL_INFO(dev)->gen < 6) 583 return false; 584 585 if (i915.semaphores >= 0) 586 return i915.semaphores; 587 588 /* TODO: make semaphores and Execlists play nicely together */ 589 if (i915.enable_execlists) 590 return false; 591 592 /* Until we get further testing
*/ 593 if (IS_GEN8(dev)) 594 return false; 595 596 #ifdef CONFIG_INTEL_IOMMU 597 /* Enable semaphores on SNB when IO remapping is off */ 598 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 599 return false; 600 #endif 601 602 return true; 603 } 604 605 #ifdef __DragonFly__ 606 #define IS_BUILTIN(blah) 0 607 #endif 608 609 static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 610 { 611 struct drm_device *dev = dev_priv->dev; 612 struct intel_encoder *encoder; 613 614 drm_modeset_lock_all(dev); 615 for_each_intel_encoder(dev, encoder) 616 if (encoder->suspend) 617 encoder->suspend(encoder); 618 drm_modeset_unlock_all(dev); 619 } 620 621 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 622 bool rpm_resume); 623 static int vlv_suspend_complete(struct drm_i915_private *dev_priv); 624 625 static bool suspend_to_idle(struct drm_i915_private *dev_priv) 626 { 627 #if IS_ENABLED(CONFIG_ACPI_SLEEP) 628 if (acpi_target_system_state() < ACPI_STATE_S3) 629 return true; 630 #endif 631 return false; 632 } 633 634 static int i915_drm_suspend(struct drm_device *dev) 635 { 636 struct drm_i915_private *dev_priv = dev->dev_private; 637 pci_power_t opregion_target_state; 638 int error; 639 640 /* ignore lid events during suspend */ 641 mutex_lock(&dev_priv->modeset_restore_lock); 642 dev_priv->modeset_restore = MODESET_SUSPENDED; 643 mutex_unlock(&dev_priv->modeset_restore_lock); 644 645 disable_rpm_wakeref_asserts(dev_priv); 646 647 /* We do a lot of poking in a lot of registers, make sure they work 648 * properly. */ 649 intel_display_set_init_power(dev_priv, true); 650 651 drm_kms_helper_poll_disable(dev); 652 653 #if 0 654 pci_save_state(dev->pdev); 655 #endif 656 657 error = i915_gem_suspend(dev); 658 if (error) { 659 dev_err(dev->dev, 660 "GEM idle failed, resume might fail\n"); 661 goto out; 662 } 663 664 intel_guc_suspend(dev); 665 666 intel_suspend_gt_powersave(dev); 667 668 intel_display_suspend(dev); 669 670 #if 0 671 intel_dp_mst_suspend(dev); 672 #endif 673 674 intel_runtime_pm_disable_interrupts(dev_priv); 675 intel_hpd_cancel_work(dev_priv); 676 677 intel_suspend_encoders(dev_priv); 678 679 intel_suspend_hw(dev); 680 681 i915_gem_suspend_gtt_mappings(dev); 682 683 i915_save_state(dev); 684 685 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 686 intel_opregion_notify_adapter(dev, opregion_target_state); 687 688 intel_uncore_forcewake_reset(dev, false); 689 intel_opregion_fini(dev); 690 691 #if 0 692 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 693 #endif 694 695 dev_priv->suspend_count++; 696 697 intel_display_set_init_power(dev_priv, false); 698 699 intel_csr_ucode_suspend(dev_priv); 700 701 out: 702 enable_rpm_wakeref_asserts(dev_priv); 703 704 return error; 705 } 706 707 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 708 { 709 struct drm_i915_private *dev_priv = drm_dev->dev_private; 710 bool fw_csr; 711 int ret; 712 713 disable_rpm_wakeref_asserts(dev_priv); 714 715 fw_csr = !IS_BROXTON(dev_priv) && 716 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; 717 /* 718 * In case of firmware assisted context save/restore don't manually 719 * deinit the power domains. This also means the CSR/DMC firmware will 720 * stay active, it will power down any HW resources as required and 721 * also enable deeper system power states that would be blocked if the 722 * firmware was inactive. 
723 */ 724 if (!fw_csr) 725 intel_power_domains_suspend(dev_priv); 726 727 ret = 0; 728 if (IS_BROXTON(dev_priv)) 729 bxt_enable_dc9(dev_priv); 730 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 731 hsw_enable_pc8(dev_priv); 732 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 733 ret = vlv_suspend_complete(dev_priv); 734 735 if (ret) { 736 DRM_ERROR("Suspend complete failed: %d\n", ret); 737 if (!fw_csr) 738 intel_power_domains_init_hw(dev_priv, true); 739 740 goto out; 741 } 742 743 #if 0 744 pci_disable_device(drm_dev->pdev); 745 /* 746 * During hibernation on some platforms the BIOS may try to access 747 * the device even though it's already in D3 and hang the machine. So 748 * leave the device in D0 on those platforms and hope the BIOS will 749 * power down the device properly. The issue was seen on multiple old 750 * GENs with different BIOS vendors, so having an explicit blacklist 751 * is impractical; apply the workaround on everything pre GEN6. The 752 * platforms where the issue was seen: 753 * Lenovo Thinkpad X301, X61s, X60, T60, X41 754 * Fujitsu FSC S7110 755 * Acer Aspire 1830T 756 */ 757 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) 758 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 759 #endif 760 761 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); 762 763 out: 764 enable_rpm_wakeref_asserts(dev_priv); 765 766 return ret; 767 } 768 769 int i915_suspend_switcheroo(device_t kdev) 770 { 771 struct drm_device *dev = device_get_softc(kdev); 772 int error; 773 774 if (!dev || !dev->dev_private) { 775 DRM_ERROR("dev: %p\n", dev); 776 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 777 return -ENODEV; 778 } 779 780 #if 0 781 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && 782 state.event != PM_EVENT_FREEZE)) 783 return -EINVAL; 784 #endif 785 786 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 787 return 0; 788 789 error = i915_drm_suspend(dev); 790 if (error) 791 return error; 792 793 return i915_drm_suspend_late(dev, false); 794 } 795 796 static int i915_drm_resume(struct drm_device *dev) 797 { 798 struct drm_i915_private *dev_priv = dev->dev_private; 799 int ret; 800 801 disable_rpm_wakeref_asserts(dev_priv); 802 803 ret = i915_ggtt_enable_hw(dev); 804 if (ret) 805 DRM_ERROR("failed to re-enable GGTT\n"); 806 807 intel_csr_ucode_resume(dev_priv); 808 809 mutex_lock(&dev->struct_mutex); 810 i915_gem_restore_gtt_mappings(dev); 811 mutex_unlock(&dev->struct_mutex); 812 813 i915_restore_state(dev); 814 intel_opregion_setup(dev); 815 816 intel_init_pch_refclk(dev); 817 drm_mode_config_reset(dev); 818 819 /* 820 * Interrupts have to be enabled before any batches are run. If not, the 821 * GPU will hang. i915_gem_init_hw() will initiate batches to 822 * update/restore the context. 823 * 824 * Modeset enabling in intel_modeset_init_hw() also needs working 825 * interrupts. 826 */ 827 intel_runtime_pm_enable_interrupts(dev_priv); 828 829 mutex_lock(&dev->struct_mutex); 830 if (i915_gem_init_hw(dev)) { 831 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 832 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 833 } 834 mutex_unlock(&dev->struct_mutex); 835 836 intel_guc_resume(dev); 837 838 intel_modeset_init_hw(dev); 839 840 spin_lock_irq(&dev_priv->irq_lock); 841 if (dev_priv->display.hpd_irq_setup) 842 dev_priv->display.hpd_irq_setup(dev); 843 spin_unlock_irq(&dev_priv->irq_lock); 844 845 intel_dp_mst_resume(dev); 846 847 intel_display_resume(dev); 848 849 /* 850 * ...
but also need to make sure that hotplug processing 851 * doesn't cause havoc. Like in the driver load code we don't 852 * bother with the tiny race here where we might lose hotplug 853 * notifications. 854 */ 855 intel_hpd_init(dev_priv); 856 /* Config may have changed between suspend and resume */ 857 drm_helper_hpd_irq_event(dev); 858 859 intel_opregion_init(dev); 860 861 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 862 863 mutex_lock(&dev_priv->modeset_restore_lock); 864 dev_priv->modeset_restore = MODESET_DONE; 865 mutex_unlock(&dev_priv->modeset_restore_lock); 866 867 intel_opregion_notify_adapter(dev, PCI_D0); 868 869 drm_kms_helper_poll_enable(dev); 870 871 enable_rpm_wakeref_asserts(dev_priv); 872 873 return 0; 874 } 875 876 static int i915_drm_resume_early(struct drm_device *dev) 877 { 878 struct drm_i915_private *dev_priv = dev->dev_private; 879 int ret = 0; 880 881 /* 882 * We have a resume ordering issue with the snd-hda driver also 883 * requiring our device to be powered up. Due to the lack of a 884 * parent/child relationship we currently solve this with an early 885 * resume hook. 886 * 887 * FIXME: This should be solved with a special hdmi sink device or 888 * similar so that power domains can be employed. 889 */ 890 891 /* 892 * Note that we need to set the power state explicitly, since we 893 * powered off the device during freeze and the PCI core won't power 894 * it back up for us during thaw. Powering off the device during 895 * freeze is not a hard requirement though, and during the 896 * suspend/resume phases the PCI core makes sure we get here with the 897 * device powered on. So in case we change our freeze logic and keep 898 * the device powered we can also remove the following set power state 899 * call. 900 */ 901 #if 0 902 ret = pci_set_power_state(dev->pdev, PCI_D0); 903 if (ret) { 904 DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); 905 goto out; 906 } 907 908 /* 909 * Note that pci_enable_device() first enables any parent bridge 910 * device and only then sets the power state for this device. The 911 * bridge enabling is a nop though, since bridge devices are resumed 912 * first. The order of enabling power and enabling the device is 913 * imposed by the PCI core as described above, so here we preserve the 914 * same order for the freeze/thaw phases. 915 * 916 * TODO: eventually we should remove pci_disable_device() / 917 * pci_enable_device() from suspend/resume. Due to how they 918 * depend on the device enable refcount we can't anyway depend on them 919 * disabling/enabling the device.
920 */ 921 if (pci_enable_device(dev->pdev)) { 922 ret = -EIO; 923 goto out; 924 } 925 926 pci_set_master(dev->pdev); 927 #endif 928 929 disable_rpm_wakeref_asserts(dev_priv); 930 931 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 932 ret = vlv_resume_prepare(dev_priv, false); 933 if (ret) 934 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 935 ret); 936 937 intel_uncore_early_sanitize(dev, true); 938 939 if (IS_BROXTON(dev)) { 940 if (!dev_priv->suspended_to_idle) 941 gen9_sanitize_dc_state(dev_priv); 942 bxt_disable_dc9(dev_priv); 943 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 944 hsw_disable_pc8(dev_priv); 945 } 946 947 intel_uncore_sanitize(dev); 948 949 if (IS_BROXTON(dev_priv) || 950 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 951 intel_power_domains_init_hw(dev_priv, true); 952 953 enable_rpm_wakeref_asserts(dev_priv); 954 955 #if 0 956 out: 957 #endif 958 dev_priv->suspended_to_idle = false; 959 960 return ret; 961 } 962 963 int i915_resume_switcheroo(struct drm_device *dev) 964 { 965 int ret; 966 967 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 968 return 0; 969 970 ret = i915_drm_resume_early(dev); 971 if (ret) 972 return ret; 973 974 return i915_drm_resume(dev); 975 } 976 977 /* XXX Hack for the old *BSD drm code base 978 * The device id field is set at probe time */ 979 static drm_pci_id_list_t i915_attach_list[] = { 980 {0x8086, 0, 0, "Intel i915 GPU"}, 981 {0, 0, 0, NULL} 982 }; 983 984 struct intel_device_info * 985 i915_get_device_id(int device) 986 { 987 const struct pci_device_id *did; 988 989 for (did = &pciidlist[0]; did->device != 0; did++) { 990 if (did->device != device) 991 continue; 992 return (struct intel_device_info *)did->driver_data; 993 } 994 return (NULL); 995 } 996 997 static int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 998 struct sysctl_oid *top) 999 { 1000 return drm_add_busid_modesetting(dev, ctx, top); 1001 } 1002 1003 extern devclass_t drm_devclass; 1004 1005 /** 1006 * i915_reset - reset chip after a hang 1007 * @dev: drm device to reset 1008 * 1009 * Reset the chip. Useful if a hang is detected. Returns zero on successful 1010 * reset or otherwise an error code. 1011 * 1012 * Procedure is fairly simple: 1013 * - reset the chip using the reset reg 1014 * - re-init context state 1015 * - re-init hardware status page 1016 * - re-init ring buffer 1017 * - re-init interrupt state 1018 * - re-init display 1019 */ 1020 int i915_reset(struct drm_device *dev) 1021 { 1022 struct drm_i915_private *dev_priv = dev->dev_private; 1023 struct i915_gpu_error *error = &dev_priv->gpu_error; 1024 unsigned reset_counter; 1025 int ret; 1026 1027 intel_reset_gt_powersave(dev); 1028 1029 mutex_lock(&dev->struct_mutex); 1030 1031 /* Clear any previous failed attempts at recovery. Time to try again. */ 1032 atomic_andnot(I915_WEDGED, &error->reset_counter); 1033 1034 /* Clear the reset-in-progress flag and increment the reset epoch. */ 1035 reset_counter = atomic_inc_return(&error->reset_counter); 1036 if (WARN_ON(__i915_reset_in_progress(reset_counter))) { 1037 ret = -EIO; 1038 goto error; 1039 } 1040 1041 i915_gem_reset(dev); 1042 1043 ret = intel_gpu_reset(dev, ALL_ENGINES); 1044 1045 /* Also reset the gpu hangman. 
*/ 1046 if (error->stop_rings != 0) { 1047 DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); 1048 error->stop_rings = 0; 1049 if (ret == -ENODEV) { 1050 DRM_INFO("Reset not implemented, but ignoring " 1051 "error for simulated gpu hangs\n"); 1052 ret = 0; 1053 } 1054 } 1055 1056 if (i915_stop_ring_allow_warn(dev_priv)) 1057 pr_notice("drm/i915: Resetting chip after gpu hang\n"); 1058 1059 if (ret) { 1060 if (ret != -ENODEV) 1061 DRM_ERROR("Failed to reset chip: %i\n", ret); 1062 else 1063 DRM_DEBUG_DRIVER("GPU reset disabled\n"); 1064 goto error; 1065 } 1066 1067 intel_overlay_reset(dev_priv); 1068 1069 /* Ok, now get things going again... */ 1070 1071 /* 1072 * Everything depends on having the GTT running, so we need to start 1073 * there. Fortunately we don't need to do this unless we reset the 1074 * chip at a PCI level. 1075 * 1076 * Next we need to restore the context, but we don't use those 1077 * yet either... 1078 * 1079 * Ring buffer needs to be re-initialized in the KMS case, or if X 1080 * was running at the time of the reset (i.e. we weren't VT 1081 * switched away). 1082 */ 1083 ret = i915_gem_init_hw(dev); 1084 if (ret) { 1085 DRM_ERROR("Failed hw init on reset %d\n", ret); 1086 goto error; 1087 } 1088 1089 mutex_unlock(&dev->struct_mutex); 1090 1091 /* 1092 * rps/rc6 re-init is necessary to restore state lost after the 1093 * reset and the re-install of gt irqs. Skip for ironlake per 1094 * previous concerns that it doesn't respond well to some forms 1095 * of re-init after reset. 1096 */ 1097 if (INTEL_INFO(dev)->gen > 5) 1098 intel_enable_gt_powersave(dev); 1099 1100 return 0; 1101 1102 error: 1103 atomic_or(I915_WEDGED, &error->reset_counter); 1104 mutex_unlock(&dev->struct_mutex); 1105 return ret; 1106 } 1107 1108 static int i915_pci_probe(device_t kdev) 1109 { 1110 int device, i = 0; 1111 1112 if (pci_get_class(kdev) != PCIC_DISPLAY) 1113 return ENXIO; 1114 1115 if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL) 1116 return ENXIO; 1117 1118 device = pci_get_device(kdev); 1119 1120 for (i = 0; pciidlist[i].device != 0; i++) { 1121 if (pciidlist[i].device == device) { 1122 i915_attach_list[0].device = device; 1123 return 0; 1124 } 1125 } 1126 1127 return ENXIO; 1128 } 1129 1130 #if 0 1131 static void 1132 i915_pci_remove(struct pci_dev *pdev) 1133 { 1134 struct drm_device *dev = pci_get_drvdata(pdev); 1135 1136 drm_put_dev(dev); 1137 } 1138 1139 static int i915_pm_suspend(struct device *dev) 1140 { 1141 struct pci_dev *pdev = to_pci_dev(dev); 1142 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1143 1144 if (!drm_dev || !drm_dev->dev_private) { 1145 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 1146 return -ENODEV; 1147 } 1148 1149 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1150 return 0; 1151 1152 return i915_drm_suspend(drm_dev); 1153 } 1154 1155 static int i915_pm_suspend_late(struct device *dev) 1156 { 1157 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1158 1159 /* 1160 * We have a suspend ordering issue with the snd-hda driver also 1161 * requiring our device to be powered up. Due to the lack of a 1162 * parent/child relationship we currently solve this with a late 1163 * suspend hook. 1164 * 1165 * FIXME: This should be solved with a special hdmi sink device or 1166 * similar so that power domains can be employed.
1167 */ 1168 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1169 return 0; 1170 1171 return i915_drm_suspend_late(drm_dev, false); 1172 } 1173 1174 static int i915_pm_poweroff_late(struct device *dev) 1175 { 1176 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1177 1178 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1179 return 0; 1180 1181 return i915_drm_suspend_late(drm_dev, true); 1182 } 1183 1184 static int i915_pm_resume_early(struct device *dev) 1185 { 1186 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1187 1188 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1189 return 0; 1190 1191 return i915_drm_resume_early(drm_dev); 1192 } 1193 1194 static int i915_pm_resume(struct device *dev) 1195 { 1196 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1197 1198 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1199 return 0; 1200 1201 return i915_drm_resume(drm_dev); 1202 } 1203 #endif 1204 1205 /* 1206 * Save all Gunit registers that may be lost after a D3 and a subsequent 1207 * S0i[R123] transition. The list of registers needing a save/restore is 1208 * defined in the VLV2_S0IXRegs document. This document marks all Gunit 1209 * registers in the following way: 1210 * - Driver: saved/restored by the driver 1211 * - Punit : saved/restored by the Punit firmware 1212 * - No, w/o marking: no need to save/restore, since the register is R/O or 1213 * used internally by the HW in a way that doesn't depend on 1214 * keeping the content across a suspend/resume. 1215 * - Debug : used for debugging 1216 * 1217 * We save/restore all registers marked with 'Driver', with the following 1218 * exceptions: 1219 * - Registers out of use, including also registers marked with 'Debug'. 1220 * These have no effect on the driver's operation, so we don't save/restore 1221 * them to reduce the overhead. 1222 * - Registers that are fully set up by an initialization function called from 1223 * the resume path. For example many clock gating and RPS/RC6 registers. 1224 * - Registers that provide the right functionality with their reset defaults. 1225 * 1226 * TODO: Except for registers that based on the above 3 criteria can be safely 1227 * ignored, we save/restore all others, practically treating the HW context as 1228 * a black-box for the driver. Further investigation is needed to reduce the 1229 * saved/restored registers even further, by following the same 3 criteria.
1230 */ 1231 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) 1232 { 1233 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 1234 int i; 1235 1236 /* GAM 0x4000-0x4770 */ 1237 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); 1238 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); 1239 s->arb_mode = I915_READ(ARB_MODE); 1240 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); 1241 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 1242 1243 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1244 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); 1245 1246 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 1247 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 1248 1249 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); 1250 s->ecochk = I915_READ(GAM_ECOCHK); 1251 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); 1252 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); 1253 1254 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); 1255 1256 /* MBC 0x9024-0x91D0, 0x8500 */ 1257 s->g3dctl = I915_READ(VLV_G3DCTL); 1258 s->gsckgctl = I915_READ(VLV_GSCKGCTL); 1259 s->mbctl = I915_READ(GEN6_MBCTL); 1260 1261 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 1262 s->ucgctl1 = I915_READ(GEN6_UCGCTL1); 1263 s->ucgctl3 = I915_READ(GEN6_UCGCTL3); 1264 s->rcgctl1 = I915_READ(GEN6_RCGCTL1); 1265 s->rcgctl2 = I915_READ(GEN6_RCGCTL2); 1266 s->rstctl = I915_READ(GEN6_RSTCTL); 1267 s->misccpctl = I915_READ(GEN7_MISCCPCTL); 1268 1269 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 1270 s->gfxpause = I915_READ(GEN6_GFXPAUSE); 1271 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); 1272 s->rpdeuc = I915_READ(GEN6_RPDEUC); 1273 s->ecobus = I915_READ(ECOBUS); 1274 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); 1275 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); 1276 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); 1277 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); 1278 s->rcedata = I915_READ(VLV_RCEDATA); 1279 s->spare2gh = I915_READ(VLV_SPAREG2H); 1280 1281 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 1282 s->gt_imr = I915_READ(GTIMR); 1283 s->gt_ier = I915_READ(GTIER); 1284 s->pm_imr = I915_READ(GEN6_PMIMR); 1285 s->pm_ier = I915_READ(GEN6_PMIER); 1286 1287 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 1288 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); 1289 1290 /* GT SA CZ domain, 0x100000-0x138124 */ 1291 s->tilectl = I915_READ(TILECTL); 1292 s->gt_fifoctl = I915_READ(GTFIFOCTL); 1293 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); 1294 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1295 s->pmwgicz = I915_READ(VLV_PMWGICZ); 1296 1297 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 1298 s->gu_ctl0 = I915_READ(VLV_GU_CTL0); 1299 s->gu_ctl1 = I915_READ(VLV_GU_CTL1); 1300 s->pcbr = I915_READ(VLV_PCBR); 1301 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); 1302 1303 /* 1304 * Not saving any of: 1305 * DFT, 0x9800-0x9EC0 1306 * SARB, 0xB000-0xB1FC 1307 * GAC, 0x5208-0x524C, 0x14000-0x14C000 1308 * PCI CFG 1309 */ 1310 } 1311 1312 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) 1313 { 1314 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 1315 u32 val; 1316 int i; 1317 1318 /* GAM 0x4000-0x4770 */ 1319 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); 1320 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); 1321 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); 1322 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); 1323 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 1324 1325 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1326 I915_WRITE(GEN7_LRA_LIMITS(i), 
s->lra_limits[i]); 1327 1328 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); 1329 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); 1330 1331 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); 1332 I915_WRITE(GAM_ECOCHK, s->ecochk); 1333 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); 1334 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); 1335 1336 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); 1337 1338 /* MBC 0x9024-0x91D0, 0x8500 */ 1339 I915_WRITE(VLV_G3DCTL, s->g3dctl); 1340 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); 1341 I915_WRITE(GEN6_MBCTL, s->mbctl); 1342 1343 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 1344 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); 1345 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); 1346 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); 1347 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); 1348 I915_WRITE(GEN6_RSTCTL, s->rstctl); 1349 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); 1350 1351 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 1352 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); 1353 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); 1354 I915_WRITE(GEN6_RPDEUC, s->rpdeuc); 1355 I915_WRITE(ECOBUS, s->ecobus); 1356 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); 1357 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); 1358 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); 1359 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); 1360 I915_WRITE(VLV_RCEDATA, s->rcedata); 1361 I915_WRITE(VLV_SPAREG2H, s->spare2gh); 1362 1363 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 1364 I915_WRITE(GTIMR, s->gt_imr); 1365 I915_WRITE(GTIER, s->gt_ier); 1366 I915_WRITE(GEN6_PMIMR, s->pm_imr); 1367 I915_WRITE(GEN6_PMIER, s->pm_ier); 1368 1369 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 1370 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); 1371 1372 /* GT SA CZ domain, 0x100000-0x138124 */ 1373 I915_WRITE(TILECTL, s->tilectl); 1374 I915_WRITE(GTFIFOCTL, s->gt_fifoctl); 1375 /* 1376 * Preserve the GT allow wake and GFX force clock bit, they are not 1377 * to be restored, as they are used to control the s0ix suspend/resume 1378 * sequence by the caller.
1379 */ 1380 val = I915_READ(VLV_GTLC_WAKE_CTRL); 1381 val &= VLV_GTLC_ALLOWWAKEREQ; 1382 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; 1383 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 1384 1385 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1386 val &= VLV_GFX_CLK_FORCE_ON_BIT; 1387 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; 1388 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 1389 1390 I915_WRITE(VLV_PMWGICZ, s->pmwgicz); 1391 1392 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 1393 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); 1394 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); 1395 I915_WRITE(VLV_PCBR, s->pcbr); 1396 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); 1397 } 1398 1399 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) 1400 { 1401 u32 val; 1402 int err; 1403 1404 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) 1405 1406 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1407 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 1408 if (force_on) 1409 val |= VLV_GFX_CLK_FORCE_ON_BIT; 1410 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 1411 1412 if (!force_on) 1413 return 0; 1414 1415 err = wait_for(COND, 20); 1416 if (err) 1417 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 1418 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 1419 1420 return err; 1421 #undef COND 1422 } 1423 1424 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 1425 { 1426 u32 val; 1427 int err = 0; 1428 1429 val = I915_READ(VLV_GTLC_WAKE_CTRL); 1430 val &= ~VLV_GTLC_ALLOWWAKEREQ; 1431 if (allow) 1432 val |= VLV_GTLC_ALLOWWAKEREQ; 1433 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 1434 POSTING_READ(VLV_GTLC_WAKE_CTRL); 1435 1436 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ 1437 allow) 1438 err = wait_for(COND, 1); 1439 if (err) 1440 DRM_ERROR("timeout disabling GT waking\n"); 1441 return err; 1442 #undef COND 1443 } 1444 1445 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 1446 bool wait_for_on) 1447 { 1448 u32 mask; 1449 u32 val; 1450 int err; 1451 1452 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 1453 val = wait_for_on ? mask : 0; 1454 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 1455 if (COND) 1456 return 0; 1457 1458 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 1459 onoff(wait_for_on), 1460 I915_READ(VLV_GTLC_PW_STATUS)); 1461 1462 /* 1463 * RC6 transitioning can be delayed up to 2 msec (see 1464 * valleyview_enable_rps), use 3 msec for safety. 1465 */ 1466 err = wait_for(COND, 3); 1467 if (err) 1468 DRM_ERROR("timeout waiting for GT wells to go %s\n", 1469 onoff(wait_for_on)); 1470 1471 return err; 1472 #undef COND 1473 } 1474 1475 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 1476 { 1477 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) 1478 return; 1479 1480 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); 1481 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 1482 } 1483 1484 static int vlv_suspend_complete(struct drm_i915_private *dev_priv) 1485 { 1486 u32 mask; 1487 int err; 1488 1489 /* 1490 * Bspec defines the following GT well on flags as debug only, so 1491 * don't treat them as hard failures. 
1492 */ 1493 (void)vlv_wait_for_gt_wells(dev_priv, false); 1494 1495 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; 1496 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); 1497 1498 vlv_check_no_gt_access(dev_priv); 1499 1500 err = vlv_force_gfx_clock(dev_priv, true); 1501 if (err) 1502 goto err1; 1503 1504 err = vlv_allow_gt_wake(dev_priv, false); 1505 if (err) 1506 goto err2; 1507 1508 if (!IS_CHERRYVIEW(dev_priv)) 1509 vlv_save_gunit_s0ix_state(dev_priv); 1510 1511 err = vlv_force_gfx_clock(dev_priv, false); 1512 if (err) 1513 goto err2; 1514 1515 return 0; 1516 1517 err2: 1518 /* For safety always re-enable waking and disable gfx clock forcing */ 1519 vlv_allow_gt_wake(dev_priv, true); 1520 err1: 1521 vlv_force_gfx_clock(dev_priv, false); 1522 1523 return err; 1524 } 1525 1526 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 1527 bool rpm_resume) 1528 { 1529 struct drm_device *dev = dev_priv->dev; 1530 int err; 1531 int ret; 1532 1533 /* 1534 * If any of the steps fail just try to continue, that's the best we 1535 * can do at this point. Return the first error code (which will also 1536 * leave RPM permanently disabled). 1537 */ 1538 ret = vlv_force_gfx_clock(dev_priv, true); 1539 1540 if (!IS_CHERRYVIEW(dev_priv)) 1541 vlv_restore_gunit_s0ix_state(dev_priv); 1542 1543 err = vlv_allow_gt_wake(dev_priv, true); 1544 if (!ret) 1545 ret = err; 1546 1547 err = vlv_force_gfx_clock(dev_priv, false); 1548 if (!ret) 1549 ret = err; 1550 1551 vlv_check_no_gt_access(dev_priv); 1552 1553 if (rpm_resume) { 1554 intel_init_clock_gating(dev); 1555 i915_gem_restore_fences(dev); 1556 } 1557 1558 return ret; 1559 } 1560 1561 #if 0 1562 static int intel_runtime_suspend(struct device *device) 1563 { 1564 struct pci_dev *pdev = to_pci_dev(device); 1565 struct drm_device *dev = pci_get_drvdata(pdev); 1566 struct drm_i915_private *dev_priv = dev->dev_private; 1567 int ret; 1568 1569 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1570 return -ENODEV; 1571 1572 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1573 return -ENODEV; 1574 1575 DRM_DEBUG_KMS("Suspending device\n"); 1576 1577 /* 1578 * We could deadlock here in case another thread holding struct_mutex 1579 * calls RPM suspend concurrently, since the RPM suspend will wait 1580 * first for this RPM suspend to finish. In this case the concurrent 1581 * RPM resume will be followed by its RPM suspend counterpart. Still 1582 * for consistency return -EAGAIN, which will reschedule this suspend. 1583 */ 1584 if (!mutex_trylock(&dev->struct_mutex)) { 1585 DRM_DEBUG_KMS("device lock contention, deferring suspend\n"); 1586 /* 1587 * Bump the expiration timestamp, otherwise the suspend won't 1588 * be rescheduled. 1589 */ 1590 pm_runtime_mark_last_busy(device); 1591 1592 return -EAGAIN; 1593 } 1594 1595 disable_rpm_wakeref_asserts(dev_priv); 1596 1597 /* 1598 * We are safe here against re-faults, since the fault handler takes 1599 * an RPM reference.
1600 */ 1601 i915_gem_release_all_mmaps(dev_priv); 1602 mutex_unlock(&dev->struct_mutex); 1603 1604 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 1605 1606 intel_guc_suspend(dev); 1607 1608 intel_suspend_gt_powersave(dev); 1609 intel_runtime_pm_disable_interrupts(dev_priv); 1610 1611 ret = 0; 1612 if (IS_BROXTON(dev_priv)) { 1613 bxt_display_core_uninit(dev_priv); 1614 bxt_enable_dc9(dev_priv); 1615 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1616 hsw_enable_pc8(dev_priv); 1617 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1618 ret = vlv_suspend_complete(dev_priv); 1619 } 1620 1621 if (ret) { 1622 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1623 intel_runtime_pm_enable_interrupts(dev_priv); 1624 1625 enable_rpm_wakeref_asserts(dev_priv); 1626 1627 return ret; 1628 } 1629 1630 intel_uncore_forcewake_reset(dev, false); 1631 1632 enable_rpm_wakeref_asserts(dev_priv); 1633 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1634 1635 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 1636 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 1637 1638 dev_priv->pm.suspended = true; 1639 1640 /* 1641 * FIXME: We really should find a document that references the arguments 1642 * used below! 1643 */ 1644 if (IS_BROADWELL(dev)) { 1645 /* 1646 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 1647 * being detected, and the call we do at intel_runtime_resume() 1648 * won't be able to restore them. Since PCI_D3hot matches the 1649 * actual specification and appears to be working, use it. 1650 */ 1651 intel_opregion_notify_adapter(dev, PCI_D3hot); 1652 } else { 1653 /* 1654 * current versions of firmware which depend on this opregion 1655 * notification have repurposed the D1 definition to mean 1656 * "runtime suspended" vs. what you would normally expect (D3) 1657 * to distinguish it from notifications that might be sent via 1658 * the suspend path. 
1659 */ 1660 intel_opregion_notify_adapter(dev, PCI_D1); 1661 } 1662 1663 assert_forcewakes_inactive(dev_priv); 1664 1665 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 1666 intel_hpd_poll_init(dev_priv); 1667 1668 DRM_DEBUG_KMS("Device suspended\n"); 1669 return 0; 1670 } 1671 1672 static int intel_runtime_resume(struct device *device) 1673 { 1674 struct pci_dev *pdev = to_pci_dev(device); 1675 struct drm_device *dev = pci_get_drvdata(pdev); 1676 struct drm_i915_private *dev_priv = dev->dev_private; 1677 int ret = 0; 1678 1679 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1680 return -ENODEV; 1681 1682 DRM_DEBUG_KMS("Resuming device\n"); 1683 1684 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1685 disable_rpm_wakeref_asserts(dev_priv); 1686 1687 intel_opregion_notify_adapter(dev, PCI_D0); 1688 dev_priv->pm.suspended = false; 1689 if (intel_uncore_unclaimed_mmio(dev_priv)) 1690 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 1691 1692 intel_guc_resume(dev); 1693 1694 if (IS_GEN6(dev_priv)) 1695 intel_init_pch_refclk(dev); 1696 1697 if (IS_BROXTON(dev)) { 1698 bxt_disable_dc9(dev_priv); 1699 bxt_display_core_init(dev_priv, true); 1700 if (dev_priv->csr.dmc_payload && 1701 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 1702 gen9_enable_dc5(dev_priv); 1703 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1704 hsw_disable_pc8(dev_priv); 1705 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1706 ret = vlv_resume_prepare(dev_priv, true); 1707 } 1708 1709 /* 1710 * No point of rolling back things in case of an error, as the best 1711 * we can do is to hope that things will still work (and disable RPM). 1712 */ 1713 i915_gem_init_swizzling(dev); 1714 gen6_update_ring_freq(dev); 1715 1716 intel_runtime_pm_enable_interrupts(dev_priv); 1717 1718 /* 1719 * On VLV/CHV display interrupts are part of the display 1720 * power well, so hpd is reinitialized from there. For 1721 * everyone else do it here. 
1722 */ 1723 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1724 intel_hpd_init(dev_priv); 1725 1726 intel_enable_gt_powersave(dev); 1727 1728 enable_rpm_wakeref_asserts(dev_priv); 1729 1730 if (ret) 1731 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 1732 else 1733 DRM_DEBUG_KMS("Device resumed\n"); 1734 1735 return ret; 1736 } 1737 1738 static const struct dev_pm_ops i915_pm_ops = { 1739 /* 1740 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 1741 * PMSG_RESUME] 1742 */ 1743 .suspend = i915_pm_suspend, 1744 .suspend_late = i915_pm_suspend_late, 1745 .resume_early = i915_pm_resume_early, 1746 .resume = i915_pm_resume, 1747 1748 /* 1749 * S4 event handlers 1750 * @freeze, @freeze_late : called (1) before creating the 1751 * hibernation image [PMSG_FREEZE] and 1752 * (2) after rebooting, before restoring 1753 * the image [PMSG_QUIESCE] 1754 * @thaw, @thaw_early : called (1) after creating the hibernation 1755 * image, before writing it [PMSG_THAW] 1756 * and (2) after failing to create or 1757 * restore the image [PMSG_RECOVER] 1758 * @poweroff, @poweroff_late: called after writing the hibernation 1759 * image, before rebooting [PMSG_HIBERNATE] 1760 * @restore, @restore_early : called after rebooting and restoring the 1761 * hibernation image [PMSG_RESTORE] 1762 */ 1763 .freeze = i915_pm_suspend, 1764 .freeze_late = i915_pm_suspend_late, 1765 .thaw_early = i915_pm_resume_early, 1766 .thaw = i915_pm_resume, 1767 .poweroff = i915_pm_suspend, 1768 .poweroff_late = i915_pm_poweroff_late, 1769 .restore_early = i915_pm_resume_early, 1770 .restore = i915_pm_resume, 1771 1772 /* S0ix (via runtime suspend) event handlers */ 1773 .runtime_suspend = intel_runtime_suspend, 1774 .runtime_resume = intel_runtime_resume, 1775 }; 1776 1777 static const struct vm_operations_struct i915_gem_vm_ops = { 1778 .fault = i915_gem_fault, 1779 .open = drm_gem_vm_open, 1780 .close = drm_gem_vm_close, 1781 }; 1782 #endif 1783 1784 static const struct file_operations i915_driver_fops = { 1785 .owner = THIS_MODULE, 1786 #if 0 1787 .open = drm_open, 1788 .release = drm_release, 1789 .unlocked_ioctl = drm_ioctl, 1790 .mmap = drm_gem_mmap, 1791 .poll = drm_poll, 1792 .read = drm_read, 1793 #ifdef CONFIG_COMPAT 1794 .compat_ioctl = i915_compat_ioctl, 1795 #endif 1796 .llseek = noop_llseek, 1797 #endif 1798 }; 1799 1800 static struct cdev_pager_ops i915_gem_vm_ops = { 1801 .cdev_pg_fault = i915_gem_fault, 1802 .cdev_pg_ctor = i915_gem_pager_ctor, 1803 .cdev_pg_dtor = i915_gem_pager_dtor 1804 }; 1805 1806 static struct drm_driver driver = { 1807 /* Don't use MTRRs here; the Xserver or userspace app should 1808 * deal with them for Intel hardware. 
1809 */ 1810 .driver_features = 1811 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 1812 DRIVER_RENDER | DRIVER_MODESET, 1813 .load = i915_driver_load, 1814 .unload = i915_driver_unload, 1815 .open = i915_driver_open, 1816 .lastclose = i915_driver_lastclose, 1817 .preclose = i915_driver_preclose, 1818 .postclose = i915_driver_postclose, 1819 .set_busid = drm_pci_set_busid, 1820 1821 #if defined(CONFIG_DEBUG_FS) 1822 .debugfs_init = i915_debugfs_init, 1823 .debugfs_cleanup = i915_debugfs_cleanup, 1824 #endif 1825 .gem_free_object = i915_gem_free_object, 1826 .gem_vm_ops = &i915_gem_vm_ops, 1827 1828 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 1829 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 1830 .gem_prime_export = i915_gem_prime_export, 1831 .gem_prime_import = i915_gem_prime_import, 1832 1833 .dumb_create = i915_gem_dumb_create, 1834 .dumb_map_offset = i915_gem_mmap_gtt, 1835 .dumb_destroy = drm_gem_dumb_destroy, 1836 .ioctls = i915_ioctls, 1837 .sysctl_init = i915_sysctl_init, 1838 .fops = &i915_driver_fops, 1839 .name = DRIVER_NAME, 1840 .desc = DRIVER_DESC, 1841 .date = DRIVER_DATE, 1842 .major = DRIVER_MAJOR, 1843 .minor = DRIVER_MINOR, 1844 .patchlevel = DRIVER_PATCHLEVEL, 1845 }; 1846 1847 static int __init i915_init(void); 1848 1849 static int 1850 i915_attach(device_t kdev) 1851 { 1852 struct drm_device *dev = device_get_softc(kdev); 1853 int error; 1854 1855 dev->driver = &driver; 1856 error = drm_attach(kdev, i915_attach_list); 1857 1858 return error; 1859 } 1860 1861 static device_method_t i915_methods[] = { 1862 /* Device interface */ 1863 DEVMETHOD(device_probe, i915_pci_probe), 1864 DEVMETHOD(device_attach, i915_attach), 1865 DEVMETHOD(device_suspend, i915_suspend_switcheroo), 1866 DEVMETHOD(device_resume, i915_resume_switcheroo), 1867 DEVMETHOD(device_detach, drm_release), 1868 DEVMETHOD_END 1869 }; 1870 1871 static driver_t i915_driver = { 1872 "drm", 1873 i915_methods, 1874 sizeof(struct drm_device) 1875 }; 1876 1877 static int __init i915_init(void) 1878 { 1879 driver.num_ioctls = i915_max_ioctl; 1880 1881 /* 1882 * Enable KMS by default, unless explicitly overridden by 1883 * either the i915.modeset parameter or by the 1884 * vga_text_mode_force boot option. 1885 */ 1886 1887 if (i915.modeset == 0) 1888 driver.driver_features &= ~DRIVER_MODESET; 1889 1890 if (vgacon_text_force() && i915.modeset == -1) 1891 driver.driver_features &= ~DRIVER_MODESET; 1892 1893 if (!(driver.driver_features & DRIVER_MODESET)) { 1894 /* Silently fail loading to not upset userspace. */ 1895 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); 1896 return 0; 1897 } 1898 1899 if (i915.nuclear_pageflip) 1900 driver.driver_features |= DRIVER_ATOMIC; 1901 1902 #if 0 1903 return drm_pci_init(&driver, &i915_pci_driver); 1904 #else 1905 return 1; 1906 #endif 1907 } 1908 1909 #if 0 1910 static void __exit i915_exit(void) 1911 { 1912 if (!(driver.driver_features & DRIVER_MODESET)) 1913 return; /* Never loaded a driver. */ 1914 1915 drm_pci_exit(&driver, &i915_pci_driver); 1916 } 1917 #endif 1918 1919 module_init(i915_init); 1920 1921 DRIVER_MODULE_ORDERED(i915, vgapci, i915_driver, drm_devclass, NULL, NULL, SI_ORDER_ANY); 1922 MODULE_DEPEND(i915, drm, 1, 1, 1); 1923 #ifdef CONFIG_ACPI 1924 MODULE_DEPEND(i915, acpi, 1, 1, 1); 1925 #endif 1926