/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/device.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>

static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

#define BDW_COLORS \
	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
#define CHV_COLORS \
	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
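
/*
 * Per-platform capability descriptors. Each entry in the pciidlist
 * table further down points at one of these intel_device_info structs,
 * which the rest of the driver queries via INTEL_INFO().
 */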

static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
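
/* Gen6 (Sandy Bridge) adds the blitter ring and an LLC shared with the CPU. */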

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1, \
	GEN_DEFAULT_PIPEOFFSETS, \
	IVB_CURSOR_OFFSETS

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
};

#define VLV_FEATURES \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \
	CURSOR_OFFSETS

static const struct intel_device_info intel_valleyview_m_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
	.is_mobile = 1,
};

static const struct intel_device_info intel_valleyview_d_info = {
	VLV_FEATURES,
	.is_valleyview = 1,
};

#define HSW_FEATURES \
	GEN7_FEATURES, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
	.has_ddi = 1, \
	.has_fpga_dbg = 1

static const struct intel_device_info intel_haswell_d_info = {
	HSW_FEATURES,
	.is_haswell = 1,
};

static const struct intel_device_info intel_haswell_m_info = {
	HSW_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
};

#define BDW_FEATURES \
	HSW_FEATURES, \
	BDW_COLORS

static const struct intel_device_info intel_broadwell_d_info = {
	BDW_FEATURES,
	.gen = 8,
};

static const struct intel_device_info intel_broadwell_m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	BDW_FEATURES,
	.gen = 8,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	BDW_FEATURES,
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_cherryview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
	CHV_COLORS,
};

static const struct intel_device_info intel_skylake_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
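
/* Broxton is the low-power (Atom-based) member of the Gen9 family. */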

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	{0, 0, 0}
};

#define PCI_VENDOR_INTEL	0x8086
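
/*
 * Fallback used when running virtualized: guess the PCH type from the
 * GPU generation, since the real ISA bridge may not be visible to the
 * guest (see intel_detect_pch() below).
 */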
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	device_t pch = NULL;
	struct pci_devinfo *di;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/* XXX The ISA bridge probe causes some old Core2 machines to hang */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	di = NULL;

	while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) {
		if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
			unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
					!IS_KABYLAKE(dev));
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				    1)) {
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

#if 0
	pci_dev_put(pch);
#endif
}
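
/*
 * Decide whether inter-ring hardware semaphores should be used. A
 * non-negative i915.semaphores module parameter overrides the
 * per-platform heuristics below.
 */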
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

#ifdef __DragonFly__
#define IS_BUILTIN(blah)	0
#endif

static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;
	int error;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

#if 0
	pci_save_state(dev->pdev);
#endif

	error = i915_gem_suspend(dev);
	if (error) {
		dev_err(dev->dev,
			"GEM idle failed, resume might fail\n");
		goto out;
	}

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	intel_display_suspend(dev);

#if 0
	intel_dp_mst_suspend(dev);
#endif

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
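	/*
	 * PCI_D1 is used here to tell the opregion firmware that this is a
	 * suspend-to-idle transition rather than a real D1 entry; see the
	 * D1 note in intel_runtime_suspend() below.
	 */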
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

#if 0
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
#endif

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}

static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

#if 0
	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
#endif

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(device_t kdev)
{
	struct drm_device *dev = device_get_softc(kdev);
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

#if 0
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;
#endif

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}

static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_ggtt_enable_hw(dev);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

#if 0
	intel_opregion_notify_adapter(dev, PCI_D0);
#endif

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
#if 0
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {
		ret = -EIO;
		goto out;
	}

	pci_set_master(dev->pdev);
#endif

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

#if 0
out:
#endif
	dev_priv->suspended_to_idle = false;

	return ret;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	return i915_drm_resume(dev);
}

/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
	{0x8086, 0, 0, "Intel i915 GPU"},
	{0, 0, 0, NULL}
};

struct intel_device_info *
i915_get_device_id(int device)
{
	const struct pci_device_id *did;

	for (did = &pciidlist[0]; did->device != 0; did++) {
		if (did->device != device)
			continue;
		return (struct intel_device_info *)did->driver_data;
	}
	return (NULL);
}

extern devclass_t drm_devclass;

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;
	int ret;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);

	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
		ret = -EIO;
		goto error;
	}

	i915_gem_reset(dev);
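
	/*
	 * Attempt the actual hardware reset; -ENODEV means no reset
	 * mechanism is implemented for this platform and is handled
	 * below.
	 */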
	ret = intel_gpu_reset(dev, ALL_ENGINES);

	/* Also reset the gpu hangman. */
	if (error->stop_rings != 0) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		error->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		if (ret != -ENODEV)
			DRM_ERROR("Failed to reset chip: %i\n", ret);
		else
			DRM_DEBUG_DRIVER("GPU reset disabled\n");
		goto error;
	}

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int i915_pci_probe(device_t kdev)
{
	int device, i = 0;

	if (pci_get_class(kdev) != PCIC_DISPLAY)
		return ENXIO;

	if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL)
		return ENXIO;

	device = pci_get_device(kdev);

	for (i = 0; pciidlist[i].device != 0; i++) {
		if (pciidlist[i].device == device) {
			i915_attach_list[0].device = device;
			return 0;
		}
	}

	return ENXIO;
}

#if 0
static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}
#endif

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}

static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}

int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}

static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err = 0;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}

static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      onoff(wait_for_on),
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  onoff(wait_for_on));

	return err;
#undef COND
}

static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
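
/*
 * Final step of the VLV/CHV suspend sequence: force the GFX clock on,
 * block GT wakeups, save the Gunit s0ix state (skipped on CHV), then
 * release the forced clock again.
 */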
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well-on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	if (!IS_CHERRYVIEW(dev_priv))
		vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}

#if 0
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}

	disable_rpm_wakeref_asserts(dev_priv);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv)) {
		bxt_display_core_uninit(dev_priv);
		bxt_enable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_enable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_suspend_complete(dev_priv);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_enable_interrupts(dev_priv);

		enable_rpm_wakeref_asserts(dev_priv);

		return ret;
	}

	intel_uncore_forcewake_reset(dev, false);

	enable_rpm_wakeref_asserts(dev_priv);
	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));

	if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	} else {
		/*
		 * Current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	}

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}

static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
	disable_rpm_wakeref_asserts(dev_priv);

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;
	if (intel_uncore_unclaimed_mmio(dev_priv))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_guc_resume(dev);

	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);

	if (IS_BROXTON(dev)) {
		bxt_disable_dc9(dev_priv);
		bxt_display_core_init(dev_priv, true);
		if (dev_priv->csr.dmc_payload &&
		    (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = vlv_resume_prepare(dev_priv, true);
	}

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_gt_powersave(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
	.freeze_late = i915_pm_suspend_late,
	.thaw_early = i915_pm_resume_early,
	.thaw = i915_pm_resume,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};

static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
#endif

static struct cdev_pager_ops i915_gem_vm_ops = {
	.cdev_pg_fault = i915_gem_fault,
	.cdev_pg_ctor = i915_gem_pager_ctor,
	.cdev_pg_dtor = i915_gem_pager_dtor
};

static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_pager_ops = &i915_gem_vm_ops,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static int __init i915_init(void);

static int
i915_attach(device_t kdev)
{
	struct drm_device *dev = device_get_softc(kdev);
	int error;
	int dummy;

	i915_init();

	dev->driver = &driver;
	error = drm_attach(kdev, i915_attach_list);

	/*
	 * XXX hack - give the kvm_console time to come up before X starts
	 * messing with everything, avoiding at least one deadlock.
	 */
	tsleep(&dummy, 0, "i915_attach", hz*2);

	return error;
}

static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_pci_probe),
	DEVMETHOD(device_attach,	i915_attach),
	DEVMETHOD(device_suspend,	i915_suspend_switcheroo),
	DEVMETHOD(device_resume,	i915_resume_switcheroo),
	DEVMETHOD(device_detach,	drm_release),
	DEVMETHOD_END
};

static driver_t i915_driver = {
	"drm",
	i915_methods,
	sizeof(struct drm_device)
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

#if 0
	return drm_pci_init(&driver, &i915_pci_driver);
#else
	return 1;
#endif
}

#if 0
static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}
#endif

DRIVER_MODULE_ORDERED(i915, vgapci, i915_driver, drm_devclass, NULL, NULL, SI_ORDER_ANY);
MODULE_DEPEND(i915, drm, 1, 1, 1);
MODULE_DEPEND(i915, iicbus, 1, 1, 1);
MODULE_DEPEND(i915, iic, 1, 1, 1);
MODULE_DEPEND(i915, iicbb, 1, 1, 1);
#ifdef CONFIG_ACPI
MODULE_DEPEND(i915, acpi, 1, 1, 1);
#endif