1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #include <drm/drmP.h> 31 #include <drm/i915_drm.h> 32 #include "i915_drv.h" 33 #include <drm/drm_pciids.h> 34 #include "intel_drv.h" 35 36 /* "Specify LVDS channel mode " 37 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)" */ 38 int i915_lvds_channel_mode __read_mostly = 0; 39 TUNABLE_INT("drm.i915.lvds_channel_mode", &i915_lvds_channel_mode); 40 41 static struct drm_driver driver; 42 43 #define INTEL_VGA_DEVICE(id, info_) { \ 44 .device = id, \ 45 .info = info_, \ 46 } 47 48 static const struct intel_device_info intel_i830_info = { 49 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, 50 .has_overlay = 1, .overlay_needs_physical = 1, 51 }; 52 53 static const struct intel_device_info intel_845g_info = { 54 .gen = 2, 55 .has_overlay = 1, .overlay_needs_physical = 1, 56 }; 57 58 static const struct intel_device_info intel_i85x_info = { 59 .gen = 2, .is_i85x = 1, .is_mobile = 1, 60 .cursor_needs_physical = 1, 61 .has_overlay = 1, .overlay_needs_physical = 1, 62 }; 63 64 static const struct intel_device_info intel_i865g_info = { 65 .gen = 2, 66 .has_overlay = 1, .overlay_needs_physical = 1, 67 }; 68 69 static const struct intel_device_info intel_i915g_info = { 70 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, 71 .has_overlay = 1, .overlay_needs_physical = 1, 72 }; 73 static const struct intel_device_info intel_i915gm_info = { 74 .gen = 3, .is_mobile = 1, 75 .cursor_needs_physical = 1, 76 .has_overlay = 1, .overlay_needs_physical = 1, 77 .supports_tv = 1, 78 }; 79 static const struct intel_device_info intel_i945g_info = { 80 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, 81 .has_overlay = 1, .overlay_needs_physical = 1, 82 }; 83 static const struct intel_device_info intel_i945gm_info = { 84 .gen = 3, .is_i945gm = 1, .is_mobile = 1, 85 .has_hotplug = 1, .cursor_needs_physical = 1, 86 .has_overlay = 1, .overlay_needs_physical = 1, 87 .supports_tv = 1, 88 }; 89 90 static const struct intel_device_info intel_i965g_info = { 91 .gen = 4, .is_broadwater = 1, 92 .has_hotplug = 1, 93 .has_overlay = 1, 94 }; 95 96 static const struct intel_device_info intel_i965gm_info = { 97 .gen = 4, .is_crestline = 1, 98 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 99 .has_overlay = 1, 100 .supports_tv = 1, 101 }; 102 103 static const struct intel_device_info intel_g33_info = { 104 .gen = 3, .is_g33 = 1, 105 .need_gfx_hws = 1, .has_hotplug = 1, 106 .has_overlay = 1, 107 }; 108 109 static const struct intel_device_info intel_g45_info = { 110 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, 111 .has_pipe_cxsr = 1, .has_hotplug = 1, 112 .has_bsd_ring = 1, 113 }; 114 115 static const struct intel_device_info intel_gm45_info = { 116 .gen = 4, .is_g4x = 1, 117 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 118 .has_pipe_cxsr = 1, .has_hotplug = 1, 119 .supports_tv = 1, 120 .has_bsd_ring = 1, 121 }; 122 123 static const struct intel_device_info intel_pineview_info = { 124 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, 125 .need_gfx_hws = 1, .has_hotplug = 1, 126 .has_overlay = 1, 127 }; 128 129 static const struct intel_device_info intel_ironlake_d_info = { 130 .gen = 5, 131 .need_gfx_hws = 1, .has_hotplug = 1, 132 .has_bsd_ring = 1, 133 }; 134 135 static const struct intel_device_info intel_ironlake_m_info = { 136 .gen = 5, .is_mobile = 1, 137 .need_gfx_hws = 1, .has_hotplug = 1, 138 .has_fbc = 1, 139 .has_bsd_ring = 1, 140 }; 141 142 static const struct intel_device_info intel_sandybridge_d_info = { 143 .gen = 6, 144 .need_gfx_hws = 1, .has_hotplug = 1, 145 .has_bsd_ring = 1, 146 .has_blt_ring = 1, 147 .has_llc = 1, 148 .has_force_wake = 1, 149 }; 150 151 static const struct intel_device_info intel_sandybridge_m_info = { 152 .gen = 6, .is_mobile = 1, 153 .need_gfx_hws = 1, .has_hotplug = 1, 154 .has_fbc = 1, 155 .has_bsd_ring = 1, 156 .has_blt_ring = 1, 157 .has_llc = 1, 158 .has_force_wake = 1, 159 }; 160 161 static const struct intel_device_info intel_ivybridge_d_info = { 162 .is_ivybridge = 1, .gen = 7, 163 .need_gfx_hws = 1, .has_hotplug = 1, 164 .has_bsd_ring = 1, 165 .has_blt_ring = 1, 166 .has_llc = 1, 167 .has_force_wake = 1, 168 }; 169 170 static const struct intel_device_info intel_ivybridge_m_info = { 171 .is_ivybridge = 1, .gen = 7, .is_mobile = 1, 172 .need_gfx_hws = 1, .has_hotplug = 1, 173 .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ 174 .has_bsd_ring = 1, 175 .has_blt_ring = 1, 176 .has_llc = 1, 177 .has_force_wake = 1, 178 }; 179 180 static const struct intel_device_info intel_valleyview_m_info = { 181 .gen = 7, .is_mobile = 1, 182 .need_gfx_hws = 1, .has_hotplug = 1, 183 .has_fbc = 0, 184 .has_bsd_ring = 1, 185 .has_blt_ring = 1, 186 .is_valleyview = 1, 187 }; 188 189 static const struct intel_device_info intel_valleyview_d_info = { 190 .gen = 7, 191 .need_gfx_hws = 1, .has_hotplug = 1, 192 .has_fbc = 0, 193 .has_bsd_ring = 1, 194 .has_blt_ring = 1, 195 .is_valleyview = 1, 196 }; 197 198 static const struct intel_device_info intel_haswell_d_info = { 199 .is_haswell = 1, .gen = 7, 200 .need_gfx_hws = 1, .has_hotplug = 1, 201 .has_bsd_ring = 1, 202 .has_blt_ring = 1, 203 .has_llc = 1, 204 .has_force_wake = 1, 205 }; 206 207 static const struct intel_device_info intel_haswell_m_info = { 208 .is_haswell = 1, .gen = 7, .is_mobile = 1, 209 .need_gfx_hws = 1, .has_hotplug = 1, 210 .has_bsd_ring = 1, 211 .has_blt_ring = 1, 212 .has_llc = 1, 213 .has_force_wake = 1, 214 }; 215 216 static const struct intel_gfx_device_id { 217 int device; 218 const struct intel_device_info *info; 219 } pciidlist[] = { /* aka */ 220 INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 221 INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ 222 INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ 223 INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), 224 INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ 225 INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ 226 INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ 227 INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ 228 INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ 229 INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ 230 INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ 231 INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ 232 INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ 233 INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ 234 INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ 235 INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ 236 INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ 237 INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ 238 INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ 239 INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ 240 INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ 241 INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ 242 INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ 243 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 244 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 245 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 246 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ 247 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 248 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 249 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 250 INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 251 INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 252 INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), 253 INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), 254 INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 255 INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 256 INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 257 INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 258 INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ 259 INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ 260 INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 261 INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 262 INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 263 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 264 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ 265 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ 266 INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ 267 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ 268 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ 269 INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ 270 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ 271 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ 272 INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ 273 INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ 274 INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ 275 INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ 276 INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ 277 INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ 278 INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ 279 INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ 280 INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ 281 INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ 282 INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ 283 INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ 284 INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ 285 INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ 286 INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ 287 INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ 288 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ 289 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ 290 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ 291 INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ 292 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ 293 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ 294 INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ 295 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ 296 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ 297 INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ 298 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ 299 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ 300 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 301 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 302 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 303 {0, 0} 304 }; 305 306 #define PCI_VENDOR_INTEL 0x8086 307 308 void intel_detect_pch(struct drm_device *dev) 309 { 310 struct drm_i915_private *dev_priv = dev->dev_private; 311 device_t pch; 312 313 /* 314 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 315 * make graphics device passthrough work easy for VMM, that only 316 * need to expose ISA bridge to let driver know the real hardware 317 * underneath. This is a requirement from virtualization team. 318 */ 319 pch = pci_find_class(PCIC_BRIDGE, PCIS_BRIDGE_ISA); 320 if (pch) { 321 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) { 322 unsigned short id; 323 id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 324 dev_priv->pch_id = id; 325 326 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 327 dev_priv->pch_type = PCH_IBX; 328 dev_priv->num_pch_pll = 2; 329 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 330 WARN_ON(!IS_GEN5(dev)); 331 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 332 dev_priv->pch_type = PCH_CPT; 333 dev_priv->num_pch_pll = 2; 334 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 335 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 336 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 337 /* PantherPoint is CPT compatible */ 338 dev_priv->pch_type = PCH_CPT; 339 dev_priv->num_pch_pll = 2; 340 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 341 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 342 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 343 dev_priv->pch_type = PCH_LPT; 344 dev_priv->num_pch_pll = 0; 345 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 346 WARN_ON(!IS_HASWELL(dev)); 347 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 348 dev_priv->pch_type = PCH_LPT; 349 dev_priv->num_pch_pll = 0; 350 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 351 WARN_ON(!IS_HASWELL(dev)); 352 } 353 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); 354 } 355 #if 0 356 pci_dev_put(pch); 357 #endif 358 } 359 } 360 361 bool i915_semaphore_is_enabled(struct drm_device *dev) 362 { 363 if (INTEL_INFO(dev)->gen < 6) 364 return 0; 365 366 if (i915_semaphores >= 0) 367 return i915_semaphores; 368 369 #ifdef CONFIG_INTEL_IOMMU 370 /* Enable semaphores on SNB when IO remapping is off */ 371 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 372 return false; 373 #endif 374 375 return 1; 376 } 377 378 static int i915_drm_freeze(struct drm_device *dev) 379 { 380 struct drm_i915_private *dev_priv = dev->dev_private; 381 382 drm_kms_helper_poll_disable(dev); 383 384 #if 0 385 pci_save_state(dev->pdev); 386 #endif 387 388 /* If KMS is active, we do the leavevt stuff here */ 389 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 390 int error = i915_gem_idle(dev); 391 if (error) { 392 device_printf(dev->dev, 393 "GEM idle failed, resume might fail"); 394 return error; 395 } 396 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 397 398 #if 0 399 intel_modeset_disable(dev); 400 #endif 401 402 drm_irq_uninstall(dev); 403 } 404 405 i915_save_state(dev); 406 407 intel_opregion_fini(dev); 408 409 /* Modeset on resume, not lid events */ 410 dev_priv->modeset_on_lid = 0; 411 412 return 0; 413 } 414 415 static int 416 i915_suspend(device_t kdev) 417 { 418 struct drm_device *dev; 419 int error; 420 421 dev = device_get_softc(kdev); 422 if (dev == NULL || dev->dev_private == NULL) { 423 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 424 return -ENODEV; 425 } 426 427 DRM_DEBUG_KMS("starting suspend\n"); 428 error = i915_drm_freeze(dev); 429 if (error) 430 return (error); 431 432 error = bus_generic_suspend(kdev); 433 DRM_DEBUG_KMS("finished suspend %d\n", error); 434 return (error); 435 } 436 437 static int i915_drm_thaw(struct drm_device *dev) 438 { 439 struct drm_i915_private *dev_priv = dev->dev_private; 440 int error = 0; 441 442 intel_gt_reset(dev); 443 444 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 445 DRM_LOCK(dev); 446 i915_gem_restore_gtt_mappings(dev); 447 DRM_UNLOCK(dev); 448 } 449 450 i915_restore_state(dev); 451 intel_opregion_setup(dev); 452 453 /* KMS EnterVT equivalent */ 454 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 455 intel_init_pch_refclk(dev); 456 457 DRM_LOCK(dev); 458 dev_priv->mm.suspended = 0; 459 460 error = i915_gem_init_hw(dev); 461 DRM_UNLOCK(dev); 462 463 intel_modeset_init_hw(dev); 464 intel_modeset_setup_hw_state(dev, false); 465 drm_irq_install(dev); 466 } 467 468 intel_opregion_init(dev); 469 470 dev_priv->modeset_on_lid = 0; 471 472 #if 0 473 console_lock(); 474 intel_fbdev_set_suspend(dev, 0); 475 console_unlock(); 476 #endif 477 return error; 478 } 479 480 static int 481 i915_resume(device_t kdev) 482 { 483 struct drm_device *dev; 484 int ret; 485 486 dev = device_get_softc(kdev); 487 DRM_DEBUG_KMS("starting resume\n"); 488 #if 0 489 if (pci_enable_device(dev->pdev)) 490 return -EIO; 491 492 pci_set_master(dev->pdev); 493 #endif 494 495 ret = -i915_drm_thaw(dev); 496 if (ret != 0) 497 return (ret); 498 499 drm_kms_helper_poll_enable(dev); 500 ret = bus_generic_resume(kdev); 501 DRM_DEBUG_KMS("finished resume %d\n", ret); 502 return (ret); 503 } 504 505 /* XXX Hack for the old *BSD drm code base 506 * The device id field is set at probe time */ 507 static drm_pci_id_list_t i915_attach_list[] = { 508 {0x8086, 0, 0, "Intel i915 GPU"}, 509 {0, 0, 0, NULL} 510 }; 511 512 static int 513 i915_probe(device_t kdev) 514 { 515 int device, i = 0; 516 517 if (pci_get_class(kdev) != PCIC_DISPLAY) 518 return ENXIO; 519 520 if (pci_get_vendor(kdev) != PCI_VENDOR_INTEL) 521 return ENXIO; 522 523 device = pci_get_device(kdev); 524 525 for (i = 0; pciidlist[i].device != 0; i++) { 526 if (pciidlist[i].device == device) { 527 i915_attach_list[0].device = device; 528 return 0; 529 } 530 } 531 532 return ENXIO; 533 } 534 535 int i915_modeset; 536 537 /* static int __init i915_init(void) */ 538 static int 539 i915_attach(device_t kdev) 540 { 541 struct drm_device *dev; 542 543 dev = device_get_softc(kdev); 544 545 driver.num_ioctls = i915_max_ioctl; 546 547 if (i915_modeset == 1) 548 driver.driver_features |= DRIVER_MODESET; 549 550 dev->driver = &driver; 551 return (drm_attach(kdev, i915_attach_list)); 552 } 553 554 const struct intel_device_info * 555 i915_get_device_id(int device) 556 { 557 const struct intel_gfx_device_id *did; 558 559 for (did = &pciidlist[0]; did->device != 0; did++) { 560 if (did->device != device) 561 continue; 562 return (did->info); 563 } 564 return (NULL); 565 } 566 567 static device_method_t i915_methods[] = { 568 /* Device interface */ 569 DEVMETHOD(device_probe, i915_probe), 570 DEVMETHOD(device_attach, i915_attach), 571 DEVMETHOD(device_suspend, i915_suspend), 572 DEVMETHOD(device_resume, i915_resume), 573 DEVMETHOD(device_detach, drm_detach), 574 DEVMETHOD_END 575 }; 576 577 static driver_t i915_driver = { 578 "drm", 579 i915_methods, 580 sizeof(struct drm_device) 581 }; 582 583 extern devclass_t drm_devclass; 584 DRIVER_MODULE_ORDERED(i915kms, vgapci, i915_driver, drm_devclass, 0, 0, 585 SI_ORDER_ANY); 586 MODULE_DEPEND(i915kms, drm, 1, 1, 1); 587 MODULE_DEPEND(i915kms, agp, 1, 1, 1); 588 MODULE_DEPEND(i915kms, iicbus, 1, 1, 1); 589 MODULE_DEPEND(i915kms, iic, 1, 1, 1); 590 MODULE_DEPEND(i915kms, iicbb, 1, 1, 1); 591 592 int intel_iommu_enabled = 0; 593 TUNABLE_INT("drm.i915.intel_iommu_enabled", &intel_iommu_enabled); 594 595 int i915_semaphores = -1; 596 TUNABLE_INT("drm.i915.semaphores", &i915_semaphores); 597 static int i915_try_reset = 1; 598 TUNABLE_INT("drm.i915.try_reset", &i915_try_reset); 599 unsigned int i915_lvds_downclock = 0; 600 TUNABLE_INT("drm.i915.lvds_downclock", &i915_lvds_downclock); 601 int i915_vbt_sdvo_panel_type = -1; 602 TUNABLE_INT("drm.i915.vbt_sdvo_panel_type", &i915_vbt_sdvo_panel_type); 603 unsigned int i915_powersave = 1; 604 TUNABLE_INT("drm.i915.powersave", &i915_powersave); 605 int i915_enable_fbc = 0; 606 TUNABLE_INT("drm.i915.enable_fbc", &i915_enable_fbc); 607 int i915_enable_rc6 = 0; 608 TUNABLE_INT("drm.i915.enable_rc6", &i915_enable_rc6); 609 int i915_panel_use_ssc = -1; 610 TUNABLE_INT("drm.i915.panel_use_ssc", &i915_panel_use_ssc); 611 int i915_panel_ignore_lid = 0; 612 TUNABLE_INT("drm.i915.panel_ignore_lid", &i915_panel_ignore_lid); 613 int i915_modeset = 1; 614 TUNABLE_INT("drm.i915.modeset", &i915_modeset); 615 int i915_enable_ppgtt = -1; 616 TUNABLE_INT("drm.i915.enable_ppgtt", &i915_enable_ppgtt); 617 int i915_enable_hangcheck = 1; 618 TUNABLE_INT("drm.i915.enable_hangcheck", &i915_enable_hangcheck); 619 620 static int i8xx_do_reset(struct drm_device *dev) 621 { 622 struct drm_i915_private *dev_priv = dev->dev_private; 623 624 if (IS_I85X(dev)) 625 return -ENODEV; 626 627 I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); 628 POSTING_READ(D_STATE); 629 630 if (IS_I830(dev) || IS_845G(dev)) { 631 I915_WRITE(DEBUG_RESET_I830, 632 DEBUG_RESET_DISPLAY | 633 DEBUG_RESET_RENDER | 634 DEBUG_RESET_FULL); 635 POSTING_READ(DEBUG_RESET_I830); 636 msleep(1); 637 638 I915_WRITE(DEBUG_RESET_I830, 0); 639 POSTING_READ(DEBUG_RESET_I830); 640 } 641 642 msleep(1); 643 644 I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); 645 POSTING_READ(D_STATE); 646 647 return 0; 648 } 649 650 static int i965_reset_complete(struct drm_device *dev) 651 { 652 u8 gdrst; 653 gdrst = pci_read_config(dev->dev, I965_GDRST, 1); 654 return (gdrst & 0x1); 655 } 656 657 static int i965_do_reset(struct drm_device *dev) 658 { 659 int ret; 660 u8 gdrst; 661 662 /* 663 * Set the domains we want to reset (GRDOM/bits 2 and 3) as 664 * well as the reset bit (GR/bit 0). Setting the GR bit 665 * triggers the reset; when done, the hardware will clear it. 666 */ 667 gdrst = pci_read_config(dev->dev, I965_GDRST, 1); 668 pci_write_config(dev->dev, I965_GDRST, 669 gdrst | GRDOM_RENDER | 670 GRDOM_RESET_ENABLE, 1); 671 ret = wait_for(i965_reset_complete(dev), 500); 672 if (ret) 673 return ret; 674 675 /* We can't reset render&media without also resetting display ... */ 676 gdrst = pci_read_config(dev->dev, I965_GDRST, 1); 677 pci_write_config(dev->dev, I965_GDRST, 678 gdrst | GRDOM_MEDIA | 679 GRDOM_RESET_ENABLE, 1); 680 681 return wait_for(i965_reset_complete(dev), 500); 682 } 683 684 static int ironlake_do_reset(struct drm_device *dev) 685 { 686 struct drm_i915_private *dev_priv = dev->dev_private; 687 u32 gdrst; 688 int ret; 689 690 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); 691 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 692 gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); 693 ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 694 if (ret) 695 return ret; 696 697 /* We can't reset render&media without also resetting display ... */ 698 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); 699 I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 700 gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); 701 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 702 } 703 704 static int gen6_do_reset(struct drm_device *dev) 705 { 706 struct drm_i915_private *dev_priv = dev->dev_private; 707 int ret; 708 709 dev_priv = dev->dev_private; 710 711 /* Hold gt_lock across reset to prevent any register access 712 * with forcewake not set correctly 713 */ 714 lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); 715 716 /* Reset the chip */ 717 718 /* GEN6_GDRST is not in the gt power well, no need to check 719 * for fifo space for the write or forcewake the chip for 720 * the read 721 */ 722 I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); 723 724 /* Spin waiting for the device to ack the reset request */ 725 ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 726 727 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 728 if (dev_priv->forcewake_count) 729 dev_priv->gt.force_wake_get(dev_priv); 730 else 731 dev_priv->gt.force_wake_put(dev_priv); 732 733 /* Restore fifo count */ 734 dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); 735 736 lockmgr(&dev_priv->gt_lock, LK_RELEASE); 737 return ret; 738 } 739 740 int intel_gpu_reset(struct drm_device *dev) 741 { 742 struct drm_i915_private *dev_priv = dev->dev_private; 743 int ret = -ENODEV; 744 745 switch (INTEL_INFO(dev)->gen) { 746 case 7: 747 case 6: 748 ret = gen6_do_reset(dev); 749 break; 750 case 5: 751 ret = ironlake_do_reset(dev); 752 break; 753 case 4: 754 ret = i965_do_reset(dev); 755 break; 756 case 2: 757 ret = i8xx_do_reset(dev); 758 break; 759 } 760 761 /* Also reset the gpu hangman. */ 762 if (dev_priv->stop_rings) { 763 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n"); 764 dev_priv->stop_rings = 0; 765 if (ret == -ENODEV) { 766 DRM_ERROR("Reset not implemented, but ignoring " 767 "error for simulated gpu hangs\n"); 768 ret = 0; 769 } 770 } 771 772 return ret; 773 } 774 775 /** 776 * i915_reset - reset chip after a hang 777 * @dev: drm device to reset 778 * 779 * Reset the chip. Useful if a hang is detected. Returns zero on successful 780 * reset or otherwise an error code. 781 * 782 * Procedure is fairly simple: 783 * - reset the chip using the reset reg 784 * - re-init context state 785 * - re-init hardware status page 786 * - re-init ring buffer 787 * - re-init interrupt state 788 * - re-init display 789 */ 790 int i915_reset(struct drm_device *dev) 791 { 792 drm_i915_private_t *dev_priv = dev->dev_private; 793 int ret; 794 795 if (!i915_try_reset) 796 return 0; 797 798 DRM_LOCK(dev); 799 800 i915_gem_reset(dev); 801 802 ret = -ENODEV; 803 if (time_uptime - dev_priv->last_gpu_reset < 5) 804 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 805 else 806 ret = intel_gpu_reset(dev); 807 808 dev_priv->last_gpu_reset = time_uptime; 809 if (ret) { 810 DRM_ERROR("Failed to reset chip.\n"); 811 DRM_UNLOCK(dev); 812 return ret; 813 } 814 815 /* Ok, now get things going again... */ 816 817 /* 818 * Everything depends on having the GTT running, so we need to start 819 * there. Fortunately we don't need to do this unless we reset the 820 * chip at a PCI level. 821 * 822 * Next we need to restore the context, but we don't use those 823 * yet either... 824 * 825 * Ring buffer needs to be re-initialized in the KMS case, or if X 826 * was running at the time of the reset (i.e. we weren't VT 827 * switched away). 828 */ 829 if (drm_core_check_feature(dev, DRIVER_MODESET) || 830 !dev_priv->mm.suspended) { 831 struct intel_ring_buffer *ring; 832 int i; 833 834 dev_priv->mm.suspended = 0; 835 836 i915_gem_init_swizzling(dev); 837 838 for_each_ring(ring, dev_priv, i) 839 ring->init(ring); 840 841 i915_gem_context_init(dev); 842 i915_gem_init_ppgtt(dev); 843 844 /* 845 * It would make sense to re-init all the other hw state, at 846 * least the rps/rc6/emon init done within modeset_init_hw. For 847 * some unknown reason, this blows up my ilk, so don't. 848 */ 849 850 DRM_UNLOCK(dev); 851 852 drm_irq_uninstall(dev); 853 drm_irq_install(dev); 854 } else { 855 DRM_UNLOCK(dev); 856 } 857 858 return 0; 859 } 860 861 static struct drm_driver driver = { 862 .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | 863 DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_LOCKLESS_IRQ | 864 DRIVER_GEM /*| DRIVER_MODESET*/, 865 866 .buf_priv_size = sizeof(drm_i915_private_t), 867 .load = i915_driver_load, 868 .open = i915_driver_open, 869 .unload = i915_driver_unload, 870 .preclose = i915_driver_preclose, 871 .lastclose = i915_driver_lastclose, 872 .postclose = i915_driver_postclose, 873 .device_is_agp = i915_driver_device_is_agp, 874 .gem_init_object = i915_gem_init_object, 875 .gem_free_object = i915_gem_free_object, 876 .gem_pager_ops = &i915_gem_pager_ops, 877 .dumb_create = i915_gem_dumb_create, 878 .dumb_map_offset = i915_gem_mmap_gtt, 879 .dumb_destroy = i915_gem_dumb_destroy, 880 881 .ioctls = i915_ioctls, 882 883 .name = DRIVER_NAME, 884 .desc = DRIVER_DESC, 885 .date = DRIVER_DATE, 886 .major = DRIVER_MAJOR, 887 .minor = DRIVER_MINOR, 888 .patchlevel = DRIVER_PATCHLEVEL, 889 }; 890 891 /* We give fast paths for the really cool registers */ 892 #define NEEDS_FORCE_WAKE(dev_priv, reg) \ 893 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ 894 ((reg) < 0x40000) && \ 895 ((reg) != FORCEWAKE)) 896 897 static bool IS_DISPLAYREG(u32 reg) 898 { 899 /* 900 * This should make it easier to transition modules over to the 901 * new register block scheme, since we can do it incrementally. 902 */ 903 if (reg >= VLV_DISPLAY_BASE) 904 return false; 905 906 if (reg >= RENDER_RING_BASE && 907 reg < RENDER_RING_BASE + 0xff) 908 return false; 909 if (reg >= GEN6_BSD_RING_BASE && 910 reg < GEN6_BSD_RING_BASE + 0xff) 911 return false; 912 if (reg >= BLT_RING_BASE && 913 reg < BLT_RING_BASE + 0xff) 914 return false; 915 916 if (reg == PGTBL_ER) 917 return false; 918 919 if (reg >= IPEIR_I965 && 920 reg < HWSTAM) 921 return false; 922 923 if (reg == MI_MODE) 924 return false; 925 926 if (reg == GFX_MODE_GEN7) 927 return false; 928 929 if (reg == RENDER_HWS_PGA_GEN7 || 930 reg == BSD_HWS_PGA_GEN7 || 931 reg == BLT_HWS_PGA_GEN7) 932 return false; 933 934 if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL || 935 reg == GEN6_BSD_RNCID) 936 return false; 937 938 if (reg == GEN6_BLITTER_ECOSKPD) 939 return false; 940 941 if (reg >= 0x4000c && 942 reg <= 0x4002c) 943 return false; 944 945 if (reg >= 0x4f000 && 946 reg <= 0x4f08f) 947 return false; 948 949 if (reg >= 0x4f100 && 950 reg <= 0x4f11f) 951 return false; 952 953 if (reg >= VLV_MASTER_IER && 954 reg <= GEN6_PMIER) 955 return false; 956 957 if (reg >= FENCE_REG_SANDYBRIDGE_0 && 958 reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8))) 959 return false; 960 961 if (reg >= VLV_IIR_RW && 962 reg <= VLV_ISR) 963 return false; 964 965 if (reg == FORCEWAKE_VLV || 966 reg == FORCEWAKE_ACK_VLV) 967 return false; 968 969 if (reg == GEN6_GDRST) 970 return false; 971 972 switch (reg) { 973 case _3D_CHICKEN3: 974 case IVB_CHICKEN3: 975 case GEN7_COMMON_SLICE_CHICKEN1: 976 case GEN7_L3CNTLREG1: 977 case GEN7_L3_CHICKEN_MODE_REGISTER: 978 case GEN7_ROW_CHICKEN2: 979 case GEN7_L3SQCREG4: 980 case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG: 981 case GEN7_HALF_SLICE_CHICKEN1: 982 case GEN6_MBCTL: 983 case GEN6_UCGCTL2: 984 return false; 985 default: 986 break; 987 } 988 989 return true; 990 } 991 992 static void 993 ilk_dummy_write(struct drm_i915_private *dev_priv) 994 { 995 /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the 996 * chip from rc6 before touching it for real. MI_MODE is masked, hence 997 * harmless to write 0 into. */ 998 I915_WRITE_NOTRACE(MI_MODE, 0); 999 } 1000 1001 #define __i915_read(x, y) \ 1002 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1003 u##x val = 0; \ 1004 if (IS_GEN5(dev_priv->dev)) \ 1005 ilk_dummy_write(dev_priv); \ 1006 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1007 lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE); \ 1008 if (dev_priv->forcewake_count == 0) \ 1009 dev_priv->gt.force_wake_get(dev_priv); \ 1010 val = DRM_READ##y(dev_priv->mmio_map, reg); \ 1011 if (dev_priv->forcewake_count == 0) \ 1012 dev_priv->gt.force_wake_put(dev_priv); \ 1013 lockmgr(&dev_priv->gt_lock, LK_RELEASE); \ 1014 } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ 1015 val = DRM_READ##y(dev_priv->mmio_map, reg + 0x180000); \ 1016 } else { \ 1017 val = DRM_READ##y(dev_priv->mmio_map, reg); \ 1018 } \ 1019 trace_i915_reg_rw(false, reg, val, sizeof(val)); \ 1020 return val; \ 1021 } 1022 1023 __i915_read(8, 8) 1024 __i915_read(16, 16) 1025 __i915_read(32, 32) 1026 __i915_read(64, 64) 1027 #undef __i915_read 1028 1029 #define __i915_write(x, y) \ 1030 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ 1031 u32 __fifo_ret = 0; \ 1032 trace_i915_reg_rw(true, reg, val, sizeof(val)); \ 1033 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1034 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 1035 } \ 1036 if (IS_GEN5(dev_priv->dev)) \ 1037 ilk_dummy_write(dev_priv); \ 1038 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ 1039 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ 1040 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ 1041 } \ 1042 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ 1043 DRM_WRITE##y(dev_priv->mmio_map, reg + 0x180000, val); \ 1044 } else { \ 1045 DRM_WRITE##y(dev_priv->mmio_map, reg, val); \ 1046 } \ 1047 if (unlikely(__fifo_ret)) { \ 1048 gen6_gt_check_fifodbg(dev_priv); \ 1049 } \ 1050 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ 1051 DRM_ERROR("Unclaimed write to %x\n", reg); \ 1052 DRM_WRITE32(dev_priv->mmio_map, GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ 1053 } \ 1054 } 1055 1056 __i915_write(8, 8) 1057 __i915_write(16, 16) 1058 __i915_write(32, 32) 1059 __i915_write(64, 64) 1060 #undef __i915_write 1061 1062 static const struct register_whitelist { 1063 uint64_t offset; 1064 uint32_t size; 1065 uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 1066 } whitelist[] = { 1067 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, 1068 }; 1069 1070 int i915_reg_read_ioctl(struct drm_device *dev, 1071 void *data, struct drm_file *file) 1072 { 1073 struct drm_i915_private *dev_priv = dev->dev_private; 1074 struct drm_i915_reg_read *reg = data; 1075 struct register_whitelist const *entry = whitelist; 1076 int i; 1077 1078 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1079 if (entry->offset == reg->offset && 1080 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1081 break; 1082 } 1083 1084 if (i == ARRAY_SIZE(whitelist)) 1085 return -EINVAL; 1086 1087 switch (entry->size) { 1088 case 8: 1089 reg->val = I915_READ64(reg->offset); 1090 break; 1091 case 4: 1092 reg->val = I915_READ(reg->offset); 1093 break; 1094 case 2: 1095 reg->val = I915_READ16(reg->offset); 1096 break; 1097 case 1: 1098 reg->val = I915_READ8(reg->offset); 1099 break; 1100 default: 1101 WARN_ON(1); 1102 return -EINVAL; 1103 } 1104 1105 return 0; 1106 } 1107