/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi_drm/i915_drm.h>
#include <uapi_drm/drm_fourcc.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/kconfig.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>
#include "intel_guc.h"

#define CONFIG_DRM_FBDEV_EMULATION 1
#define CONFIG_DRM_I915_KMS 1
#define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1
#define CONFIG_ACPI 1
#define CONFIG_X86 1

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20151010"

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x)
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x)

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks module
 * parameter, to enable distros and users to tailor their preferred amount
 * of i915 abrt spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on)) {					\
		if (i915.verbose_state_checks)				\
			WARN(1, format);				\
		else							\
			DRM_ERROR(format);				\
	}								\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(condition) ({				\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on)) {					\
		if (i915.verbose_state_checks)				\
			WARN(1, "WARN_ON(" #condition ")\n");		\
		else							\
			DRM_ERROR("WARN_ON(" #condition ")\n");		\
	}								\
	unlikely(__ret_warn_on);					\
})
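/*
 * Illustrative only: a minimal sketch of how a modeset assert helper might
 * use I915_STATE_WARN(). The helper name and the register/bit chosen below
 * are assumptions for illustration, not part of this file.
 */
#if 0
static void example_assert_pll_enabled(struct drm_i915_private *dev_priv)
{
	/* Hypothetical check: warn if the pipe A DPLL is unexpectedly off. */
	bool cur_state = !!(I915_READ(DPLL(PIPE_A)) & DPLL_VCO_ENABLE);

	I915_STATE_WARN(!cur_state, "PLL for pipe A asserted off, expected on\n");
}
#endif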
static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC.  Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DDI_E_2_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
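/*
 * Example (not from the original source): because the power domain enum
 * mirrors the pipe/transcoder enums in order, the mapping macros above are
 * plain offsets, with eDP special-cased:
 *
 *   POWER_DOMAIN_PIPE(PIPE_B)               == POWER_DOMAIN_PIPE_B
 *   POWER_DOMAIN_TRANSCODER(TRANSCODER_C)   == POWER_DOMAIN_TRANSCODER_C
 *   POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) == POWER_DOMAIN_TRANSCODER_EDP
 */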
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue on acquiring a mode config
	 * mutex that userspace may have taken. However, userspace is
	 * waiting on the DP workqueue to run, which is blocked behind the
	 * non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &dev->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &dev->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		if ((1 << (domain)) & (mask))
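/*
 * Illustrative only: a hedged sketch of how the iterators above are meant
 * to be used, here dumping which power domains are present in a mask. The
 * function below is hypothetical and not part of the driver.
 */
#if 0
static void example_dump_domains(unsigned long mask)
{
	enum intel_display_power_domain domain;

	/* Visits only the domains whose bit is set in @mask. */
	for_each_power_domain(domain, mask)
		DRM_DEBUG_KMS("domain %d is in the mask\n", domain);
}
#endif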
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		struct spinlock lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	struct intel_engine_cs *bsd_ring;
};

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	DPLL_ID_SPLL = 2,

	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;
	uint32_t spll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the lower
	 * part of ctrl1 and they get shifted into position when writing
	 * the register.  This allows us to easily compare the state to
	 * share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
		 pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;

	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
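/*
 * Rough worked example (an assumption for illustration, not from this file):
 * intel_link_compute_m_n() derives the data M/N and link M/N ratios the
 * hardware uses to spread pixel data across the link. Conceptually:
 *
 *   data M/N ~= (pixel_clock * bpp) / (link_clock * nlanes * 8)
 *   link M/N ~=  pixel_clock / link_clock
 *
 * e.g. 1080p at a 148500 kHz pixel clock, 24 bpp, over 4 lanes at
 * 270000 kHz (HBR) gives a data ratio of
 * 148500 * 24 / (270000 * 4 * 8) = 0.4125.
 */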
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *vbt;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	int iommu;
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u64 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	struct lock lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
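/*
 * Illustrative only: a sketch of walking the initialised forcewake domains,
 * e.g. to inspect their wake counts. The function below is a hypothetical
 * example, not driver code.
 */
#if 0
static void example_count_wakes(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	int id;

	/* Skips domains that are not present in uncore.fw_domains. */
	for_each_fw_domain(domain, dev_priv, id)
		DRM_DEBUG_DRIVER("fw domain %d wake_count=%u\n",
				 id, domain->wake_count);
}
#endif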
enum csr_state {
	FW_UNINITIALIZED = 0,
	FW_LOADED,
	FW_FAILED
};

struct intel_csr {
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t mmio_count;
	uint32_t mmioaddr[8];
	uint32_t mmiodata[8];
	enum csr_state state;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
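/*
 * For reference: DEV_INFO_FOR_EACH_FLAG() is an X-macro. With DEFINE_FLAG
 * and SEP_SEMICOLON as above, the invocation inside intel_device_info
 * expands to a run of single-bit fields, i.e.:
 *
 *   u8 is_mobile:1;
 *   u8 is_i85x:1;
 *   ...
 *   u8 has_fpga_dbg:1;
 *
 * Other call sites can pass a different func/sep pair to generate, for
 * example, printing code over the same flag list.
 */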
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting any more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

#define CONTEXT_NO_ZEROMAP (1<<0)
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		   initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_private *i915;
	int flags;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};
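/*
 * Illustrative only: contexts are looked up by their userspace handle in
 * the per-file idr (see drm_i915_file_private.context_idr). A minimal
 * sketch assuming the standard idr API; the helper below is hypothetical.
 */
#if 0
static struct intel_context *
example_get_context(struct drm_i915_file_private *file_priv, u32 id)
{
	struct intel_context *ctx;

	/* Maps the execbuf/ioctl handle back to the driver object. */
	ctx = idr_find(&file_priv->context_idr, id);
	if (ctx)
		kref_get(&ctx->ref);
	return ctx;
}
#endif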
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct i915_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct lock lock;
	unsigned long uncompressed_size;
	unsigned threshold;
	unsigned int fb_id;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature
	 * is possible. */
	bool enabled;

	struct intel_fbc_work {
		struct delayed_work work;
		struct intel_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM, /* disabled via module parameter */
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
		FBC_ROTATION, /* rotation is not supported */
		FBC_IN_DBG_MASTER, /* kernel debugger is active */
		FBC_BAD_STRIDE, /* stride is not supported */
		FBC_PIXEL_RATE, /* pixel rate is too big */
		FBC_PIXEL_FORMAT /* pixel format is invalid */
	} no_fbc_reason;

	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
	void (*enable_fbc)(struct intel_crtc *crtc);
	void (*disable_fbc)(struct drm_i915_private *dev_priv);
};
/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};
struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	struct lock client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work delayed_resume_work;
	unsigned boosts;

	struct intel_rps_client semaphores, mmioflips;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct lock hw_lock;
};
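/*
 * Example of the "platform dependent multiples" above (hedged, not from
 * this file): on SNB through BDW the RPS registers count in units of
 * 50 MHz, so a cur_freq of 14 means 700 MHz; VLV/CHV instead scale by a
 * divider derived from the CZ clock, which is why the conversion is left
 * to helpers rather than done here.
 */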
/* defined in intel_pm.c */
extern struct lock mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct lock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};
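/*
 * Illustrative only: the refcounting described above is driven by the
 * get/put pair, as in this hedged sketch (the surrounding function is
 * hypothetical):
 */
#if 0
static void example_touch_audio_hw(struct drm_i915_private *dev_priv)
{
	/* 0->1 transition on the backing well calls ops->enable(). */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
	/* ... program audio registers; the well is guaranteed on ... */
	/* 1->0 transition calls ops->disable(). */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
#endif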
#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct lock stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
#if 0
	struct shrinker shrinker;
#endif
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	struct spinlock object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	struct lock lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress and even values mean that
	 * the (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN	(1 << 31)
#define I915_STOP_RING_ALLOW_WARN	(1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	bool reload_in_reset;
};
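/*
 * Illustrative only: decoding reset_counter per the comment above. These
 * mirror the style of the driver's reset-query helpers, but the names and
 * bodies here are examples, not the driver's own definitions.
 */
#if 0
static inline bool example_reset_in_progress(struct i915_gpu_error *error)
{
	/* Odd counter (in-progress) or terminally wedged both count. */
	return unlikely(atomic_read(&error->reset_counter) &
			(I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline u32 example_reset_count(struct i915_gpu_error *error)
{
	/* The counter is bumped twice per completed reset. */
	return (atomic_read(&error->reset_counter) & ~I915_WEDGED) >> 1;
}
#endif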
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
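/*
 * Illustrative only: with 'end' exclusive, two DDB entries overlap iff
 * e1->start < e2->end && e2->start < e1->end. A hedged sketch of such a
 * helper (hypothetical name, not defined by this header):
 */
#if 0
static inline bool example_ddb_entries_overlap(const struct skl_ddb_entry *e1,
					       const struct skl_ddb_entry *e2)
{
	return e1->start < e2->end && e2->start < e1->end;
}
#endif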
struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages
 * in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	struct spinlock lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct lock lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct i915_virtual_gpu {
	bool active;
};

struct i915_execbuffer_params {
	struct drm_device		*dev;
	struct drm_file			*file;
	uint32_t			dispatch_flags;
	uint32_t			args_batch_start_offset;
	uint64_t			batch_obj_vm_offset;
	struct intel_engine_cs		*ring;
	struct drm_i915_gem_object	*batch_obj;
	struct intel_context		*ctx;
	struct drm_i915_gem_request	*request;
};
struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	struct intel_device_info info;

	int relative_constants_mode;

	char __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_guc guc;

	struct intel_csr csr;

	/* Display CSR-related protection */
	struct lock csr_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	struct lock irq_lock;

	/* protects the mmio flip data */
	struct spinlock mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct lock sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the
	 * global mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* list of fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct lock av_mutex;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 chv_phy_control;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};

		uint8_t max_level;
	} wm;

	struct i915_runtime_pm pm;

	uint32_t bios_vgacntr;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*execbuf_submit)(struct i915_execbuffer_params *params,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	bool edp_low_vswing;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
1974 */ 1975 }; 1976 1977 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1978 { 1979 return dev->dev_private; 1980 } 1981 1982 static inline struct drm_i915_private *dev_to_i915(struct device *dev) 1983 { 1984 BUG(); 1985 } 1986 1987 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 1988 { 1989 return container_of(guc, struct drm_i915_private, guc); 1990 } 1991 1992 /* Iterate over initialised rings */ 1993 #define for_each_ring(ring__, dev_priv__, i__) \ 1994 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 1995 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) 1996 1997 enum hdmi_force_audio { 1998 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 1999 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2000 HDMI_AUDIO_AUTO, /* trust EDID */ 2001 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2002 }; 2003 2004 #define I915_GTT_OFFSET_NONE ((u32)-1) 2005 2006 struct drm_i915_gem_object_ops { 2007 /* Interface between the GEM object and its backing storage. 2008 * get_pages() is called once prior to the use of the associated set 2009 * of pages before binding them into the GTT, and put_pages() is 2010 * called after we no longer need them. As we expect there to be 2011 * associated cost with migrating pages between the backing storage 2012 * and making them available for the GPU (e.g. clflush), we may hold 2013 * onto the pages after they are no longer referenced by the GPU 2014 * in case they may be used again shortly (for example migrating the 2015 * pages to a different memory domain within the GTT). put_pages() 2016 * will therefore most likely be called when the object itself is 2017 * being released or under memory pressure (where we attempt to 2018 * reap pages for the shrinker). 2019 */ 2020 int (*get_pages)(struct drm_i915_gem_object *); 2021 void (*put_pages)(struct drm_i915_gem_object *); 2022 int (*dmabuf_export)(struct drm_i915_gem_object *); 2023 void (*release)(struct drm_i915_gem_object *); 2024 }; 2025 2026 /* 2027 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2028 * considered to be the frontbuffer for the given plane interface-wise. This 2029 * doesn't mean that the hw necessarily already scans it out, but that any 2030 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2031 * 2032 * We have one bit per pipe and per scanout plane type. 2033 */ 2034 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2035 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2036 #define INTEL_FRONTBUFFER_BITS \ 2037 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) 2038 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2039 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2040 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2041 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2042 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2043 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2044 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2045 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2046 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2047 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2048 2049 struct drm_i915_gem_object { 2050 struct drm_gem_object base; 2051 2052 const struct drm_i915_gem_object_ops *ops; 2053 2054 /** List of VMAs backed by this object */ 2055 struct list_head vma_list; 2056 2057 /** Stolen memory for this object, instead of being backed by shmem.
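 * (Such objects come from the stolen-memory allocators, e.g.
 * i915_gem_object_create_stolen() declared later in this header.)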
*/ 2058 struct drm_mm_node *stolen; 2059 struct list_head global_list; 2060 2061 struct list_head ring_list[I915_NUM_RINGS]; 2062 /** Used in execbuf to temporarily hold a ref */ 2063 struct list_head obj_exec_link; 2064 2065 struct list_head batch_pool_link; 2066 2067 /** 2068 * This is set if the object is on the active lists (has pending 2069 * rendering and so a non-zero seqno), and is not set if it is on the 2070 * inactive (ready to be unbound) list. 2071 */ 2072 unsigned int active:I915_NUM_RINGS; 2073 2074 /** 2075 * This is set if the object has been written to since last bound 2076 * to the GTT 2077 */ 2078 unsigned int dirty:1; 2079 2080 /** 2081 * Fence register bits (if any) for this object. Will be set 2082 * as needed when mapped into the GTT. 2083 * Protected by dev->struct_mutex. 2084 */ 2085 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 2086 2087 /** 2088 * Advice: are the backing pages purgeable? 2089 */ 2090 unsigned int madv:2; 2091 2092 /** 2093 * Current tiling mode for the object. 2094 */ 2095 unsigned int tiling_mode:2; 2096 /** 2097 * Whether the tiling parameters for the currently associated fence 2098 * register have changed. Note that for the purposes of tracking 2099 * tiling changes we also treat the unfenced register, the register 2100 * slot that the object occupies whilst it executes a fenced 2101 * command (such as BLT on gen2/3), as a "fence". 2102 */ 2103 unsigned int fence_dirty:1; 2104 2105 /** 2106 * Is the object at the current location in the gtt mappable and 2107 * fenceable? Used to avoid costly recalculations. 2108 */ 2109 unsigned int map_and_fenceable:1; 2110 2111 /** 2112 * Whether the current gtt mapping needs to be mappable (and isn't just 2113 * mappable by accident). Track pin and fault separately for a more 2114 * accurate mappable working set. 2115 */ 2116 unsigned int fault_mappable:1; 2117 2118 /* 2119 * Is the object to be mapped as read-only to the GPU 2120 * Only honoured if hardware has relevant pte bit 2121 */ 2122 unsigned long gt_ro:1; 2123 unsigned int cache_level:3; 2124 unsigned int cache_dirty:1; 2125 2126 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2127 2128 unsigned int pin_display; 2129 2130 struct sg_table *pages; 2131 int pages_pin_count; 2132 struct get_page { 2133 struct scatterlist *sg; 2134 int last; 2135 } get_page; 2136 2137 /* prime dma-buf support */ 2138 void *dma_buf_vmapping; 2139 int vmapping_count; 2140 2141 /** Breadcrumb of last rendering to the buffer. 2142 * There can only be one writer, but we allow for multiple readers. 2143 * If there is a writer that necessarily implies that all other 2144 * read requests are complete - but we may only be lazily clearing 2145 * the read requests. A read request is naturally the most recent 2146 * request on a ring, so we may have two different write and read 2147 * requests on one ring where the write request is older than the 2148 * read request. This allows for the CPU to read from an active 2149 * buffer by only waiting for the write to complete. 2150 */ 2151 struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS]; 2152 struct drm_i915_gem_request *last_write_req; 2153 /** Breadcrumb of last fenced GPU access to the buffer. */ 2154 struct drm_i915_gem_request *last_fenced_req; 2155 2156 /** Current tiling stride for the object, if it's tiled. */ 2157 uint32_t stride; 2158 2159 /** References from framebuffers, locks out tiling changes. */ 2160 unsigned long framebuffer_references; 2161 2162 /** Record of address bit 17 of each page at last unbind.
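 * (Maintained by i915_gem_object_save_bit_17_swizzle() and consumed by
 * i915_gem_object_do_bit_17_swizzle(), declared below, on platforms whose
 * swizzle mode depends on physical address bit 17.)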
*/ 2163 unsigned long *bit_17; 2164 2165 union { 2166 /** for phy allocated objects */ 2167 struct drm_dma_handle *phys_handle; 2168 2169 struct i915_gem_userptr { 2170 uintptr_t ptr; 2171 unsigned read_only :1; 2172 unsigned workers :4; 2173 #define I915_GEM_USERPTR_MAX_WORKERS 15 2174 2175 struct i915_mm_struct *mm; 2176 struct i915_mmu_object *mmu_object; 2177 struct work_struct *work; 2178 } userptr; 2179 }; 2180 }; 2181 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2182 2183 void i915_gem_track_fb(struct drm_i915_gem_object *old, 2184 struct drm_i915_gem_object *new, 2185 unsigned frontbuffer_bits); 2186 2187 /** 2188 * Request queue structure. 2189 * 2190 * The request queue allows us to note sequence numbers that have been emitted 2191 * and may be associated with active buffers to be retired. 2192 * 2193 * By keeping this list, we can avoid having to do questionable sequence 2194 * number comparisons on buffer last_read|write_seqno. It also allows an 2195 * emission time to be associated with the request for tracking how far ahead 2196 * of the GPU the submission is. 2197 * 2198 * The requests are reference counted, so upon creation they should have an 2199 * initial reference taken using kref_init(). 2200 */ 2201 struct drm_i915_gem_request { 2202 struct kref ref; 2203 2204 /** On which ring this request was generated */ 2205 struct drm_i915_private *i915; 2206 struct intel_engine_cs *ring; 2207 2208 /** GEM sequence number associated with the previous request, 2209 * when the HWS breadcrumb is equal to this the GPU is processing 2210 * this request. 2211 */ 2212 u32 previous_seqno; 2213 2214 /** GEM sequence number associated with this request, 2215 * when the HWS breadcrumb is equal to or greater than this the GPU 2216 * has finished processing this request. 2217 */ 2218 u32 seqno; 2219 2220 /** Position in the ringbuffer of the start of the request */ 2221 u32 head; 2222 2223 /** 2224 * Position in the ringbuffer of the start of the postfix. 2225 * This is required to calculate the maximum available ringbuffer 2226 * space without overwriting the postfix. 2227 */ 2228 u32 postfix; 2229 2230 /** Position in the ringbuffer of the end of the whole request */ 2231 u32 tail; 2232 2233 /** 2234 * Context and ring buffer related to this request 2235 * Contexts are refcounted, so when this request is associated with a 2236 * context, we must increment the context's refcount, to guarantee that 2237 * it persists while any request is linked to it. Requests themselves 2238 * are also refcounted, so the request will only be freed when the last 2239 * reference to it is dismissed, and the code in 2240 * i915_gem_request_free() will then decrement the refcount on the 2241 * context. 2242 */ 2243 struct intel_context *ctx; 2244 struct intel_ringbuffer *ringbuf; 2245 2246 /** Batch buffer related to this request if any (used for 2247 error state dump only) */ 2248 struct drm_i915_gem_object *batch_obj; 2249 2250 /** Time at which this request was emitted, in jiffies. */ 2251 unsigned long emitted_jiffies; 2252 2253 /** global list entry for this request */ 2254 struct list_head list; 2255 2256 struct drm_i915_file_private *file_priv; 2257 /** file_priv list entry for this request */ 2258 struct list_head client_list; 2259 2260 /** process identifier submitting this request */ 2261 pid_t pid; 2262 2263 /** 2264 * The ELSP only accepts two elements at a time, so we queue 2265 * context/tail pairs on a given queue (ring->execlist_queue) until the 2266 * hardware is available.
The queue serves a double purpose: we also use 2267 * it to keep track of the up to 2 contexts currently in the hardware 2268 * (usually one in execution and the other queued up by the GPU): We 2269 * only remove elements from the head of the queue when the hardware 2270 * informs us that an element has been completed. 2271 * 2272 * All accesses to the queue are mediated by a spinlock 2273 * (ring->execlist_lock). 2274 */ 2275 2276 /** Execlist link in the submission queue. */ 2277 struct list_head execlist_link; 2278 2279 /** Execlists: number of times this request has been sent to the ELSP */ 2280 int elsp_submitted; 2281 2282 }; 2283 2284 int i915_gem_request_alloc(struct intel_engine_cs *ring, 2285 struct intel_context *ctx, 2286 struct drm_i915_gem_request **req_out); 2287 void i915_gem_request_cancel(struct drm_i915_gem_request *req); 2288 void i915_gem_request_free(struct kref *req_ref); 2289 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2290 struct drm_file *file); 2291 2292 static inline uint32_t 2293 i915_gem_request_get_seqno(struct drm_i915_gem_request *req) 2294 { 2295 return req ? req->seqno : 0; 2296 } 2297 2298 static inline struct intel_engine_cs * 2299 i915_gem_request_get_ring(struct drm_i915_gem_request *req) 2300 { 2301 return req ? req->ring : NULL; 2302 } 2303 2304 static inline struct drm_i915_gem_request * 2305 i915_gem_request_reference(struct drm_i915_gem_request *req) 2306 { 2307 if (req) 2308 kref_get(&req->ref); 2309 return req; 2310 } 2311 2312 static inline void 2313 i915_gem_request_unreference(struct drm_i915_gem_request *req) 2314 { 2315 WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); 2316 kref_put(&req->ref, i915_gem_request_free); 2317 } 2318 2319 static inline void 2320 i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) 2321 { 2322 struct drm_device *dev; 2323 2324 if (!req) 2325 return; 2326 2327 dev = req->ring->dev; 2328 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) 2329 mutex_unlock(&dev->struct_mutex); 2330 } 2331 2332 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2333 struct drm_i915_gem_request *src) 2334 { 2335 if (src) 2336 i915_gem_request_reference(src); 2337 2338 if (*pdst) 2339 i915_gem_request_unreference(*pdst); 2340 2341 *pdst = src; 2342 } 2343 2344 /* 2345 * XXX: i915_gem_request_completed should be here but currently needs the 2346 * definition of i915_seqno_passed() which is below. It will be moved in 2347 * a later patch when the call to i915_seqno_passed() is obsoleted... 2348 */ 2349 2350 /* 2351 * A command that requires special handling by the command parser. 2352 */ 2353 struct drm_i915_cmd_descriptor { 2354 /* 2355 * Flags describing how the command parser processes the command.
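 * (Illustrative example: a hypothetical descriptor for a fixed-length
 * command that writes a single whitelisted register would combine
 * CMD_DESC_FIXED | CMD_DESC_REGISTER from the list below and fill in the
 * length.fixed and reg.offset/reg.mask fields; CMD_DESC_BITMASK likewise
 * enables the masked-dword checks described at bits[] further down.)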
2356 * 2357 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2358 * a length mask if not set 2359 * CMD_DESC_SKIP: The command is allowed but does not follow the 2360 * standard length encoding for the opcode range in 2361 * which it falls 2362 * CMD_DESC_REJECT: The command is never allowed 2363 * CMD_DESC_REGISTER: The command should be checked against the 2364 * register whitelist for the appropriate ring 2365 * CMD_DESC_MASTER: The command is allowed if the submitting process 2366 * is the DRM master 2367 */ 2368 u32 flags; 2369 #define CMD_DESC_FIXED (1<<0) 2370 #define CMD_DESC_SKIP (1<<1) 2371 #define CMD_DESC_REJECT (1<<2) 2372 #define CMD_DESC_REGISTER (1<<3) 2373 #define CMD_DESC_BITMASK (1<<4) 2374 #define CMD_DESC_MASTER (1<<5) 2375 2376 /* 2377 * The command's unique identification bits and the bitmask to get them. 2378 * This isn't strictly the opcode field as defined in the spec and may 2379 * also include type, subtype, and/or subop fields. 2380 */ 2381 struct { 2382 u32 value; 2383 u32 mask; 2384 } cmd; 2385 2386 /* 2387 * The command's length. The command is either fixed length (i.e. does 2388 * not include a length field) or has a length field mask. The flag 2389 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2390 * a length mask. All command entries in a command table must include 2391 * length information. 2392 */ 2393 union { 2394 u32 fixed; 2395 u32 mask; 2396 } length; 2397 2398 /* 2399 * Describes where to find a register address in the command to check 2400 * against the ring's register whitelist. Only valid if flags has the 2401 * CMD_DESC_REGISTER bit set. 2402 * 2403 * A non-zero step value implies that the command may access multiple 2404 * registers in sequence (e.g. LRI), in that case step gives the 2405 * distance in dwords between individual offset fields. 2406 */ 2407 struct { 2408 u32 offset; 2409 u32 mask; 2410 u32 step; 2411 } reg; 2412 2413 #define MAX_CMD_DESC_BITMASKS 3 2414 /* 2415 * Describes command checks where a particular dword is masked and 2416 * compared against an expected value. If the command does not match 2417 * the expected value, the parser rejects it. Only valid if flags has 2418 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2419 * are valid. 2420 * 2421 * If the check specifies a non-zero condition_mask then the parser 2422 * only performs the check when the bits specified by condition_mask 2423 * are non-zero. 2424 */ 2425 struct { 2426 u32 offset; 2427 u32 mask; 2428 u32 expected; 2429 u32 condition_offset; 2430 u32 condition_mask; 2431 } bits[MAX_CMD_DESC_BITMASKS]; 2432 }; 2433 2434 /* 2435 * A table of commands requiring special handling by the command parser. 2436 * 2437 * Each ring has an array of tables. Each table consists of an array of command 2438 * descriptors, which must be sorted with command opcodes in ascending order. 2439 */ 2440 struct drm_i915_cmd_table { 2441 const struct drm_i915_cmd_descriptor *table; 2442 int count; 2443 }; 2444 2445 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
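 * __I915__() accepts either a struct drm_device * or a struct
 * drm_i915_private * argument; the __builtin_types_compatible_p() checks
 * select the right conversion at compile time, so e.g. INTEL_INFO(dev)
 * and INTEL_INFO(dev_priv) both work.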
*/ 2446 #define __I915__(p) ({ \ 2447 const struct drm_i915_private *__p; \ 2448 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2449 __p = (const struct drm_i915_private *)p; \ 2450 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2451 __p = to_i915((const struct drm_device *)p); \ 2452 __p; \ 2453 }) 2454 #define INTEL_INFO(p) (&__I915__(p)->info) 2455 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2456 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2457 2458 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2459 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2460 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2461 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2462 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2463 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2464 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2465 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2466 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2467 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2468 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2469 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2470 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2471 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2472 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2473 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2474 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2475 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2476 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2477 INTEL_DEVID(dev) == 0x0152 || \ 2478 INTEL_DEVID(dev) == 0x015a) 2479 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2480 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2481 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2482 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2483 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2484 #define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) 2485 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2486 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2487 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2488 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2489 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2490 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2491 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2492 /* ULX machines are also considered ULT. */ 2493 #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ 2494 (INTEL_DEVID(dev) & 0xf) == 0xe) 2495 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2496 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2497 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2498 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2499 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2500 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2501 /* ULX machines are also considered ULT. 
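 * For example, device id 0x0A0E below satisfies IS_HSW_ULX() and, since
 * (0x0A0E & 0xFF00) == 0x0A00, the IS_HSW_ULT() check above as well.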
*/ 2502 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2503 INTEL_DEVID(dev) == 0x0A1E) 2504 #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ 2505 INTEL_DEVID(dev) == 0x1913 || \ 2506 INTEL_DEVID(dev) == 0x1916 || \ 2507 INTEL_DEVID(dev) == 0x1921 || \ 2508 INTEL_DEVID(dev) == 0x1926) 2509 #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2510 INTEL_DEVID(dev) == 0x1915 || \ 2511 INTEL_DEVID(dev) == 0x191E) 2512 #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ 2513 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2514 #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ 2515 (INTEL_DEVID(dev) & 0x00F0) == 0x0030) 2516 2517 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2518 2519 #define SKL_REVID_A0 (0x0) 2520 #define SKL_REVID_B0 (0x1) 2521 #define SKL_REVID_C0 (0x2) 2522 #define SKL_REVID_D0 (0x3) 2523 #define SKL_REVID_E0 (0x4) 2524 #define SKL_REVID_F0 (0x5) 2525 2526 #define BXT_REVID_A0 (0x0) 2527 #define BXT_REVID_B0 (0x3) 2528 #define BXT_REVID_C0 (0x9) 2529 2530 /* 2531 * The genX designation typically refers to the render engine, so render 2532 * capability related checks should use IS_GEN, while display and other checks 2533 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2534 * chips, etc.). 2535 */ 2536 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2537 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2538 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2539 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2540 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2541 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2542 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2543 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2544 2545 #define RENDER_RING (1<<RCS) 2546 #define BSD_RING (1<<VCS) 2547 #define BLT_RING (1<<BCS) 2548 #define VEBOX_RING (1<<VECS) 2549 #define BSD2_RING (1<<VCS2) 2550 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2551 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2552 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2553 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2554 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2555 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2556 __I915__(dev)->ellc_size) 2557 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2558 2559 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2560 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2561 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2562 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2563 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2564 2565 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2566 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2567 2568 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2569 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2570 /* 2571 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2572 * even when in MSI mode. This results in spurious interrupt warnings if the 2573 * legacy irq no. is shared with another device. The kernel then disables that 2574 * interrupt source and so prevents the other device from working properly. 
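 * Hence HAS_AUX_IRQ()/HAS_GMBUS_IRQ() below only advertise irq-driven
 * dp aux and gmbus support on gen5+.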
2575 */ 2576 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2577 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2578 2579 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2580 * rows, which changed the alignment requirements and fence programming. 2581 */ 2582 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2583 IS_I915GM(dev))) 2584 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2585 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2586 2587 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2588 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2589 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2590 2591 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2592 2593 #define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2594 INTEL_INFO(dev)->gen >= 9) 2595 2596 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2597 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2598 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2599 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2600 IS_SKYLAKE(dev)) 2601 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2602 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2603 IS_SKYLAKE(dev)) 2604 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2605 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2606 2607 #define HAS_CSR(dev) (IS_GEN9(dev)) 2608 2609 #define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) 2610 #define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) 2611 2612 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2613 INTEL_INFO(dev)->gen >= 8) 2614 2615 #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2616 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) 2617 2618 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2619 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2620 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2621 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2622 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2623 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2624 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2625 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2626 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2627 2628 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2629 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2630 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2631 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2632 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2633 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2634 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2635 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2636 2637 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 2638 2639 /* DPF == dynamic parity feature */ 2640 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2641 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) 2642 2643 #define GT_FREQUENCY_MULTIPLIER 50 2644 #define GEN9_FREQ_SCALER 3 2645 2646 #include "i915_trace.h" 2647 2648 extern const struct drm_ioctl_desc i915_ioctls[]; 2649 extern int i915_max_ioctl; 2650 2651 extern int i915_suspend_switcheroo(device_t kdev); 2652 extern int i915_resume_switcheroo(struct drm_device *dev); 2653 2654 /* i915_params.c */ 2655 struct i915_params { 2656 int modeset; 2657 int panel_ignore_lid; 2658 int semaphores; 2659 int lvds_channel_mode; 2660 int panel_use_ssc; 2661 int vbt_sdvo_panel_type; 2662 int enable_rc6; 2663 int enable_fbc; 2664 int enable_ppgtt; 2665 int enable_execlists; 2666 int enable_psr; 2667 unsigned int preliminary_hw_support; 2668 int disable_power_well; 2669 int enable_ips; 2670 int invert_brightness; 2671 int enable_cmd_parser; 2672 /* leave bools at the end to not create holes */ 2673 bool enable_hangcheck; 2674 bool fastboot; 2675 bool prefault_disable; 2676 bool load_detect_test; 2677 int reset; 2678 bool disable_display; 2679 bool disable_vtd_wa; 2680 bool enable_guc_submission; 2681 int guc_log_level; 2682 int use_mmio_flip; 2683 int mmio_debug; 2684 bool verbose_state_checks; 2685 bool nuclear_pageflip; 2686 int edp_vswing; 2687 }; 2688 extern struct i915_params i915 __read_mostly; 2689 2690 /* i915_dma.c */ 2691 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2692 extern int i915_driver_unload(struct drm_device *); 2693 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2694 extern void i915_driver_lastclose(struct drm_device * dev); 2695 extern void i915_driver_preclose(struct drm_device *dev, 2696 struct drm_file *file); 2697 extern void i915_driver_postclose(struct drm_device *dev, 2698 struct drm_file *file); 2699 #ifdef CONFIG_COMPAT 2700 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2701 unsigned long arg); 2702 #endif 2703 extern int intel_gpu_reset(struct drm_device *dev); 2704 extern bool intel_has_gpu_reset(struct drm_device *dev); 2705 extern int i915_reset(struct drm_device *dev); 2706 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2707 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2708 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2709 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2710 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2711 void i915_firmware_load_error_print(const char *fw_path, int err); 2712 2713 /* intel_hotplug.c */ 2714 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2715 void intel_hpd_init(struct drm_i915_private *dev_priv); 2716 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2717 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2718 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2719 2720 /* i915_irq.c */ 2721 void i915_queue_hangcheck(struct drm_device *dev); 2722 __printf(3, 4) 2723 void i915_handle_error(struct drm_device *dev, bool wedged, 2724 const char *fmt, ...); 2725 2726 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2727 int intel_irq_install(struct drm_i915_private *dev_priv); 2728 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2729 2730 extern void intel_uncore_sanitize(struct drm_device *dev); 2731 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2732 bool restore_forcewake); 2733 extern void intel_uncore_init(struct drm_device *dev); 2734 extern void 
intel_uncore_check_errors(struct drm_device *dev); 2735 extern void intel_uncore_fini(struct drm_device *dev); 2736 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2737 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2738 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2739 enum forcewake_domains domains); 2740 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2741 enum forcewake_domains domains); 2742 /* Like above but the caller must manage the uncore.lock itself. 2743 * Must be used with I915_READ_FW and friends. 2744 */ 2745 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2746 enum forcewake_domains domains); 2747 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2748 enum forcewake_domains domains); 2749 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2750 static inline bool intel_vgpu_active(struct drm_device *dev) 2751 { 2752 return to_i915(dev)->vgpu.active; 2753 } 2754 2755 void 2756 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2757 u32 status_mask); 2758 2759 void 2760 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2761 u32 status_mask); 2762 2763 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2764 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2765 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2766 uint32_t mask, 2767 uint32_t bits); 2768 void 2769 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2770 void 2771 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2772 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2773 uint32_t interrupt_mask, 2774 uint32_t enabled_irq_mask); 2775 #define ibx_enable_display_interrupt(dev_priv, bits) \ 2776 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 2777 #define ibx_disable_display_interrupt(dev_priv, bits) \ 2778 ibx_display_interrupt_update((dev_priv), (bits), 0) 2779 2780 /* i915_gem.c */ 2781 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2782 struct drm_file *file_priv); 2783 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2784 struct drm_file *file_priv); 2785 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2786 struct drm_file *file_priv); 2787 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2788 struct drm_file *file_priv); 2789 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2790 struct drm_file *file_priv); 2791 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2792 struct drm_file *file_priv); 2793 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2794 struct drm_file *file_priv); 2795 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2796 struct drm_i915_gem_request *req); 2797 void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); 2798 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, 2799 struct drm_i915_gem_execbuffer2 *args, 2800 struct list_head *vmas); 2801 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2802 struct drm_file *file_priv); 2803 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2804 struct drm_file *file_priv); 2805 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2806 struct drm_file *file_priv); 2807 int 
i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2808 struct drm_file *file); 2809 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2810 struct drm_file *file); 2811 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2812 struct drm_file *file_priv); 2813 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2814 struct drm_file *file_priv); 2815 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2816 struct drm_file *file_priv); 2817 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2818 struct drm_file *file_priv); 2819 int i915_gem_init_userptr(struct drm_device *dev); 2820 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2821 struct drm_file *file); 2822 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2823 struct drm_file *file_priv); 2824 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2825 struct drm_file *file_priv); 2826 void i915_gem_load(struct drm_device *dev); 2827 void *i915_gem_object_alloc(struct drm_device *dev); 2828 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2829 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2830 const struct drm_i915_gem_object_ops *ops); 2831 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2832 size_t size); 2833 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2834 struct drm_device *dev, const void *data, size_t size); 2835 void i915_gem_free_object(struct drm_gem_object *obj); 2836 void i915_gem_vma_destroy(struct i915_vma *vma); 2837 2838 /* Flags used by pin/bind&friends. */ 2839 #define PIN_MAPPABLE (1<<0) 2840 #define PIN_NONBLOCK (1<<1) 2841 #define PIN_GLOBAL (1<<2) 2842 #define PIN_OFFSET_BIAS (1<<3) 2843 #define PIN_USER (1<<4) 2844 #define PIN_UPDATE (1<<5) 2845 #define PIN_ZONE_4G (1<<6) 2846 #define PIN_HIGH (1<<7) 2847 #define PIN_OFFSET_MASK (~4095) 2848 int __must_check 2849 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2850 struct i915_address_space *vm, 2851 uint32_t alignment, 2852 uint64_t flags); 2853 int __must_check 2854 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2855 const struct i915_ggtt_view *view, 2856 uint32_t alignment, 2857 uint64_t flags); 2858 2859 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2860 u32 flags); 2861 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 2862 int __must_check i915_vma_unbind(struct i915_vma *vma); 2863 /* 2864 * BEWARE: Do not use the function below unless you can _absolutely_ 2865 * _guarantee_ VMA in question is _not in use_ anywhere. 
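 * When in doubt, use i915_vma_unbind() above, which does not assume the
 * VMA is already idle.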
2866 */ 2867 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); 2868 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2869 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2870 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2871 2872 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2873 int *needs_clflush); 2874 2875 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2876 2877 static inline int __sg_page_count(struct scatterlist *sg) 2878 { 2879 return sg->length >> PAGE_SHIFT; 2880 } 2881 2882 static inline struct vm_page * 2883 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 2884 { 2885 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 2886 return NULL; 2887 2888 if (n < obj->get_page.last) { 2889 obj->get_page.sg = obj->pages->sgl; 2890 obj->get_page.last = 0; 2891 } 2892 2893 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 2894 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 2895 #if 0 2896 if (unlikely(sg_is_chain(obj->get_page.sg))) 2897 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 2898 #endif 2899 } 2900 2901 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 2902 } 2903 2904 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2905 { 2906 BUG_ON(obj->pages == NULL); 2907 obj->pages_pin_count++; 2908 } 2909 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2910 { 2911 BUG_ON(obj->pages_pin_count == 0); 2912 obj->pages_pin_count--; 2913 } 2914 2915 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 2916 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 2917 struct intel_engine_cs *to, 2918 struct drm_i915_gem_request **to_req); 2919 void i915_vma_move_to_active(struct i915_vma *vma, 2920 struct drm_i915_gem_request *req); 2921 int i915_gem_dumb_create(struct drm_file *file_priv, 2922 struct drm_device *dev, 2923 struct drm_mode_create_dumb *args); 2924 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 2925 uint32_t handle, uint64_t *offset); 2926 /** 2927 * Returns true if seq1 is later than seq2. 
2928 */ 2929 static inline bool 2930 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 2931 { 2932 return (int32_t)(seq1 - seq2) >= 0; 2933 } 2934 2935 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 2936 bool lazy_coherency) 2937 { 2938 u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2939 return i915_seqno_passed(seqno, req->previous_seqno); 2940 } 2941 2942 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 2943 bool lazy_coherency) 2944 { 2945 u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2946 return i915_seqno_passed(seqno, req->seqno); 2947 } 2948 2949 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 2950 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 2951 2952 struct drm_i915_gem_request * 2953 i915_gem_find_active_request(struct intel_engine_cs *ring); 2954 2955 bool i915_gem_retire_requests(struct drm_device *dev); 2956 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); 2957 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 2958 bool interruptible); 2959 2960 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 2961 { 2962 return unlikely(atomic_read(&error->reset_counter) 2963 & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 2964 } 2965 2966 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 2967 { 2968 return atomic_read(&error->reset_counter) & I915_WEDGED; 2969 } 2970 2971 static inline u32 i915_reset_count(struct i915_gpu_error *error) 2972 { 2973 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; 2974 } 2975 2976 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 2977 { 2978 return dev_priv->gpu_error.stop_rings == 0 || 2979 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 2980 } 2981 2982 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 2983 { 2984 return dev_priv->gpu_error.stop_rings == 0 || 2985 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 2986 } 2987 2988 void i915_gem_reset(struct drm_device *dev); 2989 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 2990 int __must_check i915_gem_init(struct drm_device *dev); 2991 int i915_gem_init_rings(struct drm_device *dev); 2992 int __must_check i915_gem_init_hw(struct drm_device *dev); 2993 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 2994 void i915_gem_init_swizzling(struct drm_device *dev); 2995 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 2996 int __must_check i915_gpu_idle(struct drm_device *dev); 2997 int __must_check i915_gem_suspend(struct drm_device *dev); 2998 void __i915_add_request(struct drm_i915_gem_request *req, 2999 struct drm_i915_gem_object *batch_obj, 3000 bool flush_caches); 3001 #define i915_add_request(req) \ 3002 __i915_add_request(req, NULL, true) 3003 #define i915_add_request_no_flush(req) \ 3004 __i915_add_request(req, NULL, false) 3005 int __i915_wait_request(struct drm_i915_gem_request *req, 3006 unsigned reset_counter, 3007 bool interruptible, 3008 s64 *timeout, 3009 struct intel_rps_client *rps); 3010 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 3011 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres); 3012 int __must_check 3013 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 3014 bool readonly); 3015 int __must_check 3016 
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3017 bool write); 3018 int __must_check 3019 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3020 int __must_check 3021 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3022 u32 alignment, 3023 struct intel_engine_cs *pipelined, 3024 struct drm_i915_gem_request **pipelined_request, 3025 const struct i915_ggtt_view *view); 3026 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3027 const struct i915_ggtt_view *view); 3028 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3029 int align); 3030 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3031 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3032 3033 uint32_t 3034 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 3035 uint32_t 3036 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 3037 int tiling_mode, bool fenced); 3038 3039 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3040 enum i915_cache_level cache_level); 3041 3042 #if 0 3043 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3044 struct dma_buf *dma_buf); 3045 3046 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3047 struct drm_gem_object *gem_obj, int flags); 3048 #endif 3049 3050 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 3051 const struct i915_ggtt_view *view); 3052 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 3053 struct i915_address_space *vm); 3054 static inline u64 3055 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 3056 { 3057 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 3058 } 3059 3060 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 3061 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 3062 const struct i915_ggtt_view *view); 3063 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3064 struct i915_address_space *vm); 3065 3066 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 3067 struct i915_address_space *vm); 3068 struct i915_vma * 3069 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3070 struct i915_address_space *vm); 3071 struct i915_vma * 3072 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 3073 const struct i915_ggtt_view *view); 3074 3075 struct i915_vma * 3076 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3077 struct i915_address_space *vm); 3078 struct i915_vma * 3079 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3080 const struct i915_ggtt_view *view); 3081 3082 static inline struct i915_vma * 3083 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 3084 { 3085 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 3086 } 3087 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); 3088 3089 /* Some GGTT VM helpers */ 3090 #define i915_obj_to_ggtt(obj) \ 3091 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 3092 static inline bool i915_is_ggtt(struct i915_address_space *vm) 3093 { 3094 struct i915_address_space *ggtt = 3095 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; 3096 return vm == ggtt; 3097 } 3098 3099 static inline struct i915_hw_ppgtt * 3100 i915_vm_to_ppgtt(struct i915_address_space *vm) 3101 { 3102 WARN_ON(i915_is_ggtt(vm)); 3103 3104 return container_of(vm, struct i915_hw_ppgtt, base); 3105 } 3106 3107 3108 static inline bool 
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 3109 { 3110 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3111 } 3112 3113 static inline unsigned long 3114 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3115 { 3116 return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); 3117 } 3118 3119 static inline int __must_check 3120 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3121 uint32_t alignment, 3122 unsigned flags) 3123 { 3124 return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), 3125 alignment, flags | PIN_GLOBAL); 3126 } 3127 3128 static inline int 3129 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 3130 { 3131 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 3132 } 3133 3134 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3135 const struct i915_ggtt_view *view); 3136 static inline void 3137 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 3138 { 3139 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 3140 } 3141 3142 /* i915_gem_fence.c */ 3143 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 3144 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 3145 3146 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 3147 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 3148 3149 void i915_gem_restore_fences(struct drm_device *dev); 3150 3151 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3152 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3153 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3154 3155 /* i915_gem_context.c */ 3156 int __must_check i915_gem_context_init(struct drm_device *dev); 3157 void i915_gem_context_fini(struct drm_device *dev); 3158 void i915_gem_context_reset(struct drm_device *dev); 3159 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3160 int i915_gem_context_enable(struct drm_i915_gem_request *req); 3161 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3162 int i915_switch_context(struct drm_i915_gem_request *req); 3163 struct intel_context * 3164 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 3165 void i915_gem_context_free(struct kref *ctx_ref); 3166 struct drm_i915_gem_object * 3167 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3168 static inline void i915_gem_context_reference(struct intel_context *ctx) 3169 { 3170 kref_get(&ctx->ref); 3171 } 3172 3173 static inline void i915_gem_context_unreference(struct intel_context *ctx) 3174 { 3175 kref_put(&ctx->ref, i915_gem_context_free); 3176 } 3177 3178 static inline bool i915_gem_context_is_default(const struct intel_context *c) 3179 { 3180 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3181 } 3182 3183 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3184 struct drm_file *file); 3185 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3186 struct drm_file *file); 3187 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3188 struct drm_file *file_priv); 3189 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3190 struct drm_file *file_priv); 3191 3192 /* i915_gem_evict.c */ 3193 int __must_check i915_gem_evict_something(struct drm_device *dev, 3194 struct i915_address_space *vm, 3195 int min_size, 3196 unsigned alignment, 3197 unsigned cache_level, 3198 unsigned long start, 3199 unsigned long 
end, 3200 unsigned flags); 3201 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3202 3203 /* belongs in i915_gem_gtt.h */ 3204 static inline void i915_gem_chipset_flush(struct drm_device *dev) 3205 { 3206 if (INTEL_INFO(dev)->gen < 6) 3207 intel_gtt_chipset_flush(); 3208 } 3209 3210 /* i915_gem_stolen.c */ 3211 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3212 struct drm_mm_node *node, u64 size, 3213 unsigned alignment); 3214 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3215 struct drm_mm_node *node, u64 size, 3216 unsigned alignment, u64 start, 3217 u64 end); 3218 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3219 struct drm_mm_node *node); 3220 int i915_gem_init_stolen(struct drm_device *dev); 3221 void i915_gem_cleanup_stolen(struct drm_device *dev); 3222 struct drm_i915_gem_object * 3223 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3224 struct drm_i915_gem_object * 3225 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3226 u32 stolen_offset, 3227 u32 gtt_offset, 3228 u32 size); 3229 3230 /* i915_gem_shrinker.c */ 3231 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3232 unsigned long target, 3233 unsigned flags); 3234 #define I915_SHRINK_PURGEABLE 0x1 3235 #define I915_SHRINK_UNBOUND 0x2 3236 #define I915_SHRINK_BOUND 0x4 3237 #define I915_SHRINK_ACTIVE 0x8 3238 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3239 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3240 3241 3242 /* i915_gem_tiling.c */ 3243 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3244 { 3245 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3246 3247 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3248 obj->tiling_mode != I915_TILING_NONE; 3249 } 3250 3251 /* i915_gem_debug.c */ 3252 #if WATCH_LISTS 3253 int i915_verify_lists(struct drm_device *dev); 3254 #else 3255 #define i915_verify_lists(dev) 0 3256 #endif 3257 3258 /* i915_debugfs.c */ 3259 int i915_debugfs_init(struct drm_minor *minor); 3260 void i915_debugfs_cleanup(struct drm_minor *minor); 3261 #ifdef CONFIG_DEBUG_FS 3262 int i915_debugfs_connector_add(struct drm_connector *connector); 3263 void intel_display_crc_init(struct drm_device *dev); 3264 #else 3265 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3266 { return 0; } 3267 static inline void intel_display_crc_init(struct drm_device *dev) {} 3268 #endif 3269 3270 /* i915_gpu_error.c */ 3271 __printf(2, 3) 3272 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3273 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3274 const struct i915_error_state_file_priv *error); 3275 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3276 struct drm_i915_private *i915, 3277 size_t count, loff_t pos); 3278 static inline void i915_error_state_buf_release( 3279 struct drm_i915_error_state_buf *eb) 3280 { 3281 kfree(eb->buf); 3282 } 3283 void i915_capture_error_state(struct drm_device *dev, bool wedge, 3284 const char *error_msg); 3285 void i915_error_state_get(struct drm_device *dev, 3286 struct i915_error_state_file_priv *error_priv); 3287 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3288 void i915_destroy_error_state(struct drm_device *dev); 3289 3290 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3291 
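/*
 * A minimal sketch of the intended error-state dump flow, based on the
 * declarations above (illustrative only; error handling and the copy-out
 * of eb.buf are elided):
 *
 *	struct drm_i915_error_state_buf eb;
 *	struct i915_error_state_file_priv error_priv;
 *
 *	i915_error_state_buf_init(&eb, to_i915(dev), count, pos);
 *	i915_error_state_get(dev, &error_priv);
 *	i915_error_state_to_str(&eb, &error_priv);
 *	i915_error_state_put(&error_priv);
 *	i915_error_state_buf_release(&eb);
 */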
const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3292 3293 /* i915_cmd_parser.c */ 3294 int i915_cmd_parser_get_version(void); 3295 int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); 3296 void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); 3297 bool i915_needs_cmd_parser(struct intel_engine_cs *ring); 3298 int i915_parse_cmds(struct intel_engine_cs *ring, 3299 struct drm_i915_gem_object *batch_obj, 3300 struct drm_i915_gem_object *shadow_batch_obj, 3301 u32 batch_start_offset, 3302 u32 batch_len, 3303 bool is_master); 3304 3305 /* i915_suspend.c */ 3306 extern int i915_save_state(struct drm_device *dev); 3307 extern int i915_restore_state(struct drm_device *dev); 3308 3309 /* i915_sysfs.c */ 3310 void i915_setup_sysfs(struct drm_device *dev_priv); 3311 void i915_teardown_sysfs(struct drm_device *dev_priv); 3312 3313 /* intel_i2c.c */ 3314 extern int intel_setup_gmbus(struct drm_device *dev); 3315 extern void intel_teardown_gmbus(struct drm_device *dev); 3316 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3317 unsigned int pin); 3318 3319 extern struct i2c_adapter * 3320 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3321 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3322 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3323 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3324 { 3325 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3326 } 3327 extern void intel_i2c_reset(struct drm_device *dev); 3328 3329 /* intel_opregion.c */ 3330 #ifdef CONFIG_ACPI 3331 extern int intel_opregion_setup(struct drm_device *dev); 3332 extern void intel_opregion_init(struct drm_device *dev); 3333 extern void intel_opregion_fini(struct drm_device *dev); 3334 extern void intel_opregion_asle_intr(struct drm_device *dev); 3335 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3336 bool enable); 3337 extern int intel_opregion_notify_adapter(struct drm_device *dev, 3338 pci_power_t state); 3339 #else 3340 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3341 static inline void intel_opregion_init(struct drm_device *dev) { return; } 3342 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3343 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3344 static inline int 3345 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3346 { 3347 return 0; 3348 } 3349 static inline int 3350 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3351 { 3352 return 0; 3353 } 3354 #endif 3355 3356 /* intel_acpi.c */ 3357 #ifdef CONFIG_ACPI 3358 extern void intel_register_dsm_handler(void); 3359 extern void intel_unregister_dsm_handler(void); 3360 #else 3361 static inline void intel_register_dsm_handler(void) { return; } 3362 static inline void intel_unregister_dsm_handler(void) { return; } 3363 #endif /* CONFIG_ACPI */ 3364 3365 /* modesetting */ 3366 extern void intel_modeset_init_hw(struct drm_device *dev); 3367 extern void intel_modeset_init(struct drm_device *dev); 3368 extern void intel_modeset_gem_init(struct drm_device *dev); 3369 extern void intel_modeset_cleanup(struct drm_device *dev); 3370 extern void intel_connector_unregister(struct intel_connector *); 3371 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3372 extern void 
intel_display_resume(struct drm_device *dev); 3373 extern void i915_redisable_vga(struct drm_device *dev); 3374 extern void i915_redisable_vga_power_on(struct drm_device *dev); 3375 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3376 extern void intel_init_pch_refclk(struct drm_device *dev); 3377 extern void intel_set_rps(struct drm_device *dev, u8 val); 3378 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3379 bool enable); 3380 extern void intel_detect_pch(struct drm_device *dev); 3381 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 3382 extern int intel_enable_rc6(const struct drm_device *dev); 3383 3384 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3385 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3386 struct drm_file *file); 3387 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3388 struct drm_file *file); 3389 3390 struct intel_device_info *i915_get_device_id(int device); 3391 3392 /* overlay */ 3393 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3394 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3395 struct intel_overlay_error_state *error); 3396 3397 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3398 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3399 struct drm_device *dev, 3400 struct intel_display_error_state *error); 3401 3402 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3403 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3404 3405 /* intel_sideband.c */ 3406 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3407 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3408 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3409 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); 3410 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3411 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3412 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3413 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3414 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3415 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3416 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3417 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); 3418 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3419 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg); 3420 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val); 3421 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3422 enum intel_sbi_destination destination); 3423 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3424 enum intel_sbi_destination destination); 3425 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3426 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3427 3428 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3429 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3430 3431 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3432 #define I915_WRITE8(reg, val) 

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
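
/*
 * I915_READ64_2x32() above exists because a 64-bit value exposed as two
 * 32-bit registers can tear: if the low half wraps between the two reads,
 * the combined result is off by 2^32. The macro detects a changed upper
 * half and re-reads (at most twice) until it is stable. A minimal sketch,
 * for illustration only; EXAMPLE_CTR_LOW/EXAMPLE_CTR_HIGH are hypothetical
 * names, not registers from i915_reg.h:
 *
 *	static u64 example_read_counter(struct drm_i915_private *dev_priv)
 *	{
 *		return I915_READ64_2x32(EXAMPLE_CTR_LOW, EXAMPLE_CTR_HIGH);
 *	}
 */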
/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
#if 0
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
#else
		msleep(jiffies_to_msecs(remaining_jiffies));
#endif
	}
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
				      struct drm_i915_gem_request *req)
{
	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
		i915_gem_request_assign(&ring->trace_irq_req, req);
}
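
/*
 * Illustrative usage of wait_remaining_ms_from_jiffies() above; the field
 * name and the 500 ms delay are hypothetical, for illustration only:
 *
 *	// event A: remember when the panel was powered off
 *	intel_dp->example_power_off_timestamp = jiffies;
 *
 *	// just before event B: ensure at least 500 ms have passed since A
 *	wait_remaining_ms_from_jiffies(intel_dp->example_power_off_timestamp,
 *				       500);
 */

#endif	/* _I915_DRV_H_ */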