/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi_drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/kconfig.h>
#include <linux/pm_qos.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#define CONFIG_DRM_I915_FBDEV 1
#define CONFIG_DRM_I915_KMS 1
#define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1
#define CONFIG_ACPI 1
#define CONFIG_X86 1

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20150130"

#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, format); \
		else \
			DRM_ERROR(format); \
	} \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, "WARN_ON(" #condition ")\n"); \
		else \
			DRM_ERROR("WARN_ON(" #condition ")\n"); \
	} \
	unlikely(__ret_warn_on); \
})
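
/*
 * Usage sketch (illustrative, not from this file): both macros evaluate to
 * the condition, so callers can branch on the result, e.g.
 *
 *	if (I915_STATE_WARN_ON(!intel_irqs_enabled(dev_priv)))
 *		return;
 *
 * intel_irqs_enabled() is just an example predicate here.
 */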

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * This is the maximum (across all platforms) number of planes (primary +
 * sprites) that can be active at the same time on one pipe.
 *
 * This value doesn't count the cursor plane.
 */
#define I915_MAX_PLANES 3

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
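
/*
 * All of the *_name() and POWER_DOMAIN_*() helpers above are plain enum
 * arithmetic, e.g. pipe_name(PIPE_B) == 'B', port_name(PORT_C) == 'C' and
 * POWER_DOMAIN_PIPE(PIPE_C) == POWER_DOMAIN_PIPE_C; only
 * POWER_DOMAIN_TRANSCODER() needs to special-case the eDP transcoder.
 */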

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(pipe, p) \
	for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
#define for_each_sprite(p, s) \
	for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		if ((1 << (domain)) & (mask))

struct drm_i915_private;
struct i915_mmu_object;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
	 * lower part of ctrl1 and they get shifted into position when
	 * writing the register. This allows us to easily compare the state
	 * to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;
};
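
/*
 * Illustrative note on the ctrl1 storage above: in the DPLL_CTRL1 register
 * each DPLL's 6-bit field sits at a DPLL-dependent offset (presumably
 * (id * 6), mirroring the register layout). Keeping the unshifted bits in
 * hw_state.ctrl1 means the ctrl1 fields of two candidate states can be
 * compared directly when deciding whether a DPLL can be shared; the shift
 * only happens on register write.
 */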

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;
	struct intel_shared_dpll_config *new_config;

	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function.
	 */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	struct drm_local_map *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *		 match the P divider from @match_clock
	 *		 used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
	 */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   struct drm_display_mode *mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector, enum i915_pipe pipe);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};
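
/*
 * Sketch (assumed usage, not from this file): a traced register read through
 * the vtable would look like
 *
 *	u32 val = dev_priv->uncore.funcs.mmio_readl(dev_priv, reg, true);
 *
 * Normal driver code goes through the I915_READ()/I915_WRITE() wrappers
 * defined later in this header rather than calling the vtable directly.
 */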

struct intel_uncore {
	struct lock lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];
	unsigned int eu_total;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @vm: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	bool rcs_initialized;
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned threshold;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature
	 * is possible. */
	bool enabled;

	/* On gen8 some rings cannot perform the fbc clean operation, so for
	 * now we are doing this in SW with mmio.
	 * This variable works in the opposite information direction
	 * of ring->fbc_dirty, telling software on frontbuffer tracking
	 * to perform the cache clean on the sw side.
	 */
	bool need_sw_cache_clean;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct drm_i915_private *dev_priv;
};

struct intel_iic_softc {
	struct drm_device *drm_dev;
	device_t iic_dev;
	bool force_bit_dev;
	char name[32];
	uint32_t reg;
	uint32_t reg0;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CONTROL;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	u32 ei_interrupt_count;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct lock hw_lock;
};

/* defined in intel_pm.c */
extern struct lock mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
1243 */ 1244 bool init_power_on; 1245 bool initializing; 1246 int power_well_count; 1247 1248 struct lock lock; 1249 int domain_use_count[POWER_DOMAIN_NUM]; 1250 struct i915_power_well *power_wells; 1251 }; 1252 1253 #define MAX_L3_SLICES 2 1254 struct intel_l3_parity { 1255 u32 *remap_info[MAX_L3_SLICES]; 1256 struct work_struct error_work; 1257 int which_slice; 1258 }; 1259 1260 struct i915_gem_batch_pool { 1261 struct drm_device *dev; 1262 struct list_head cache_list; 1263 }; 1264 1265 struct i915_gem_mm { 1266 /** Memory allocator for GTT stolen memory */ 1267 struct drm_mm stolen; 1268 /** List of all objects in gtt_space. Used to restore gtt 1269 * mappings on resume */ 1270 struct list_head bound_list; 1271 /** 1272 * List of objects which are not bound to the GTT (thus 1273 * are idle and not used by the GPU) but still have 1274 * (presumably uncached) pages still attached. 1275 */ 1276 struct list_head unbound_list; 1277 1278 /* 1279 * A pool of objects to use as shadow copies of client batch buffers 1280 * when the command parser is enabled. Prevents the client from 1281 * modifying the batch contents after software parsing. 1282 */ 1283 struct i915_gem_batch_pool batch_pool; 1284 1285 /** Usable portion of the GTT for GEM */ 1286 unsigned long stolen_base; /* limited to low memory (32-bit) */ 1287 1288 /** PPGTT used for aliasing the PPGTT with the GTT */ 1289 struct i915_hw_ppgtt *aliasing_ppgtt; 1290 1291 eventhandler_tag inactive_shrinker; 1292 bool shrinker_no_lock_stealing; 1293 1294 /** LRU list of objects with fence regs on them. */ 1295 struct list_head fence_list; 1296 1297 /** 1298 * We leave the user IRQ off as much as possible, 1299 * but this means that requests will finish and never 1300 * be retired once the system goes idle. Set a timer to 1301 * fire periodically while the ring is running. When it 1302 * fires, go retire requests. 1303 */ 1304 struct delayed_work retire_work; 1305 1306 /** 1307 * When we detect an idle GPU, we want to turn on 1308 * powersaving features. So once we see that there 1309 * are no more requests outstanding and no more 1310 * arrive within a small period of time, we fire 1311 * off the idle_work. 1312 */ 1313 struct delayed_work idle_work; 1314 1315 /** 1316 * Are we in a non-interruptible section of code like 1317 * modesetting? 1318 */ 1319 bool interruptible; 1320 1321 /** 1322 * Is the GPU currently considered idle, or busy executing userspace 1323 * requests? Whilst idle, we attempt to power down the hardware and 1324 * display clocks. In order to reduce the effect on performance, there 1325 * is a slight delay before we do so. 
1326 */ 1327 bool busy; 1328 1329 /* the indicator for dispatch video commands on two BSD rings */ 1330 int bsd_ring_dispatch_index; 1331 1332 /** Bit 6 swizzling required for X tiling */ 1333 uint32_t bit_6_swizzle_x; 1334 /** Bit 6 swizzling required for Y tiling */ 1335 uint32_t bit_6_swizzle_y; 1336 1337 /* accounting, useful for userland debugging */ 1338 struct spinlock object_stat_lock; 1339 size_t object_memory; 1340 u32 object_count; 1341 }; 1342 1343 struct drm_i915_error_state_buf { 1344 struct drm_i915_private *i915; 1345 unsigned bytes; 1346 unsigned size; 1347 int err; 1348 u8 *buf; 1349 loff_t start; 1350 loff_t pos; 1351 }; 1352 1353 struct i915_error_state_file_priv { 1354 struct drm_device *dev; 1355 struct drm_i915_error_state *error; 1356 }; 1357 1358 struct i915_gpu_error { 1359 /* For hangcheck timer */ 1360 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1361 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1362 /* Hang gpu twice in this window and your context gets banned */ 1363 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1364 1365 struct workqueue_struct *hangcheck_wq; 1366 struct delayed_work hangcheck_work; 1367 1368 /* For reset and error_state handling. */ 1369 struct lock lock; 1370 /* Protected by the above dev->gpu_error.lock. */ 1371 struct drm_i915_error_state *first_error; 1372 1373 unsigned long missed_irq_rings; 1374 1375 /** 1376 * State variable controlling the reset flow and count 1377 * 1378 * This is a counter which gets incremented when reset is triggered, 1379 * and again when reset has been handled. So odd values (lowest bit set) 1380 * means that reset is in progress and even values that 1381 * (reset_counter >> 1):th reset was successfully completed. 1382 * 1383 * If reset is not completed succesfully, the I915_WEDGE bit is 1384 * set meaning that hardware is terminally sour and there is no 1385 * recovery. All waiters on the reset_queue will be woken when 1386 * that happens. 1387 * 1388 * This counter is used by the wait_seqno code to notice that reset 1389 * event happened and it needs to restart the entire ioctl (since most 1390 * likely the seqno it waited for won't ever signal anytime soon). 1391 * 1392 * This is important for lock-free wait paths, where no contended lock 1393 * naturally enforces the correct ordering between the bail-out of the 1394 * waiter and the gpu reset work code. 1395 */ 1396 atomic_t reset_counter; 1397 1398 #define I915_RESET_IN_PROGRESS_FLAG 1 1399 #define I915_WEDGED (1 << 31) 1400 1401 /** 1402 * Waitqueue to signal when the reset has completed. Used by clients 1403 * that wait for dev_priv->mm.wedged to settle. 1404 */ 1405 wait_queue_head_t reset_queue; 1406 1407 /* Userspace knobs for gpu hang simulation; 1408 * combines both a ring mask, and extra flags 1409 */ 1410 u32 stop_rings; 1411 #define I915_STOP_RING_ALLOW_BAN (1 << 31) 1412 #define I915_STOP_RING_ALLOW_WARN (1 << 30) 1413 1414 /* For missed irq/seqno simulation. */ 1415 unsigned int test_irq_rings; 1416 1417 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ 1418 bool reload_in_reset; 1419 }; 1420 1421 enum modeset_restore { 1422 MODESET_ON_LID_OPEN, 1423 MODESET_DONE, 1424 MODESET_SUSPENDED, 1425 }; 1426 1427 struct ddi_vbt_port_info { 1428 /* 1429 * This is an index in the HDMI/DVI DDI buffer translation table. 1430 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1431 * populate this field. 
1432 */ 1433 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1434 uint8_t hdmi_level_shift; 1435 1436 uint8_t supports_dvi:1; 1437 uint8_t supports_hdmi:1; 1438 uint8_t supports_dp:1; 1439 }; 1440 1441 enum psr_lines_to_wait { 1442 PSR_0_LINES_TO_WAIT = 0, 1443 PSR_1_LINE_TO_WAIT, 1444 PSR_4_LINES_TO_WAIT, 1445 PSR_8_LINES_TO_WAIT 1446 }; 1447 1448 struct intel_vbt_data { 1449 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1450 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1451 1452 /* Feature bits */ 1453 unsigned int int_tv_support:1; 1454 unsigned int lvds_dither:1; 1455 unsigned int lvds_vbt:1; 1456 unsigned int int_crt_support:1; 1457 unsigned int lvds_use_ssc:1; 1458 unsigned int display_clock_mode:1; 1459 unsigned int fdi_rx_polarity_inverted:1; 1460 unsigned int has_mipi:1; 1461 int lvds_ssc_freq; 1462 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1463 1464 enum drrs_support_type drrs_type; 1465 1466 /* eDP */ 1467 int edp_rate; 1468 int edp_lanes; 1469 int edp_preemphasis; 1470 int edp_vswing; 1471 bool edp_initialized; 1472 bool edp_support; 1473 int edp_bpp; 1474 struct edp_power_seq edp_pps; 1475 1476 struct { 1477 bool full_link; 1478 bool require_aux_wakeup; 1479 int idle_frames; 1480 enum psr_lines_to_wait lines_to_wait; 1481 int tp1_wakeup_time; 1482 int tp2_tp3_wakeup_time; 1483 } psr; 1484 1485 struct { 1486 u16 pwm_freq_hz; 1487 bool present; 1488 bool active_low_pwm; 1489 u8 min_brightness; /* min_brightness/255 of max */ 1490 } backlight; 1491 1492 /* MIPI DSI */ 1493 struct { 1494 u16 port; 1495 u16 panel_id; 1496 struct mipi_config *config; 1497 struct mipi_pps_data *pps; 1498 u8 seq_version; 1499 u32 size; 1500 u8 *data; 1501 u8 *sequence[MIPI_SEQ_MAX]; 1502 } dsi; 1503 1504 int crt_ddc_pin; 1505 1506 int child_dev_num; 1507 union child_device_config *child_dev; 1508 1509 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1510 }; 1511 1512 enum intel_ddb_partitioning { 1513 INTEL_DDB_PART_1_2, 1514 INTEL_DDB_PART_5_6, /* IVB+ */ 1515 }; 1516 1517 struct intel_wm_level { 1518 bool enable; 1519 uint32_t pri_val; 1520 uint32_t spr_val; 1521 uint32_t cur_val; 1522 uint32_t fbc_val; 1523 }; 1524 1525 struct ilk_wm_values { 1526 uint32_t wm_pipe[3]; 1527 uint32_t wm_lp[3]; 1528 uint32_t wm_lp_spr[3]; 1529 uint32_t wm_linetime[3]; 1530 bool enable_fbc_wm; 1531 enum intel_ddb_partitioning partitioning; 1532 }; 1533 1534 struct skl_ddb_entry { 1535 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1536 }; 1537 1538 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1539 { 1540 return entry->end - entry->start; 1541 } 1542 1543 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1544 const struct skl_ddb_entry *e2) 1545 { 1546 if (e1->start == e2->start && e1->end == e2->end) 1547 return true; 1548 1549 return false; 1550 } 1551 1552 struct skl_ddb_allocation { 1553 struct skl_ddb_entry pipe[I915_MAX_PIPES]; 1554 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1555 struct skl_ddb_entry cursor[I915_MAX_PIPES]; 1556 }; 1557 1558 struct skl_wm_values { 1559 bool dirty[I915_MAX_PIPES]; 1560 struct skl_ddb_allocation ddb; 1561 uint32_t wm_linetime[I915_MAX_PIPES]; 1562 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1563 uint32_t cursor[I915_MAX_PIPES][8]; 1564 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; 1565 uint32_t cursor_trans[I915_MAX_PIPES]; 1566 }; 1567 1568 struct skl_wm_level { 1569 bool plane_en[I915_MAX_PLANES]; 1570 bool cursor_en; 1571 

struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t cursor[I915_MAX_PIPES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages
 * in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	struct spinlock lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct lock lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	struct intel_device_info info;

	int relative_constants_mode;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	device_t *bbbus;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;
	char __iomem *regs;

	struct intel_uncore uncore;

	device_t *gmbus;

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	struct _drm_i915_sarea *sarea_priv;
	/**
	 * Base address of the gmbus and gpio block.
1688 */ 1689 uint32_t gpio_mmio_base; 1690 1691 /* MMIO base address for MIPI regs */ 1692 uint32_t mipi_mmio_base; 1693 1694 wait_queue_head_t gmbus_wait_queue; 1695 1696 struct pci_dev *bridge_dev; 1697 struct intel_engine_cs ring[I915_NUM_RINGS]; 1698 struct drm_i915_gem_object *semaphore_obj; 1699 uint32_t last_seqno, next_seqno; 1700 1701 drm_dma_handle_t *status_page_dmah; 1702 struct resource *mch_res; 1703 int mch_res_rid; 1704 1705 /* protects the irq masks */ 1706 struct lock irq_lock; 1707 1708 /* protects the mmio flip data */ 1709 struct spinlock mmio_flip_lock; 1710 1711 bool display_irqs_enabled; 1712 1713 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1714 struct pm_qos_request pm_qos; 1715 1716 /* DPIO indirect register protection */ 1717 struct lock dpio_lock; 1718 1719 /** Cached value of IMR to avoid reads in updating the bitfield */ 1720 union { 1721 u32 irq_mask; 1722 u32 de_irq_mask[I915_MAX_PIPES]; 1723 }; 1724 u32 gt_irq_mask; 1725 u32 pm_irq_mask; 1726 u32 pm_rps_events; 1727 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1728 1729 struct work_struct hotplug_work; 1730 struct { 1731 unsigned long hpd_last_jiffies; 1732 int hpd_cnt; 1733 enum { 1734 HPD_ENABLED = 0, 1735 HPD_DISABLED = 1, 1736 HPD_MARK_DISABLED = 2 1737 } hpd_mark; 1738 } hpd_stats[HPD_NUM_PINS]; 1739 u32 hpd_event_bits; 1740 struct delayed_work hotplug_reenable_work; 1741 1742 struct i915_fbc fbc; 1743 struct i915_drrs drrs; 1744 struct intel_opregion opregion; 1745 struct intel_vbt_data vbt; 1746 1747 bool preserve_bios_swizzle; 1748 1749 /* overlay */ 1750 struct intel_overlay *overlay; 1751 1752 /* backlight registers and fields in struct intel_panel */ 1753 struct lock backlight_lock; 1754 1755 /* LVDS info */ 1756 bool no_aux_handshake; 1757 1758 /* protects panel power sequencer state */ 1759 struct lock pps_mutex; 1760 1761 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1762 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 1763 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1764 1765 unsigned int fsb_freq, mem_freq, is_ddr3; 1766 unsigned int vlv_cdclk_freq; 1767 unsigned int hpll_freq; 1768 1769 /** 1770 * wq - Driver workqueue for GEM. 1771 * 1772 * NOTE: Work items scheduled here are not allowed to grab any modeset 1773 * locks, for otherwise the flushing done in the pageflip code will 1774 * result in deadlocks. 
1775 */ 1776 struct workqueue_struct *wq; 1777 1778 /* Display functions */ 1779 struct drm_i915_display_funcs display; 1780 1781 /* PCH chipset type */ 1782 enum intel_pch pch_type; 1783 unsigned short pch_id; 1784 1785 unsigned long quirks; 1786 1787 enum modeset_restore modeset_restore; 1788 struct lock modeset_restore_lock; 1789 1790 struct list_head vm_list; /* Global list of all address spaces */ 1791 struct i915_gtt gtt; /* VM representing the global address space */ 1792 1793 struct i915_gem_mm mm; 1794 #if defined(CONFIG_MMU_NOTIFIER) 1795 DECLARE_HASHTABLE(mmu_notifiers, 7); 1796 #endif 1797 1798 /* Kernel Modesetting */ 1799 1800 struct sdvo_device_mapping sdvo_mappings[2]; 1801 1802 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1803 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1804 wait_queue_head_t pending_flip_queue; 1805 1806 #ifdef CONFIG_DEBUG_FS 1807 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1808 #endif 1809 1810 int num_shared_dpll; 1811 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1812 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1813 1814 struct i915_workarounds workarounds; 1815 1816 /* Reclocking support */ 1817 bool render_reclock_avail; 1818 bool lvds_downclock_avail; 1819 /* indicates the reduced downclock for LVDS*/ 1820 int lvds_downclock; 1821 1822 struct i915_frontbuffer_tracking fb_tracking; 1823 1824 u16 orig_clock; 1825 1826 bool mchbar_need_disable; 1827 1828 struct intel_l3_parity l3_parity; 1829 1830 /* Cannot be determined by PCIID. You must always read a register. */ 1831 size_t ellc_size; 1832 1833 /* gen6+ rps state */ 1834 struct intel_gen6_power_mgmt rps; 1835 1836 /* ilk-only ips/rps state. Everything in here is protected by the global 1837 * mchdev_lock in intel_pm.c */ 1838 struct intel_ilk_power_mgmt ips; 1839 1840 struct i915_power_domains power_domains; 1841 1842 struct i915_psr psr; 1843 1844 struct i915_gpu_error gpu_error; 1845 1846 struct drm_i915_gem_object *vlv_pctx; 1847 1848 #ifdef CONFIG_DRM_I915_FBDEV 1849 /* list of fbdev register on this device */ 1850 struct intel_fbdev *fbdev; 1851 struct work_struct fbdev_suspend_work; 1852 #endif 1853 1854 struct drm_property *broadcast_rgb_property; 1855 struct drm_property *force_audio_property; 1856 1857 /* hda/i915 audio component */ 1858 bool audio_component_registered; 1859 1860 uint32_t hw_context_size; 1861 struct list_head context_list; 1862 1863 u32 fdi_rx_config; 1864 1865 u32 suspend_count; 1866 struct i915_suspend_saved_registers regfile; 1867 struct vlv_s0ix_state vlv_s0ix_state; 1868 1869 struct { 1870 /* 1871 * Raw watermark latency values: 1872 * in 0.1us units for WM0, 1873 * in 0.5us units for WM1+. 1874 */ 1875 /* primary */ 1876 uint16_t pri_latency[5]; 1877 /* sprite */ 1878 uint16_t spr_latency[5]; 1879 /* cursor */ 1880 uint16_t cur_latency[5]; 1881 /* 1882 * Raw watermark memory latency values 1883 * for SKL for all 8 levels 1884 * in 1us units. 1885 */ 1886 uint16_t skl_latency[8]; 1887 1888 /* 1889 * The skl_wm_values structure is a bit too big for stack 1890 * allocation, so we keep the staging struct where we store 1891 * intermediate results here instead. 
1892 */ 1893 struct skl_wm_values skl_results; 1894 1895 /* current hardware state */ 1896 union { 1897 struct ilk_wm_values hw; 1898 struct skl_wm_values skl_hw; 1899 }; 1900 } wm; 1901 1902 struct i915_runtime_pm pm; 1903 1904 struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; 1905 u32 long_hpd_port_mask; 1906 u32 short_hpd_port_mask; 1907 struct work_struct dig_port_work; 1908 1909 /* 1910 * if we get a HPD irq from DP and a HPD irq from non-DP 1911 * the non-DP HPD could block the workqueue on a mode config 1912 * mutex getting, that userspace may have taken. However 1913 * userspace is waiting on the DP workqueue to run which is 1914 * blocked behind the non-DP one. 1915 */ 1916 struct workqueue_struct *dp_wq; 1917 1918 uint32_t bios_vgacntr; 1919 1920 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1921 struct { 1922 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, 1923 struct intel_engine_cs *ring, 1924 struct intel_context *ctx, 1925 struct drm_i915_gem_execbuffer2 *args, 1926 struct list_head *vmas, 1927 struct drm_i915_gem_object *batch_obj, 1928 u64 exec_start, u32 flags); 1929 int (*init_rings)(struct drm_device *dev); 1930 void (*cleanup_ring)(struct intel_engine_cs *ring); 1931 void (*stop_ring)(struct intel_engine_cs *ring); 1932 } gt; 1933 1934 uint32_t request_uniq; 1935 1936 /* 1937 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1938 * will be rejected. Instead look for a better place. 1939 */ 1940 }; 1941 1942 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 1943 { 1944 return dev->dev_private; 1945 } 1946 1947 static inline struct drm_i915_private *dev_to_i915(struct device *dev) 1948 { 1949 BUG(); 1950 } 1951 1952 /* Iterate over initialised rings */ 1953 #define for_each_ring(ring__, dev_priv__, i__) \ 1954 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ 1955 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))) 1956 1957 enum hdmi_force_audio { 1958 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 1959 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 1960 HDMI_AUDIO_AUTO, /* trust EDID */ 1961 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 1962 }; 1963 1964 #define I915_GTT_OFFSET_NONE ((u32)-1) 1965 1966 struct drm_i915_gem_object_ops { 1967 /* Interface between the GEM object and its backing storage. 1968 * get_pages() is called once prior to the use of the associated set 1969 * of pages before to binding them into the GTT, and put_pages() is 1970 * called after we no longer need them. As we expect there to be 1971 * associated cost with migrating pages between the backing storage 1972 * and making them available for the GPU (e.g. clflush), we may hold 1973 * onto the pages after they are no longer referenced by the GPU 1974 * in case they may be used again shortly (for example migrating the 1975 * pages to a different memory domain within the GTT). put_pages() 1976 * will therefore most likely be called when the object itself is 1977 * being released or under memory pressure (where we attempt to 1978 * reap pages for the shrinker). 1979 */ 1980 int (*get_pages)(struct drm_i915_gem_object *); 1981 void (*put_pages)(struct drm_i915_gem_object *); 1982 int (*dmabuf_export)(struct drm_i915_gem_object *); 1983 void (*release)(struct drm_i915_gem_object *); 1984 }; 1985 1986 /* 1987 * Frontbuffer tracking bits. 

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
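
/*
 * Worked example (illustrative only): with 4 bits per pipe, PIPE_B's
 * bits occupy positions 4..7, so
 *
 *	INTEL_FRONTBUFFER_PRIMARY(PIPE_B)  == 1 << 4
 *	INTEL_FRONTBUFFER_CURSOR(PIPE_B)   == 1 << 5
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf << 4
 *
 * and a full-pipe invalidation would pass INTEL_FRONTBUFFER_ALL_MASK(pipe)
 * as the frontbuffer_bits argument to the tracking functions.
 */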

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	unsigned int has_dma_mapping:1;

	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

	vm_page_t *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/** Breadcrumb of last rendering to the buffer. */
	struct drm_i915_gem_request *last_read_req;
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		/** for phy allocated objects */
		struct drm_dma_handle *phys_handle;

		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct mm_struct *mm;
			struct i915_mmu_object *mn;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted, so upon creation they should have an
 * initial reference taken using kref_init.
 */
struct drm_i915_gem_request {
	struct kref ref;

	/** On which ring this request was generated */
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/**
	 * Context related to this request.
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	uint32_t uniq;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;

};

void i915_gem_request_free(struct kref *req_ref);

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	kref_put(&req->ref, i915_gem_request_free);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}
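
/*
 * Usage sketch (illustrative, not from the original source): keeping a
 * long-lived "most recent write" pointer up to date. The reference on the
 * previously tracked request is dropped and one is taken on the new one in
 * a single call, so the caller never juggles kref_get()/kref_put() pairs:
 *
 *	i915_gem_request_assign(&obj->last_write_req, req);
 *
 * Passing NULL for the new request simply releases the slot.
 */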

/*
 * XXX: i915_gem_request_completed should be here but currently needs the
 * definition of i915_seqno_passed() which is below. It will be moved in
 * a later patch when the call to i915_seqno_passed() is obsoleted...
 */

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		struct spinlock lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	atomic_t rps_wait_boost;
	struct intel_engine_cs *bsd_ring;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_BITMASK: The command is validated against the masked-dword
	 *                   checks described by the bits[] array below
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 */
	struct {
		u32 offset;
		u32 mask;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};
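
/*
 * Illustrative sketch (hypothetical opcode and field values, not taken
 * from any real command table): a fixed-length two-dword command whose
 * second dword holds a register offset that must pass the whitelist.
 *
 *	static const struct drm_i915_cmd_descriptor example_desc = {
 *		.flags = CMD_DESC_FIXED | CMD_DESC_REGISTER,
 *		.cmd = { .value = 0x11000000, .mask = 0xff000000 },
 *		.length = { .fixed = 2 },
 *		.reg = { .offset = 1, .mask = 0x007ffffc },
 *	};
 */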

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};

/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
#define __I915__(p) ({ \
	const struct drm_i915_private *__p; \
	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
		__p = (const struct drm_i915_private *)p; \
	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
		__p = to_i915((const struct drm_device *)p); \
	__p; \
})

#define INTEL_INFO(p)	(&__I915__(p)->info)
#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
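
/*
 * Illustrative note (not from the original source): thanks to the
 * compile-time type dispatch above, the feature macros below accept either
 * pointer type, so both of these are equivalent:
 *
 *	if (IS_HASWELL(dev)) ...	// struct drm_device *
 *	if (IS_HASWELL(dev_priv)) ...	// struct drm_i915_private *
 */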

#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
				 INTEL_DEVID(dev) == 0x0152 || \
				 INTEL_DEVID(dev) == 0x015a)
#define IS_SNB_GT1(dev)		(INTEL_DEVID(dev) == 0x0102 || \
				 INTEL_DEVID(dev) == 0x0106 || \
				 INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev)		(INTEL_INFO(dev)->is_skylake)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
				  (INTEL_DEVID(dev) & 0xf) == 0xb || \
				  (INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
				 INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)

#define RENDER_RING		(1<<RCS)
#define BSD_RING		(1<<VCS)
#define BLT_RING		(1<<BCS)
#define VEBOX_RING		(1<<VECS)
#define BSD2_RING		(1<<VCS2)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
				 __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev)		(i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/*
 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 */
#define HAS_AUX_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						       IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
				 IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00

#define INTEL_PCH_TYPE(dev)	(__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev)	(INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev)	(INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_GMCH_DISPLAY(dev)	(INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev)		(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev)	(IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

extern int i915_suspend_legacy(device_t kdev);
extern int i915_resume_legacy(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_params.c */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	unsigned int powersave;
	int semaphores;
	unsigned int lvds_downclock;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_execlists;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	int reset;
	bool disable_display;
	bool disable_vtd_wa;
	int use_mmio_flip;
	bool mmio_debug;
	bool verbose_state_checks;
	bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;

/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
					bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
					struct intel_engine_cs *ring);
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
					 struct drm_file *file,
					 struct intel_engine_cs *ring,
					 struct drm_i915_gem_object *obj);
int i915_gem_ringbuffer_submission(struct drm_device *dev,
				   struct drm_file *file,
				   struct intel_engine_cs *ring,
				   struct intel_context *ctx,
				   struct drm_i915_gem_execbuffer2 *args,
				   struct list_head *vmas,
				   struct drm_i915_gem_object *batch_obj,
				   u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
					  struct i915_address_space *vm,
					  uint32_t alignment,
					  uint64_t flags,
					  const struct i915_ggtt_view *view);
static inline
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     uint64_t flags)
{
	return i915_gem_object_pin_view(obj, vm, alignment, flags,
					&i915_ggtt_view_normal);
}

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct vm_page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	return obj->pages[n];
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
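
/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * needs stable access to the backing pages first populates them, then pins
 * them so they cannot be reaped while in use:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... access via i915_gem_object_get_page(obj, n) ...
 *	i915_gem_object_unpin_pages(obj);
 */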

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
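
/*
 * Worked example (illustrative only): seqnos are modular 32-bit values,
 * so a plain ">=" would break once the counter wraps. With seq1 == 2 and
 * seq2 == 0xfffffffe, seq1 - seq2 == 4 and (int32_t)4 >= 0, so seq1 is
 * correctly treated as later even though it is numerically smaller. The
 * comparison stays valid as long as the two seqnos are less than 2^31
 * apart.
 */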

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno;

	BUG_ON(req == NULL);

	seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}

static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}

static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj);
#define i915_add_request(ring) \
	__i915_add_request(ring, NULL, NULL)
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
		   vm_page_t *mres);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

#if 0
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);
#endif

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
				       struct i915_address_space *vm,
				       enum i915_ggtt_view_type view);
static inline
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm)
{
	return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
}
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
			     struct i915_address_space *vm,
			     enum i915_ggtt_view_type view);
static inline
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm)
{
	return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
}

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
					  struct i915_address_space *vm,
					  const struct i915_ggtt_view *view);
static inline
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm)
{
	return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
				       struct i915_address_space *vm,
				       const struct i915_ggtt_view *view);

static inline
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
						      &i915_ggtt_view_normal);
}

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			return true;
	return false;
}

/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool
i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));

	return container_of(vm, struct i915_hw_ppgtt, base);
}


static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
				   alignment, flags | PIN_GLOBAL);
}

static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}

void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);

/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* belongs in i915_gem_gtt.h */
static inline void
i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_gem_batch_pool.c */
void i915_gem_batch_pool_init(struct drm_device *dev,
			      struct i915_gem_batch_pool *pool);
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct device *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct device *adapter, int speed);
extern void intel_gmbus_force_bit(struct device *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct device *adapter)
{
	struct intel_iic_softc *sc;

	sc = device_get_softc(device_get_parent(adapter));

	return sc->force_bit_dev;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

struct intel_device_info *i915_get_device_id(int device);

void intel_notify_mmio_flip(struct intel_engine_cs *ring);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper = I915_READ(upper_reg);				\
	u32 lower = I915_READ(lower_reg);				\
	u32 tmp = I915_READ(upper_reg);					\
	if (upper != tmp) {						\
		upper = tmp;						\
		lower = I915_READ(lower_reg);				\
		WARN_ON(I915_READ(upper_reg) != upper);			\
	}								\
	(u64)upper << 32 | lower; })
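
/*
 * Illustrative note (not from the original source): the double read of the
 * upper half catches a carry propagating between the two 32-bit reads. If
 * a 64-bit counter ticks from 0x00000001ffffffff to 0x0000000200000000
 * between reading upper_reg and lower_reg, the second read of upper_reg
 * differs from the first, both halves are re-sampled, and a torn value
 * such as 0x0000000100000000 is never returned.
 */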

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;

#if 0
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
#else
		msleep(jiffies_to_msecs(remaining_jiffies));
#endif
	}
}
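
/*
 * Usage sketch (illustrative; the panel-power delay is a hypothetical
 * example, not lifted from this driver):
 *
 *	unsigned long off_time = jiffies;	// event A: panel powered off
 *	...
 *	// event B: before powering back on, enforce a 500 ms gap
 *	wait_remaining_ms_from_jiffies(off_time, 500);
 *
 * If 500 ms have already elapsed, the call returns immediately.
 */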

static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
				      struct drm_i915_gem_request *req)
{
	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
		i915_gem_request_assign(&ring->trace_irq_req, req);
}

#endif