/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi_drm/i915_drm.h>
#include <uapi_drm/drm_fourcc.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/kconfig.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>

#define CONFIG_DRM_I915_FBDEV 1
#define CONFIG_DRM_I915_KMS 1
#define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1
#define CONFIG_ACPI 1
#define CONFIG_X86 1

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20150327"

#undef WARN_ON
/* Many versions of gcc seem unable to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, format); \
		else \
			DRM_ERROR(format); \
	} \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, "WARN_ON(" #condition ")\n"); \
		else \
			DRM_ERROR("WARN_ON(" #condition ")\n"); \
	} \
	unlikely(__ret_warn_on); \
})
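/*
 * Illustrative sketch (not compiled): like WARN_ON(), I915_STATE_WARN()
 * evaluates to the tested condition, so a caller can report a bad hw state
 * and bail out in one expression. The function name and register used
 * below are made up for the example.
 */
#if 0
static void example_assert_panel_off(struct drm_i915_private *dev_priv)
{
	bool enabled = I915_READ(EXAMPLE_PANEL_CTL) & (1 << 31);

	if (I915_STATE_WARN(enabled, "panel still enabled\n"))
		return; /* unexpected hw state, don't touch it further */
}
#endif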
enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * This is the maximum (across all platforms) number of planes (primary +
 * sprites) that can be active at the same time on one pipe.
 *
 * This value doesn't count the cursor plane.
 */
#define I915_MAX_PLANES	3

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
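/*
 * Illustrative sketch (not compiled): the POWER_DOMAIN_* helpers depend on
 * the enum above keeping the per-pipe and per-transcoder values contiguous
 * and in the same order as the pipe/transcoder enums.
 */
#if 0
static void example_power_domain_layout(void)
{
	BUILD_BUG_ON(POWER_DOMAIN_PIPE(PIPE_B) != POWER_DOMAIN_PIPE_B);
	BUILD_BUG_ON(POWER_DOMAIN_TRANSCODER(TRANSCODER_C) !=
		     POWER_DOMAIN_TRANSCODER_C);
	BUILD_BUG_ON(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) !=
		     POWER_DOMAIN_TRANSCODER_EDP);
}
#endif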
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE,	/* PORT_A is internal */
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &dev->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		if ((1 << (domain)) & (mask))
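/*
 * Illustrative sketch (not compiled): typical use of the iterators above,
 * visiting every sprite plane of every pipe. dev_priv is assumed to be a
 * valid struct drm_i915_private pointer; note that sprite_name() expands
 * to an expression using a local variable called 'dev'.
 */
#if 0
static void example_walk_sprites(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev; /* needed by sprite_name() */
	enum i915_pipe pipe;
	int sprite;

	for_each_pipe(dev_priv, pipe)
		for_each_sprite(dev_priv, pipe, sprite)
			DRM_DEBUG_KMS("pipe %c sprite %c\n",
				      pipe_name(pipe),
				      sprite_name(pipe, sprite));
}
#endif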
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each of these DPLLs. We store those in
	 * the lower part of ctrl1 and they get shifted into position when
	 * writing the register. This allows us to easily compare the state
	 * to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;
	struct intel_shared_dpll_config *new_config;

	int active; /* count of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
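/*
 * Illustrative sketch (not compiled): the link M/N values encode the ratio
 * of pixel clock to link clock for DP/FDI. Conceptually the driver computes
 * something like the below; the real intel_link_compute_m_n() additionally
 * reduces the fractions and selects the transfer unit size (tu). The
 * numeric values are arbitrary examples.
 */
#if 0
static void example_link_m_n(struct intel_link_m_n *m_n)
{
	int bpp = 24, nlanes = 4;
	int pixel_clock = 148500;	/* kHz */
	int link_clock = 270000;	/* kHz symbol clock, 2.7 GHz HBR */

	m_n->gmch_m = bpp * pixel_clock;	/* data M */
	m_n->gmch_n = link_clock * nlanes * 8;	/* data N */
	m_n->link_m = pixel_clock;		/* link M */
	m_n->link_n = link_clock;		/* link N */
}
#endif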
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE	(8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};
struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc_state: current CRTC state
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock's P divider must match the
	 *               P divider from @match_clock; used for LVDS
	 *               downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   struct drm_display_mode *mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector, enum i915_pipe pipe);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	struct lock lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
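/*
 * Illustrative sketch (not compiled): walking the initialised forcewake
 * domains that match a mask, e.g. for debug logging of wake counts.
 */
#if 0
static void example_dump_fw_domains(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	int id;

	for_each_fw_domain_mask(domain, FORCEWAKE_RENDER | FORCEWAKE_MEDIA,
				dev_priv, id)
		DRM_DEBUG_DRIVER("fw domain %d: wake_count %u\n",
				 id, domain->wake_count);
}
#endif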
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
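/*
 * Illustrative sketch (not compiled): DEV_INFO_FOR_EACH_FLAG is an X-macro.
 * Instantiated with DEFINE_FLAG and SEP_SEMICOLON inside
 * struct intel_device_info above, it expands to one single-bit field per
 * flag, roughly:
 */
#if 0
struct example_device_info_flags {
	u8 is_mobile:1;
	u8 is_i85x:1;
	/* ... one u8 bitfield per flag in the list ... */
	u8 has_fpga_dbg:1;
};
#endif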
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting any more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		   initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	bool rcs_initialized;
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
};

struct i915_fbc {
	unsigned long uncompressed_size;
	unsigned threshold;
	unsigned int fb_id;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature
	 * is possible. */
	bool enabled;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};
/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID parsing
 * for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct drm_i915_private *dev_priv;
};

struct intel_iic_softc {
	struct drm_device *drm_dev;
	device_t iic_dev;
	bool force_bit_dev;
	char name[32];
	uint32_t reg;
	uint32_t reg0;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};
struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct lock hw_lock;
};
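/*
 * Illustrative sketch (not compiled): the *_freq fields above are in hw
 * ratio units, not MHz. On snb through bdw one unit is nominally 50 MHz
 * (GT_FREQUENCY_MULTIPLIER); vlv/chv derive the frequency from the CZ
 * clock instead, so real code must convert through the platform helpers
 * rather than assuming a fixed multiplier as this example does.
 */
#if 0
static int example_cur_freq_mhz(struct drm_i915_private *dev_priv)
{
	/* snb-bdw only; wrong on vlv/chv */
	return dev_priv->rps.cur_freq * 50;
}
#endif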
/* defined in intel_pm.c */
extern struct lock mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct lock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_batch_pool {
	struct drm_device *dev;
	struct list_head cache_list;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
#if 0
	struct shrinker shrinker;
#endif
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	struct spinlock object_stat_lock;
	size_t object_memory;
	u32 object_count;
};
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	struct lock lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress, and even values mean that
	 * the (reset_counter >> 1):th reset was completed successfully.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN	(1 << 31)
#define I915_STOP_RING_ALLOW_WARN	(1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	bool reload_in_reset;
};
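/*
 * Illustrative sketch (not compiled): decoding reset_counter as described
 * in the comment above. Real code should use the driver's own helpers
 * (i915_reset_in_progress() and friends, declared further down in this
 * header) rather than open-coding this.
 */
#if 0
static bool example_reset_in_progress(struct i915_gpu_error *error)
{
	unsigned int counter = atomic_read(&error->reset_counter);

	if (counter & I915_WEDGED)
		return true; /* terminally wedged, no recovery possible */

	/* odd value: a reset is currently being processed */
	return counter & I915_RESET_IN_PROGRESS_FLAG;
}
#endif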
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	bool edp_low_vswing;
	struct edp_power_seq edp_pps;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_wm_values {
	struct {
		uint16_t primary;
		uint16_t sprite[2];
		uint8_t cursor;
	} pipe[3];

	struct {
		uint16_t plane;
		uint8_t cursor;
	} sr;

	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
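/*
 * Illustrative sketch (not compiled): DDB entries are half-open block
 * ranges, so two back-to-back allocations share no blocks.
 */
#if 0
static void example_ddb_entries(void)
{
	struct skl_ddb_entry a = { .start = 0, .end = 160 };
	struct skl_ddb_entry b = { .start = 160, .end = 320 };

	WARN_ON(skl_ddb_entry_size(&a) != 160);
	WARN_ON(skl_ddb_entry_equal(&a, &b)); /* distinct ranges */
}
#endif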
struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t cursor[I915_MAX_PIPES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens.
 *
 * For more, read the Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};
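/*
 * Illustrative sketch (not compiled): the usual pattern for code that may
 * run while the device is runtime suspended, per the comment above.
 */
#if 0
static void example_touch_hw(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
	/* ... register access is safe between get and put ... */
	intel_runtime_pm_put(dev_priv);
}
#endif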
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	struct spinlock lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct lock lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct i915_virtual_gpu {
	bool active;
};
struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	struct intel_device_info info;

	int relative_constants_mode;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	device_t *bbbus;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;
	char __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	device_t *gmbus;

	/** gmbus_mutex protects against concurrent usage of the single hw
	 * gmbus controller on different i2c buses. */
	struct lock gmbus_mutex;

	struct _drm_i915_sarea *sarea_priv;
	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	struct lock irq_lock;

	/* protects the mmio flip data */
	struct spinlock mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct lock dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct work_struct hotplug_work;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct delayed_work hotplug_reenable_work;

	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int vlv_cdclk_freq;
	unsigned int hpll_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the
	 * global mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	bool audio_component_registered;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};
	} wm;

	struct i915_runtime_pm pm;

	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
	u32 long_hpd_port_mask;
	u32 short_hpd_port_mask;
	struct work_struct dig_port_work;

	/*
	 * If we get a HPD irq from DP and a HPD irq from non-DP, the non-DP
	 * HPD handler could block the workqueue while acquiring a mode config
	 * mutex that userspace may already hold. However userspace is waiting
	 * on the DP workqueue to run, which is blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;

	uint32_t bios_vgacntr;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct intel_context *ctx,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct list_head *vmas,
				  struct drm_i915_gem_object *batch_obj,
				  u64 exec_start, u32 flags);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	uint32_t request_uniq;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
	BUG();
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to binding the associated set of
	 * pages into the GTT, and put_pages() is called after we no longer
	 * need them. As we expect there to be associated cost with migrating
	 * pages between the backing storage and making them available for
	 * the GPU (e.g. clflush), we may hold onto the pages after they are
	 * no longer referenced by the GPU in case they may be used again
	 * shortly (for example migrating the pages to a different memory
	 * domain within the GTT). put_pages() will therefore most likely be
	 * called when the object itself is being released or under memory
	 * pressure (where we attempt to reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
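/*
 * Illustrative sketch (not compiled): frontbuffer bit layout. Pipe B's
 * nibble starts at bit 4, with its sprite plane at bit 6.
 */
#if 0
static void example_frontbuffer_bits(void)
{
	BUILD_BUG_ON(INTEL_FRONTBUFFER_PRIMARY(PIPE_B) != (1 << 4));
	BUILD_BUG_ON(INTEL_FRONTBUFFER_SPRITE(PIPE_B) != (1 << 6));
	BUILD_BUG_ON(INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) != 0xf0);
}
#endif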
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	unsigned int has_dma_mapping:1;

	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

	struct vm_page **pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/** Breadcrumb of last rendering to the buffer. */
	struct drm_i915_gem_request *last_read_req;
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		/** for phy allocated objects */
		struct drm_dma_handle *phys_handle;

		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted, so upon creation they should have an
 * initial reference taken using kref_init().
 */
struct drm_i915_gem_request {
	struct kref ref;

	/** On which ring this request was generated */
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/**
	 * Context and ring buffer related to this request.
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_context *ctx;
	struct intel_ringbuffer *ringbuf;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	pid_t pid;

	uint32_t uniq;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU); we
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists no. of times this request has been sent to the ELSP */
	int elsp_submitted;
};

void i915_gem_request_free(struct kref *req_ref);

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
}

/*
 * Replace the request (if any) tracked in *pdst with src: a reference is
 * taken on src first and only then dropped on the old value, so assigning
 * a request to a slot that already holds it is safe.
 */
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}

/*
 * XXX: i915_gem_request_completed should be here but currently needs the
 * definition of i915_seqno_passed() which is below. It will be moved in
 * a later patch when the call to i915_seqno_passed() is obsoleted...
 */

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		struct spinlock lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	atomic_t rps_wait_boost;
	struct intel_engine_cs *bsd_ring;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
2211 * 2212 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2213 * a length mask if not set 2214 * CMD_DESC_SKIP: The command is allowed but does not follow the 2215 * standard length encoding for the opcode range in 2216 * which it falls 2217 * CMD_DESC_REJECT: The command is never allowed 2218 * CMD_DESC_REGISTER: The command should be checked against the 2219 * register whitelist for the appropriate ring 2220 * CMD_DESC_MASTER: The command is allowed if the submitting process 2221 * is the DRM master 2222 */ 2223 u32 flags; 2224 #define CMD_DESC_FIXED (1<<0) 2225 #define CMD_DESC_SKIP (1<<1) 2226 #define CMD_DESC_REJECT (1<<2) 2227 #define CMD_DESC_REGISTER (1<<3) 2228 #define CMD_DESC_BITMASK (1<<4) 2229 #define CMD_DESC_MASTER (1<<5) 2230 2231 /* 2232 * The command's unique identification bits and the bitmask to get them. 2233 * This isn't strictly the opcode field as defined in the spec and may 2234 * also include type, subtype, and/or subop fields. 2235 */ 2236 struct { 2237 u32 value; 2238 u32 mask; 2239 } cmd; 2240 2241 /* 2242 * The command's length. The command is either fixed length (i.e. does 2243 * not include a length field) or has a length field mask. The flag 2244 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2245 * a length mask. All command entries in a command table must include 2246 * length information. 2247 */ 2248 union { 2249 u32 fixed; 2250 u32 mask; 2251 } length; 2252 2253 /* 2254 * Describes where to find a register address in the command to check 2255 * against the ring's register whitelist. Only valid if flags has the 2256 * CMD_DESC_REGISTER bit set. 2257 */ 2258 struct { 2259 u32 offset; 2260 u32 mask; 2261 } reg; 2262 2263 #define MAX_CMD_DESC_BITMASKS 3 2264 /* 2265 * Describes command checks where a particular dword is masked and 2266 * compared against an expected value. If the command does not match 2267 * the expected value, the parser rejects it. Only valid if flags has 2268 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2269 * are valid. 2270 * 2271 * If the check specifies a non-zero condition_mask then the parser 2272 * only performs the check when the bits specified by condition_mask 2273 * are non-zero. 2274 */ 2275 struct { 2276 u32 offset; 2277 u32 mask; 2278 u32 expected; 2279 u32 condition_offset; 2280 u32 condition_mask; 2281 } bits[MAX_CMD_DESC_BITMASKS]; 2282 }; 2283 2284 /* 2285 * A table of commands requiring special handling by the command parser. 2286 * 2287 * Each ring has an array of tables. Each table consists of an array of command 2288 * descriptors, which must be sorted with command opcodes in ascending order. 2289 */ 2290 struct drm_i915_cmd_table { 2291 const struct drm_i915_cmd_descriptor *table; 2292 int count; 2293 }; 2294 2295 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
*/ 2296 #define __I915__(p) ({ \ 2297 const struct drm_i915_private *__p; \ 2298 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2299 __p = (const struct drm_i915_private *)p; \ 2300 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2301 __p = to_i915((const struct drm_device *)p); \ 2302 __p; \ 2303 }) 2304 #define INTEL_INFO(p) (&__I915__(p)->info) 2305 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2306 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2307 2308 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2309 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2310 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2311 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2312 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2313 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2314 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2315 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2316 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2317 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2318 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2319 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2320 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2321 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2322 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2323 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2324 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2325 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2326 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2327 INTEL_DEVID(dev) == 0x0152 || \ 2328 INTEL_DEVID(dev) == 0x015a) 2329 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2330 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2331 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2332 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2333 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2334 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2335 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2336 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2337 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2338 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2339 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2340 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2341 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2342 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2343 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2344 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2345 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2346 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2347 /* ULX machines are also considered ULT. */ 2348 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2349 INTEL_DEVID(dev) == 0x0A1E) 2350 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2351 2352 #define SKL_REVID_A0 (0x0) 2353 #define SKL_REVID_B0 (0x1) 2354 #define SKL_REVID_C0 (0x2) 2355 #define SKL_REVID_D0 (0x3) 2356 #define SKL_REVID_E0 (0x4) 2357 2358 /* 2359 * The genX designation typically refers to the render engine, so render 2360 * capability related checks should use IS_GEN, while display and other checks 2361 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2362 * chips, etc.). 
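
 * For example, a workaround in the render engine code would typically be
 * gated on IS_GEN7(dev) or IS_HASWELL(dev), whereas PCH display behaviour
 * should key off HAS_PCH_SPLIT(dev) or a specific HAS_PCH_*() check.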
2363 */ 2364 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2365 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2366 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2367 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2368 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2369 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2370 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2371 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2372 2373 #define RENDER_RING (1<<RCS) 2374 #define BSD_RING (1<<VCS) 2375 #define BLT_RING (1<<BCS) 2376 #define VEBOX_RING (1<<VECS) 2377 #define BSD2_RING (1<<VCS2) 2378 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2379 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2380 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2381 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2382 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2383 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2384 __I915__(dev)->ellc_size) 2385 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2386 2387 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2388 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2389 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2390 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) 2391 2392 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2393 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2394 2395 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2396 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2397 /* 2398 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2399 * even when in MSI mode. This results in spurious interrupt warnings if the 2400 * legacy irq no. is shared with another device. The kernel then disables that 2401 * interrupt source and so prevents the other device from working properly. 2402 */ 2403 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2404 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2405 2406 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2407 * rows, which changed the alignment requirements and fence programming. 
2408 */ 2409 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2410 IS_I915GM(dev))) 2411 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 2412 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 2413 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 2414 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2415 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2416 2417 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2418 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2419 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2420 2421 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2422 2423 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2424 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2425 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2426 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2427 IS_SKYLAKE(dev)) 2428 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2429 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) 2430 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2431 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2432 2433 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2434 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2435 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2436 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2437 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2438 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2439 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2440 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2441 2442 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2443 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2444 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2445 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2446 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2447 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2448 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2449 2450 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 2451 2452 /* DPF == dynamic parity feature */ 2453 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2454 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) 2455 2456 #define GT_FREQUENCY_MULTIPLIER 50 2457 #define GEN9_FREQ_SCALER 3 2458 2459 #include "i915_trace.h" 2460 2461 extern const struct drm_ioctl_desc i915_ioctls[]; 2462 extern int i915_max_ioctl; 2463 2464 extern int i915_suspend_legacy(device_t kdev); 2465 extern int i915_resume_legacy(struct drm_device *dev); 2466 2467 /* i915_params.c */ 2468 struct i915_params { 2469 int modeset; 2470 int panel_ignore_lid; 2471 int semaphores; 2472 unsigned int lvds_downclock; 2473 int lvds_channel_mode; 2474 int panel_use_ssc; 2475 int vbt_sdvo_panel_type; 2476 int enable_rc6; 2477 int enable_fbc; 2478 int enable_ppgtt; 2479 int enable_execlists; 2480 int enable_psr; 2481 unsigned int preliminary_hw_support; 2482 int disable_power_well; 2483 int enable_ips; 2484 int invert_brightness; 2485 int enable_cmd_parser; 2486 /* leave bools at the end to not create holes */ 2487 bool enable_hangcheck; 2488 bool fastboot; 2489 bool prefault_disable; 2490 bool load_detect_test; 2491 int reset; 2492 bool disable_display; 2493 bool disable_vtd_wa; 2494 int use_mmio_flip; 2495 int mmio_debug; 2496 bool verbose_state_checks; 2497 bool nuclear_pageflip; 2498 }; 2499 extern struct i915_params i915 __read_mostly; 2500 2501 /* i915_dma.c */ 2502 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2503 extern int i915_driver_unload(struct drm_device *); 2504 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2505 extern void i915_driver_lastclose(struct drm_device * dev); 2506 extern void i915_driver_preclose(struct drm_device *dev, 2507 struct drm_file *file); 2508 extern void i915_driver_postclose(struct drm_device *dev, 2509 struct drm_file *file); 2510 extern int i915_driver_device_is_agp(struct drm_device * dev); 2511 #ifdef CONFIG_COMPAT 2512 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2513 unsigned long arg); 2514 #endif 2515 extern int intel_gpu_reset(struct drm_device *dev); 2516 extern int i915_reset(struct drm_device *dev); 2517 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2518 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2519 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2520 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2521 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2522 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2523 2524 /* i915_irq.c */ 2525 void i915_queue_hangcheck(struct drm_device *dev); 2526 __printf(3, 4) 2527 void i915_handle_error(struct drm_device *dev, bool wedged, 2528 const char *fmt, ...); 2529 2530 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2531 extern void intel_hpd_init(struct drm_i915_private *dev_priv); 2532 int intel_irq_install(struct drm_i915_private *dev_priv); 2533 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2534 2535 extern void intel_uncore_sanitize(struct drm_device *dev); 2536 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2537 bool restore_forcewake); 2538 extern void intel_uncore_init(struct drm_device *dev); 2539 extern void intel_uncore_check_errors(struct drm_device *dev); 2540 extern void intel_uncore_fini(struct drm_device *dev); 2541 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2542 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2543 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2544 enum 
forcewake_domains domains); 2545 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2546 enum forcewake_domains domains); 2547 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2548 static inline bool intel_vgpu_active(struct drm_device *dev) 2549 { 2550 return to_i915(dev)->vgpu.active; 2551 } 2552 2553 void 2554 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2555 u32 status_mask); 2556 2557 void 2558 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2559 u32 status_mask); 2560 2561 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2562 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2563 void 2564 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2565 void 2566 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2567 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2568 uint32_t interrupt_mask, 2569 uint32_t enabled_irq_mask); 2570 #define ibx_enable_display_interrupt(dev_priv, bits) \ 2571 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 2572 #define ibx_disable_display_interrupt(dev_priv, bits) \ 2573 ibx_display_interrupt_update((dev_priv), (bits), 0) 2574 2575 /* i915_gem.c */ 2576 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2577 struct drm_file *file_priv); 2578 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2579 struct drm_file *file_priv); 2580 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2581 struct drm_file *file_priv); 2582 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2583 struct drm_file *file_priv); 2584 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2585 struct drm_file *file_priv); 2586 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2587 struct drm_file *file_priv); 2588 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2589 struct drm_file *file_priv); 2590 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2591 struct intel_engine_cs *ring); 2592 void i915_gem_execbuffer_retire_commands(struct drm_device *dev, 2593 struct drm_file *file, 2594 struct intel_engine_cs *ring, 2595 struct drm_i915_gem_object *obj); 2596 int i915_gem_ringbuffer_submission(struct drm_device *dev, 2597 struct drm_file *file, 2598 struct intel_engine_cs *ring, 2599 struct intel_context *ctx, 2600 struct drm_i915_gem_execbuffer2 *args, 2601 struct list_head *vmas, 2602 struct drm_i915_gem_object *batch_obj, 2603 u64 exec_start, u32 flags); 2604 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2605 struct drm_file *file_priv); 2606 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2607 struct drm_file *file_priv); 2608 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2609 struct drm_file *file_priv); 2610 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2611 struct drm_file *file); 2612 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2613 struct drm_file *file); 2614 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2615 struct drm_file *file_priv); 2616 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2617 struct drm_file *file_priv); 2618 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2619 struct drm_file *file_priv); 2620 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2621 struct drm_file *file_priv); 2622 int i915_gem_init_userptr(struct 
drm_device *dev); 2623 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2624 struct drm_file *file); 2625 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2626 struct drm_file *file_priv); 2627 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2628 struct drm_file *file_priv); 2629 void i915_gem_load(struct drm_device *dev); 2630 void *i915_gem_object_alloc(struct drm_device *dev); 2631 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2632 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2633 const struct drm_i915_gem_object_ops *ops); 2634 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2635 size_t size); 2636 void i915_init_vm(struct drm_i915_private *dev_priv, 2637 struct i915_address_space *vm); 2638 void i915_gem_free_object(struct drm_gem_object *obj); 2639 void i915_gem_vma_destroy(struct i915_vma *vma); 2640 2641 #define PIN_MAPPABLE 0x1 2642 #define PIN_NONBLOCK 0x2 2643 #define PIN_GLOBAL 0x4 2644 #define PIN_OFFSET_BIAS 0x8 2645 #define PIN_OFFSET_MASK (~4095) 2646 int __must_check 2647 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2648 struct i915_address_space *vm, 2649 uint32_t alignment, 2650 uint64_t flags); 2651 int __must_check 2652 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2653 const struct i915_ggtt_view *view, 2654 uint32_t alignment, 2655 uint64_t flags); 2656 2657 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2658 u32 flags); 2659 int __must_check i915_vma_unbind(struct i915_vma *vma); 2660 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2661 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2662 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2663 2664 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2665 int *needs_clflush); 2666 2667 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2668 static inline struct vm_page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 2669 { 2670 return obj->pages[n]; 2671 } 2672 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2673 { 2674 BUG_ON(obj->pages == NULL); 2675 obj->pages_pin_count++; 2676 } 2677 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2678 { 2679 BUG_ON(obj->pages_pin_count == 0); 2680 obj->pages_pin_count--; 2681 } 2682 2683 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 2684 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 2685 struct intel_engine_cs *to); 2686 void i915_vma_move_to_active(struct i915_vma *vma, 2687 struct intel_engine_cs *ring); 2688 int i915_gem_dumb_create(struct drm_file *file_priv, 2689 struct drm_device *dev, 2690 struct drm_mode_create_dumb *args); 2691 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 2692 uint32_t handle, uint64_t *offset); 2693 /** 2694 * Returns true if seq1 is later than seq2. 
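 *
 * The comparison is done as a signed 32-bit subtraction, so it stays
 * correct across seqno wraparound: e.g. seq1 == 1 still counts as
 * passing seq2 == 0xfffffffe even though it is numerically smaller.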
2695 */ 2696 static inline bool 2697 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 2698 { 2699 return (int32_t)(seq1 - seq2) >= 0; 2700 } 2701 2702 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 2703 bool lazy_coherency) 2704 { 2705 u32 seqno; 2706 2707 BUG_ON(req == NULL); 2708 2709 seqno = req->ring->get_seqno(req->ring, lazy_coherency); 2710 2711 return i915_seqno_passed(seqno, req->seqno); 2712 } 2713 2714 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 2715 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 2716 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 2717 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 2718 2719 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 2720 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 2721 2722 struct drm_i915_gem_request * 2723 i915_gem_find_active_request(struct intel_engine_cs *ring); 2724 2725 bool i915_gem_retire_requests(struct drm_device *dev); 2726 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); 2727 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 2728 bool interruptible); 2729 int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req); 2730 2731 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 2732 { 2733 return unlikely(atomic_read(&error->reset_counter) 2734 & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 2735 } 2736 2737 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 2738 { 2739 return atomic_read(&error->reset_counter) & I915_WEDGED; 2740 } 2741 2742 static inline u32 i915_reset_count(struct i915_gpu_error *error) 2743 { 2744 return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2; 2745 } 2746 2747 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 2748 { 2749 return dev_priv->gpu_error.stop_rings == 0 || 2750 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 2751 } 2752 2753 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 2754 { 2755 return dev_priv->gpu_error.stop_rings == 0 || 2756 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 2757 } 2758 2759 void i915_gem_reset(struct drm_device *dev); 2760 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 2761 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 2762 int __must_check i915_gem_init(struct drm_device *dev); 2763 int i915_gem_init_rings(struct drm_device *dev); 2764 int __must_check i915_gem_init_hw(struct drm_device *dev); 2765 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); 2766 void i915_gem_init_swizzling(struct drm_device *dev); 2767 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 2768 int __must_check i915_gpu_idle(struct drm_device *dev); 2769 int __must_check i915_gem_suspend(struct drm_device *dev); 2770 int __i915_add_request(struct intel_engine_cs *ring, 2771 struct drm_file *file, 2772 struct drm_i915_gem_object *batch_obj); 2773 #define i915_add_request(ring) \ 2774 __i915_add_request(ring, NULL, NULL) 2775 int __i915_wait_request(struct drm_i915_gem_request *req, 2776 unsigned reset_counter, 2777 bool interruptible, 2778 s64 *timeout, 2779 struct drm_i915_file_private *file_priv); 2780 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 2781 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 
vm_page_t *mres); 2782 int __must_check 2783 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 2784 bool write); 2785 int __must_check 2786 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 2787 int __must_check 2788 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 2789 u32 alignment, 2790 struct intel_engine_cs *pipelined, 2791 const struct i915_ggtt_view *view); 2792 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 2793 const struct i915_ggtt_view *view); 2794 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 2795 int align); 2796 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2797 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2798 2799 uint32_t 2800 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 2801 uint32_t 2802 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 2803 int tiling_mode, bool fenced); 2804 2805 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 2806 enum i915_cache_level cache_level); 2807 2808 #if 0 2809 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 2810 struct dma_buf *dma_buf); 2811 2812 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 2813 struct drm_gem_object *gem_obj, int flags); 2814 #endif 2815 2816 void i915_gem_restore_fences(struct drm_device *dev); 2817 2818 unsigned long 2819 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 2820 const struct i915_ggtt_view *view); 2821 unsigned long 2822 i915_gem_obj_offset(struct drm_i915_gem_object *o, 2823 struct i915_address_space *vm); 2824 static inline unsigned long 2825 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 2826 { 2827 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 2828 } 2829 2830 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 2831 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 2832 const struct i915_ggtt_view *view); 2833 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 2834 struct i915_address_space *vm); 2835 2836 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 2837 struct i915_address_space *vm); 2838 struct i915_vma * 2839 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 2840 struct i915_address_space *vm); 2841 struct i915_vma * 2842 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 2843 const struct i915_ggtt_view *view); 2844 2845 struct i915_vma * 2846 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 2847 struct i915_address_space *vm); 2848 struct i915_vma * 2849 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 2850 const struct i915_ggtt_view *view); 2851 2852 static inline struct i915_vma * 2853 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 2854 { 2855 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 2856 } 2857 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); 2858 2859 /* Some GGTT VM helpers */ 2860 #define i915_obj_to_ggtt(obj) \ 2861 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 2862 static inline bool i915_is_ggtt(struct i915_address_space *vm) 2863 { 2864 struct i915_address_space *ggtt = 2865 &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; 2866 return vm == ggtt; 2867 } 2868 2869 static inline struct i915_hw_ppgtt * 2870 i915_vm_to_ppgtt(struct i915_address_space *vm) 2871 { 2872 WARN_ON(i915_is_ggtt(vm)); 2873 2874 
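	/*
	 * A ppgtt embeds its i915_address_space as the "base" member, so
	 * the containing i915_hw_ppgtt can be recovered with container_of().
	 * The WARN_ON() above catches callers that hand in the GGTT, which
	 * is embedded in a different structure.
	 */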
return container_of(vm, struct i915_hw_ppgtt, base); 2875 } 2876 2877 2878 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 2879 { 2880 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 2881 } 2882 2883 static inline unsigned long 2884 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 2885 { 2886 return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); 2887 } 2888 2889 static inline int __must_check 2890 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 2891 uint32_t alignment, 2892 unsigned flags) 2893 { 2894 return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), 2895 alignment, flags | PIN_GLOBAL); 2896 } 2897 2898 static inline int 2899 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 2900 { 2901 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 2902 } 2903 2904 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 2905 const struct i915_ggtt_view *view); 2906 static inline void 2907 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 2908 { 2909 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 2910 } 2911 2912 /* i915_gem_context.c */ 2913 int __must_check i915_gem_context_init(struct drm_device *dev); 2914 void i915_gem_context_fini(struct drm_device *dev); 2915 void i915_gem_context_reset(struct drm_device *dev); 2916 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 2917 int i915_gem_context_enable(struct drm_i915_private *dev_priv); 2918 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 2919 int i915_switch_context(struct intel_engine_cs *ring, 2920 struct intel_context *to); 2921 struct intel_context * 2922 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 2923 void i915_gem_context_free(struct kref *ctx_ref); 2924 struct drm_i915_gem_object * 2925 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 2926 static inline void i915_gem_context_reference(struct intel_context *ctx) 2927 { 2928 kref_get(&ctx->ref); 2929 } 2930 2931 static inline void i915_gem_context_unreference(struct intel_context *ctx) 2932 { 2933 kref_put(&ctx->ref, i915_gem_context_free); 2934 } 2935 2936 static inline bool i915_gem_context_is_default(const struct intel_context *c) 2937 { 2938 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 2939 } 2940 2941 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 2942 struct drm_file *file); 2943 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2944 struct drm_file *file); 2945 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 2946 struct drm_file *file_priv); 2947 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 2948 struct drm_file *file_priv); 2949 2950 /* i915_gem_evict.c */ 2951 int __must_check i915_gem_evict_something(struct drm_device *dev, 2952 struct i915_address_space *vm, 2953 int min_size, 2954 unsigned alignment, 2955 unsigned cache_level, 2956 unsigned long start, 2957 unsigned long end, 2958 unsigned flags); 2959 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2960 int i915_gem_evict_everything(struct drm_device *dev); 2961 2962 /* belongs in i915_gem_gtt.h */ 2963 static inline void i915_gem_chipset_flush(struct drm_device *dev) 2964 { 2965 if (INTEL_INFO(dev)->gen < 6) 2966 intel_gtt_chipset_flush(); 2967 } 2968 2969 /* i915_gem_stolen.c */ 2970 int i915_gem_init_stolen(struct drm_device *dev); 2971 int i915_gem_stolen_setup_compression(struct drm_device *dev, int 
size, int fb_cpp); 2972 void i915_gem_stolen_cleanup_compression(struct drm_device *dev); 2973 void i915_gem_cleanup_stolen(struct drm_device *dev); 2974 struct drm_i915_gem_object * 2975 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 2976 struct drm_i915_gem_object * 2977 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 2978 u32 stolen_offset, 2979 u32 gtt_offset, 2980 u32 size); 2981 2982 /* i915_gem_shrinker.c */ 2983 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 2984 long target, 2985 unsigned flags); 2986 #define I915_SHRINK_PURGEABLE 0x1 2987 #define I915_SHRINK_UNBOUND 0x2 2988 #define I915_SHRINK_BOUND 0x4 2989 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 2990 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 2991 2992 2993 /* i915_gem_tiling.c */ 2994 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2995 { 2996 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2997 2998 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 2999 obj->tiling_mode != I915_TILING_NONE; 3000 } 3001 3002 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3003 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3004 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3005 3006 /* i915_gem_debug.c */ 3007 #if WATCH_LISTS 3008 int i915_verify_lists(struct drm_device *dev); 3009 #else 3010 #define i915_verify_lists(dev) 0 3011 #endif 3012 3013 /* i915_debugfs.c */ 3014 int i915_debugfs_init(struct drm_minor *minor); 3015 void i915_debugfs_cleanup(struct drm_minor *minor); 3016 #ifdef CONFIG_DEBUG_FS 3017 void intel_display_crc_init(struct drm_device *dev); 3018 #else 3019 static inline void intel_display_crc_init(struct drm_device *dev) {} 3020 #endif 3021 3022 /* i915_gpu_error.c */ 3023 __printf(2, 3) 3024 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3025 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3026 const struct i915_error_state_file_priv *error); 3027 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3028 struct drm_i915_private *i915, 3029 size_t count, loff_t pos); 3030 static inline void i915_error_state_buf_release( 3031 struct drm_i915_error_state_buf *eb) 3032 { 3033 kfree(eb->buf); 3034 } 3035 void i915_capture_error_state(struct drm_device *dev, bool wedge, 3036 const char *error_msg); 3037 void i915_error_state_get(struct drm_device *dev, 3038 struct i915_error_state_file_priv *error_priv); 3039 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3040 void i915_destroy_error_state(struct drm_device *dev); 3041 3042 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3043 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3044 3045 /* i915_gem_batch_pool.c */ 3046 void i915_gem_batch_pool_init(struct drm_device *dev, 3047 struct i915_gem_batch_pool *pool); 3048 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); 3049 struct drm_i915_gem_object* 3050 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size); 3051 3052 /* i915_cmd_parser.c */ 3053 int i915_cmd_parser_get_version(void); 3054 int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); 3055 void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); 3056 bool i915_needs_cmd_parser(struct intel_engine_cs *ring); 3057 int i915_parse_cmds(struct 
intel_engine_cs *ring, 3058 struct drm_i915_gem_object *batch_obj, 3059 struct drm_i915_gem_object *shadow_batch_obj, 3060 u32 batch_start_offset, 3061 u32 batch_len, 3062 bool is_master); 3063 3064 /* i915_suspend.c */ 3065 extern int i915_save_state(struct drm_device *dev); 3066 extern int i915_restore_state(struct drm_device *dev); 3067 3068 /* i915_sysfs.c */ 3069 void i915_setup_sysfs(struct drm_device *dev_priv); 3070 void i915_teardown_sysfs(struct drm_device *dev_priv); 3071 3072 /* intel_i2c.c */ 3073 extern int intel_setup_gmbus(struct drm_device *dev); 3074 extern void intel_teardown_gmbus(struct drm_device *dev); 3075 static inline bool intel_gmbus_is_port_valid(unsigned port) 3076 { 3077 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); 3078 } 3079 3080 extern struct i2c_adapter *intel_gmbus_get_adapter( 3081 struct drm_i915_private *dev_priv, unsigned port); 3082 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3083 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3084 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3085 { 3086 struct intel_iic_softc *sc; 3087 sc = device_get_softc(device_get_parent(adapter)); 3088 3089 return sc->force_bit_dev; 3090 } 3091 extern void intel_i2c_reset(struct drm_device *dev); 3092 3093 /* intel_opregion.c */ 3094 #ifdef CONFIG_ACPI 3095 extern int intel_opregion_setup(struct drm_device *dev); 3096 extern void intel_opregion_init(struct drm_device *dev); 3097 extern void intel_opregion_fini(struct drm_device *dev); 3098 extern void intel_opregion_asle_intr(struct drm_device *dev); 3099 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3100 bool enable); 3101 extern int intel_opregion_notify_adapter(struct drm_device *dev, 3102 pci_power_t state); 3103 #else 3104 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3105 static inline void intel_opregion_init(struct drm_device *dev) { return; } 3106 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3107 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3108 static inline int 3109 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3110 { 3111 return 0; 3112 } 3113 static inline int 3114 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3115 { 3116 return 0; 3117 } 3118 #endif 3119 3120 /* intel_acpi.c */ 3121 #ifdef CONFIG_ACPI 3122 extern void intel_register_dsm_handler(void); 3123 extern void intel_unregister_dsm_handler(void); 3124 #else 3125 static inline void intel_register_dsm_handler(void) { return; } 3126 static inline void intel_unregister_dsm_handler(void) { return; } 3127 #endif /* CONFIG_ACPI */ 3128 3129 /* modesetting */ 3130 extern void intel_modeset_init_hw(struct drm_device *dev); 3131 extern void intel_modeset_init(struct drm_device *dev); 3132 extern void intel_modeset_gem_init(struct drm_device *dev); 3133 extern void intel_modeset_cleanup(struct drm_device *dev); 3134 extern void intel_connector_unregister(struct intel_connector *); 3135 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3136 extern void intel_modeset_setup_hw_state(struct drm_device *dev, 3137 bool force_restore); 3138 extern void i915_redisable_vga(struct drm_device *dev); 3139 extern void i915_redisable_vga_power_on(struct drm_device *dev); 3140 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3141 extern void 
intel_init_pch_refclk(struct drm_device *dev); 3142 extern void intel_set_rps(struct drm_device *dev, u8 val); 3143 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3144 bool enable); 3145 extern void intel_detect_pch(struct drm_device *dev); 3146 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 3147 extern int intel_enable_rc6(const struct drm_device *dev); 3148 3149 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3150 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3151 struct drm_file *file); 3152 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3153 struct drm_file *file); 3154 3155 struct intel_device_info *i915_get_device_id(int device); 3156 3157 /* overlay */ 3158 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3159 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3160 struct intel_overlay_error_state *error); 3161 3162 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3163 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3164 struct drm_device *dev, 3165 struct intel_display_error_state *error); 3166 3167 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3168 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3169 3170 /* intel_sideband.c */ 3171 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3172 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3173 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3174 u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); 3175 void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3176 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3177 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3178 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3179 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3180 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3181 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3182 u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); 3183 void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3184 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg); 3185 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val); 3186 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3187 enum intel_sbi_destination destination); 3188 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3189 enum intel_sbi_destination destination); 3190 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3191 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3192 3193 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3194 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3195 3196 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3197 #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) 3198 3199 #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) 3200 #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) 3201 
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) 3202 #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) 3203 3204 #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) 3205 #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) 3206 #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 3207 #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 3208 3209 /* Be very careful with read/write 64-bit values. On 32-bit machines, they 3210 * will be implemented using 2 32-bit writes in an arbitrary order with 3211 * an arbitrary delay between them. This can cause the hardware to 3212 * act upon the intermediate value, possibly leading to corruption and 3213 * machine death. You have been warned. 3214 */ 3215 #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 3216 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3217 3218 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3219 u32 upper = I915_READ(upper_reg); \ 3220 u32 lower = I915_READ(lower_reg); \ 3221 u32 tmp = I915_READ(upper_reg); \ 3222 if (upper != tmp) { \ 3223 upper = tmp; \ 3224 lower = I915_READ(lower_reg); \ 3225 WARN_ON(I915_READ(upper_reg) != upper); \ 3226 } \ 3227 (u64)upper << 32 | lower; }) 3228 3229 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3230 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3231 3232 /* "Broadcast RGB" property */ 3233 #define INTEL_BROADCAST_RGB_AUTO 0 3234 #define INTEL_BROADCAST_RGB_FULL 1 3235 #define INTEL_BROADCAST_RGB_LIMITED 2 3236 3237 static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) 3238 { 3239 if (IS_VALLEYVIEW(dev)) 3240 return VLV_VGACNTRL; 3241 else if (INTEL_INFO(dev)->gen >= 5) 3242 return CPU_VGACNTRL; 3243 else 3244 return VGACNTRL; 3245 } 3246 3247 static inline void __user *to_user_ptr(u64 address) 3248 { 3249 return (void __user *)(uintptr_t)address; 3250 } 3251 3252 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3253 { 3254 unsigned long j = msecs_to_jiffies(m); 3255 3256 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3257 } 3258 3259 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 3260 { 3261 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 3262 } 3263 3264 static inline unsigned long 3265 timespec_to_jiffies_timeout(const struct timespec *value) 3266 { 3267 unsigned long j = timespec_to_jiffies(value); 3268 3269 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3270 } 3271 3272 /* 3273 * If you need to wait X milliseconds between events A and B, but event B 3274 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 3275 * when event A happened, then just before event B you call this function and 3276 * pass the timestamp as the first argument, and X as the second argument. 3277 */ 3278 static inline void 3279 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 3280 { 3281 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 3282 3283 /* 3284 * Don't re-read the value of "jiffies" every time since it may change 3285 * behind our back and break the math. 
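	 * Sampling it once up front keeps the target computation and the
	 * time_after() check below consistent with each other.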
3286 */ 3287 tmp_jiffies = jiffies; 3288 target_jiffies = timestamp_jiffies + 3289 msecs_to_jiffies_timeout(to_wait_ms); 3290 3291 if (time_after(target_jiffies, tmp_jiffies)) { 3292 remaining_jiffies = target_jiffies - tmp_jiffies; 3293 #if 0 3294 while (remaining_jiffies) 3295 remaining_jiffies = 3296 schedule_timeout_uninterruptible(remaining_jiffies); 3297 #else 3298 msleep(jiffies_to_msecs(remaining_jiffies)); 3299 #endif 3300 } 3301 } 3302 3303 static inline void i915_trace_irq_get(struct intel_engine_cs *ring, 3304 struct drm_i915_gem_request *req) 3305 { 3306 if (ring->trace_irq_req == NULL && ring->irq_get(ring)) 3307 i915_gem_request_assign(&ring->trace_irq_req, req); 3308 } 3309 3310 #endif 3311