/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi_drm/i915_drm.h>
#include <uapi_drm/drm_fourcc.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/kconfig.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>

#define CONFIG_DRM_I915_FBDEV 1
#define CONFIG_DRM_I915_KMS 1
#define CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT 1
#define CONFIG_ACPI 1
#define CONFIG_X86 1

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20150522"

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, format); \
		else \
			DRM_ERROR(format); \
	} \
	unlikely(__ret_warn_on); \
})

#define I915_STATE_WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) { \
		if (i915.verbose_state_checks) \
			WARN(1, "WARN_ON(" #condition ")\n"); \
		else \
			DRM_ERROR("WARN_ON(" #condition ")\n"); \
	} \
	unlikely(__ret_warn_on); \
})
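
/*
 * Illustrative sketch only (the register and variable names here are
 * hypothetical): a typical hw state cross-check built on the macros above
 * would look like
 *
 *	bool cur_state = !!(I915_READ(PLL_ENABLE_REG) & PLL_ENABLE_BIT);
 *	I915_STATE_WARN(cur_state != expected,
 *			"PLL state mismatch (expected %i, found %i)\n",
 *			expected, cur_state);
 */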

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * This is the maximum (across all platforms) number of planes (primary +
 * sprites) that can be active at the same time on one pipe.
 *
 * This value doesn't count the cursor plane.
 */
#define I915_MAX_PLANES	4

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)
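
/*
 * Example (follows directly from the definitions above): the *_name()
 * helpers map an enum value to the letter used in debug output, e.g.
 *
 *	DRM_DEBUG_KMS("pipe %c, port %c\n",
 *		      pipe_name(PIPE_B), port_name(PORT_C));
 *
 * prints "pipe B, port C".
 */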

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &dev->mode_config.plane_list, \
			    base.head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &dev->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		if ((1 << (domain)) & (mask))
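
/*
 * Illustrative sketch of the iterators above (dev_priv, mask and
 * use_count[] are assumed to exist in the caller):
 *
 *	enum i915_pipe pipe;
 *	enum intel_display_power_domain domain;
 *
 *	for_each_pipe(dev_priv, pipe)
 *		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));
 *
 *	for_each_power_domain(domain, mask)
 *		use_count[domain]++;
 */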

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		struct spinlock lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	struct intel_engine_cs *bsd_ring;
};

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the lower
	 * part of ctrl1 and they get shifted into position when writing
	 * the register. This allows us to easily compare the state to
	 * share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;
	struct intel_shared_dpll_config *new_config;

	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};
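
/*
 * Sketch (an assumption following the ctrl1 comment above, not a quote of
 * the driver): a candidate PLL state can be compared wholesale to decide
 * whether an already configured DPLL may be shared:
 *
 *	if (memcmp(&hw_state, &pll->config.hw_state, sizeof(hw_state)) == 0)
 *		return pll;	// states match, share this DPLL
 */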

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
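
/*
 * Illustrative call (the values are an example, not taken from the
 * driver): for a 1080p mode (148.5 MHz pixel clock) at 24bpp over 4 DP
 * lanes at a 270 MHz link clock (both clocks in kHz here):
 *
 *	struct intel_link_m_n m_n;
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 */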

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_RINGS], wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   struct drm_display_mode *mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector, enum i915_pipe pipe);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};
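
/*
 * Sketch of the intended use (intel_uncore_forcewake_get/put are the
 * driver's wrappers around the force_wake_get/put vfuncs below; the burst
 * of register access in between is hypothetical): keep the render well
 * awake across several MMIO accesses rather than waking it per access:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... several I915_READ()/I915_WRITE() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */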

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	struct lock lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)

enum csr_state {
	FW_UNINITIALIZED = 0,
	FW_LOADED,
	FW_FAILED
};

struct intel_csr {
	const char *fw_path;
	__be32 *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t mmio_count;
	uint32_t mmioaddr[8];
	uint32_t mmiodata[8];
	enum csr_state state;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
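
/*
 * DEV_INFO_FOR_EACH_FLAG() is an X-macro: it applies func() to every flag
 * name, separated by sep. With the DEFINE_FLAG/SEP_SEMICOLON pair above it
 * expands inside struct intel_device_info below to, effectively:
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 */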

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting any more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	bool rcs_initialized;
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};
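
/*
 * Illustrative (hypothetical caller): with execlists, per-engine context
 * state is reached through the engine[] array, indexed by ring id, e.g.
 *
 *	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 */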

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
};

struct i915_fbc {
	unsigned long uncompressed_size;
	unsigned threshold;
	unsigned int fb_id;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature
	 * is possible. */
	bool enabled;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
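
/*
 * Illustrative (hypothetical snippet, modelled on the backlight code):
 * quirk bits are tested against dev_priv->quirks, e.g.
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		level = panel->backlight.max - level;
 */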

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct drm_i915_private *dev_priv;
};

struct intel_iic_softc {
	struct drm_device *drm_dev;
	device_t iic_dev;
	bool force_bit_dev;
	char name[32];
	uint32_t reg;
	uint32_t reg0;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	struct spinlock client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work delayed_resume_work;
	unsigned boosts;

	struct intel_rps_client semaphores, mmioflips;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct lock hw_lock;
};
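
/*
 * Sketch (intel_gpu_freq() is the helper that applies the platform
 * specific multiplier described above; the debug print is hypothetical):
 *
 *	DRM_DEBUG_DRIVER("current GPU freq: %d MHz\n",
 *			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
 */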

/* defined in intel_pm.c */
extern struct lock mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct lock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};
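
/*
 * Illustrative (intel_display_power_get/put are the entry points that
 * take the refcounts above; the domain is an example): code about to
 * touch pipe A registers would bracket the access with
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 */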

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
#if 0
	struct shrinker shrinker;
#endif
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	struct spinlock object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	struct lock lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * means that reset is in progress and even values that
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)
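
	/*
	 * Sketch of how the counter decodes, consistent with the comment
	 * above (the helpers built on this live elsewhere in this header;
	 * "error" is a hypothetical pointer to this struct):
	 *
	 *	in_progress = atomic_read(&error->reset_counter) &
	 *		      I915_RESET_IN_PROGRESS_FLAG;
	 *	wedged      = atomic_read(&error->reset_counter) & I915_WEDGED;
	 */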

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN	(1 << 31)
#define I915_STOP_RING_ALLOW_WARN	(1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	bool reload_in_reset;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_wm_values {
	struct {
		uint16_t primary;
		uint16_t sprite[2];
		uint8_t cursor;
	} pipe[3];

	struct {
		uint16_t plane;
		uint8_t cursor;
	} sr;

	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
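
/*
 * Example (values are illustrative): since 'end' is exclusive, an entry
 * covering blocks 0..159 is
 *
 *	struct skl_ddb_entry e = { .start = 0, .end = 160 };
 *
 * and skl_ddb_entry_size(&e) == 160.
 */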

struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t cursor[I915_MAX_PIPES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};
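
/*
 * Illustrative usage (intel_runtime_pm_get/put are the real helpers, the
 * surrounding code is hypothetical): hardware access is bracketed with
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... register access ...
 *	intel_runtime_pm_put(dev_priv);
 */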

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	struct spinlock lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct lock lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct i915_virtual_gpu {
	bool active;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	struct intel_device_info info;

	int relative_constants_mode;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	device_t *bbbus;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;
	char __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_csr csr;

	/* Display CSR-related protection */
	struct lock csr_lock;

	device_t *gmbus;

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	struct _drm_i915_sarea *sarea_priv;
	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	struct lock irq_lock;

	/* protects the mmio flip data */
	struct spinlock mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct lock sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct work_struct hotplug_work;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct delayed_work hotplug_reenable_work;

	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq;
	unsigned int hpll_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	bool audio_component_registered;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 chv_phy_control;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};
	} wm;

	struct i915_runtime_pm pm;

	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
	u32 long_hpd_port_mask;
	u32 short_hpd_port_mask;
	struct work_struct dig_port_work;

	/*
	 * if we get a HPD irq from DP and a HPD irq from non-DP
	 * the non-DP HPD could block the workqueue on a mode config
	 * mutex that userspace may have taken. However userspace is
	 * waiting on the DP workqueue to run which is blocked behind
	 * the non-DP one.
	 */
	struct workqueue_struct *dp_wq;

	uint32_t bios_vgacntr;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
				      struct intel_engine_cs *ring,
				      struct intel_context *ctx,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas,
				      struct drm_i915_gem_object *batch_obj,
				      u64 exec_start, u32 flags);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	bool edp_low_vswing;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
	BUG();
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
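
/*
 * Example (follows from the macros above): with 4 bits per pipe, pipe B
 * (pipe == 1) occupies bits 4..7, so
 *
 *	INTEL_FRONTBUFFER_PRIMARY(PIPE_B)  == 1 << 4
 *	INTEL_FRONTBUFFER_SPRITE(PIPE_B)   == 1 << 6
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) == 0xf << 4
 */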
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list[I915_NUM_RINGS];
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:I915_NUM_RINGS;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

	unsigned int pin_display;

	struct vm_page **pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;
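	/*
	 * Illustrative consequence of the breadcrumb rules above: a CPU
	 * read of a busy object only has to wait for last_write_req to
	 * complete, while the per-ring last_read_req[] entries matter
	 * when the object must be fully idle (e.g. before unbinding).
	 */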
	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		/** for phy allocated objects */
		struct drm_dma_handle *phys_handle;

		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted, so upon creation they should have an
 * initial reference taken using kref_init.
 */
struct drm_i915_gem_request {
	struct kref ref;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_context *ctx;
	struct intel_ringbuffer *ringbuf;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	pid_t pid;
	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Execlists: number of times this request has been sent to the ELSP */
	int elsp_submitted;

};

int i915_gem_request_alloc(struct intel_engine_cs *ring,
			   struct intel_context *ctx);
void i915_gem_request_free(struct kref *req_ref);

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

static inline struct drm_i915_gem_request *
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	if (req)
		kref_get(&req->ref);
	return req;
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
}

static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;

	if (!req)
		return;

	dev = req->ring->dev;
	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}

/*
 * XXX: i915_gem_request_completed should be here but currently needs the
 * definition of i915_seqno_passed() which is below. It will be moved in
 * a later patch when the call to i915_seqno_passed() is obsoleted...
 */
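/*
 * Usage sketch: replacing a tracked request must take the new reference
 * before dropping the old one, which is exactly the order the helper
 * above uses, e.g.
 *
 *	i915_gem_request_assign(&ring->trace_irq_req, req);
 *
 * (see i915_trace_irq_get() at the bottom of this header).
 */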
/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_BITMASK: The command is validated against the masked-dword
	 *                   checks described in the bits[] array below
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 *
	 * A non-zero step value implies that the command may access multiple
	 * registers in sequence (e.g. LRI), in that case step gives the
	 * distance in dwords between individual offset fields.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 step;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
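/*
 * Purely illustrative (made-up values) descriptor for a fixed-length
 * command that writes one register and must therefore be checked against
 * the ring's whitelist; the real tables live in i915_cmd_parser.c:
 *
 *	{
 *		.flags  = CMD_DESC_FIXED | CMD_DESC_REGISTER,
 *		.cmd    = { .value = 0x11000000, .mask = 0xff800000 },
 *		.length = { .fixed = 3 },
 *		.reg    = { .offset = 1, .mask = 0x007ffffc },
 *	},
 */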
/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
#define __I915__(p) ({ \
	const struct drm_i915_private *__p; \
	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
		__p = (const struct drm_i915_private *)p; \
	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
		__p = to_i915((const struct drm_device *)p); \
	__p; \
})
#define INTEL_INFO(p)	(&__I915__(p)->info)
#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)

#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
				 INTEL_DEVID(dev) == 0x0152 || \
				 INTEL_DEVID(dev) == 0x015a)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev)		(INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev)		(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
				  (INTEL_DEVID(dev) & 0xf) == 0xb || \
				  (INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
				 INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

#define SKL_REVID_A0		(0x0)
#define SKL_REVID_B0		(0x1)
#define SKL_REVID_C0		(0x2)
#define SKL_REVID_D0		(0x3)
#define SKL_REVID_E0		(0x4)
#define SKL_REVID_F0		(0x5)

#define BXT_REVID_A0		(0x0)
#define BXT_REVID_B0		(0x3)
#define BXT_REVID_C0		(0x6)
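/*
 * Note on __I915__() above: because it dispatches on the pointee type,
 * the feature macros work with either pointer flavour, e.g. both
 *
 *	INTEL_INFO(dev)->gen       (dev is a struct drm_device *)
 *	INTEL_INFO(dev_priv)->gen  (dev_priv is a struct drm_i915_private *)
 *
 * expand to the same thing.
 */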
/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)

#define RENDER_RING	(1<<RCS)
#define BSD_RING	(1<<VCS)
#define BLT_RING	(1<<BCS)
#define VEBOX_RING	(1<<VECS)
#define BSD2_RING	(1<<VCS2)
#define HAS_BSD(dev)	(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev)	(INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev)	(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)	(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)	(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)	((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
			 __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)		(INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev)			(i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev)		(i915.enable_ppgtt == 2)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 */
#define HAS_AUX_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev)	(INTEL_INFO(dev)->gen >= 5)
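/*
 * Example (arithmetic only) for the ring_mask checks above: a device whose
 * info has ring_mask = RENDER_RING | BSD_RING | BLT_RING makes HAS_BSD()
 * and HAS_BLT() true while HAS_VEBOX() and HAS_BSD2() stay false.
 */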
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev)	(!IS_GEN2(dev) && !(IS_I915G(dev) || \
						IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
				 INTEL_INFO(dev)->gen >= 9)

#define HAS_DDI(dev)			(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
				 IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
				 IS_SKYLAKE(dev))
#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))

#define HAS_CSR(dev)		(IS_SKYLAKE(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00

#define INTEL_PCH_TYPE(dev)	(__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev)	(INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev)	(INTEL_PCH_TYPE(dev) != PCH_NONE)
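/*
 * Sketch of how the PCH ids above are meant to be consumed (the real logic
 * lives in intel_detect_pch(), declared near the end of this header):
 * mask the PCH PCI device id and compare against a type value, roughly
 *
 *	if ((id & INTEL_PCH_DEVICE_ID_MASK) == INTEL_PCH_LPT_DEVICE_ID_TYPE)
 *		dev_priv->pch_type = PCH_LPT;
 *
 * after which HAS_PCH_LPT(dev) above evaluates true.
 */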
#define HAS_GMCH_DISPLAY(dev)	(INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev)		(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev)	(IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

extern int i915_suspend_legacy(device_t kdev);
extern int i915_resume_legacy(struct drm_device *dev);

/* i915_params.c */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	int semaphores;
	unsigned int lvds_downclock;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_execlists;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	bool load_detect_test;
	int reset;
	bool disable_display;
	bool disable_vtd_wa;
	int use_mmio_flip;
	int mmio_debug;
	bool verbose_state_checks;
	bool nuclear_pageflip;
	int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
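/*
 * The module parameters are read straight off this global, e.g. the
 * USES_PPGTT()/USES_FULL_PPGTT() macros earlier in this header test
 * i915.enable_ppgtt.
 */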
/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
void i915_firmware_load_error_print(const char *fw_path, int err);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
					bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
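/*
 * Sketch (illustrative) of the raw-access pattern the __locked variants
 * above exist for, inside an irq-safe uncore.lock critical section:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * FORCEWAKE_ALL is assumed to be the all-domains value of
 * enum forcewake_domains; I915_READ_FW() is defined near the end of
 * this header.
 */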
static inline bool intel_vgpu_active(struct drm_device *dev)
{
	return to_i915(dev)->vgpu.active;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
					struct intel_engine_cs *ring);
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
					 struct drm_file *file,
					 struct intel_engine_cs *ring,
					 struct drm_i915_gem_object *obj);
int i915_gem_ringbuffer_submission(struct drm_device *dev,
				   struct drm_file *file,
				   struct intel_engine_cs *ring,
				   struct intel_context *ctx,
				   struct drm_i915_gem_execbuffer2 *args,
				   struct list_head *vmas,
				   struct drm_i915_gem_object *batch_obj,
				   u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

/* Flags used by pin/bind & friends. */
#define PIN_MAPPABLE	(1<<0)
#define PIN_NONBLOCK	(1<<1)
#define PIN_GLOBAL	(1<<2)
#define PIN_OFFSET_BIAS	(1<<3)
#define PIN_USER	(1<<4)
#define PIN_UPDATE	(1<<5)
#define PIN_OFFSET_MASK (~4095)
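/*
 * Usage sketch for the PIN_* flags: pinning a buffer into the global GTT
 * so that it stays CPU-mappable through the aperture could look like
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
 *
 * (4096 is just an example alignment; the GGTT helper further down adds
 * PIN_GLOBAL by itself).
 */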
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags);
int __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags);

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct vm_page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	return obj->pages[n];
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno;

	BUG_ON(req == NULL);

	seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}
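/*
 * Example of why i915_seqno_passed() uses a signed difference: with
 * seq1 == 1 and seq2 == 0xffffffff, (int32_t)(seq1 - seq2) == 2 >= 0,
 * so seq1 is correctly treated as later even though the 32-bit seqno
 * space has wrapped.
 */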
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}

static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}

static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj);
#define i915_add_request(ring) \
	__i915_add_request(ring, NULL, NULL)
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct intel_rps_client *rps);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres);
int __must_check
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined,
				     const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					      const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

#if 0
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);
#endif

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
			      const struct i915_ggtt_view *view);
unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm);
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
			  const struct i915_ggtt_view *view);

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view);

static inline struct i915_vma *
i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));

	return container_of(vm, struct i915_hw_ppgtt, base);
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
				   alignment, flags | PIN_GLOBAL);
}

static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}

void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				     const struct i915_ggtt_view *view);
static inline void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}

/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	struct intel_iic_softc *sc;
	sc = device_get_softc(device_get_parent(adapter));

	return sc->force_bit_dev;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

struct intel_device_info *i915_get_device_id(int device);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, tmp;						\
	tmp = I915_READ(upper_reg);					\
	do {								\
		upper = tmp;						\
		lower = I915_READ(lower_reg);				\
		tmp = I915_READ(upper_reg);				\
	} while (upper != tmp);						\
	(u64)upper << 32 | lower; })
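/*
 * Illustrative use of I915_READ64_2x32(): reading a 64-bit counter that
 * is exposed as two 32-bit registers without tearing, e.g.
 *
 *	u64 val = I915_READ64_2x32(LOWER_REG, UPPER_REG);
 *
 * (LOWER_REG/UPPER_REG are placeholders). The upper half is re-read
 * until it is stable around the read of the lower half, so a carry
 * between the two reads cannot produce a torn value.
 */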
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
#if 0
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
#else
		msleep(jiffies_to_msecs(remaining_jiffies));
#endif
	}
}

static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
				      struct drm_i915_gem_request *req)
{
	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
		i915_gem_request_assign(&ring->trace_irq_req, req);
}

#endif