/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/shmem_fs.h>
#include <linux/llist.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20161024"
#define DRIVER_TIMESTAMP	1477290335

#undef WARN_ON
/* Many gcc versions seem unable to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long)(x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})
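/*
 * Illustrative usage (a hypothetical caller, not part of this header):
 * hw state checkers report unexpected-but-survivable conditions via
 * I915_STATE_WARN() so the verbosity stays tunable, e.g.
 *
 *	I915_STATE_WARN(cur_state != state,
 *			"PLL state mismatch (expected %s, found %s)\n",
 *			onoff(state), onoff(cur_state));
 *
 * while MISSING_CASE() flags an unhandled enum value hit in a switch
 * default: branch.
 */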
#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
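/*
 * Illustrative sketch (hypothetical caller): these helpers exist so that
 * debug output names hw blocks consistently, e.g.
 *
 *	DRM_DEBUG_KMS("[transcoder %s] is DSI: %s, pipe %c\n",
 *		      transcoder_name(cpu_transcoder),
 *		      yesno(transcoder_is_dsi(cpu_transcoder)),
 *		      pipe_name(pipe));
 */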
/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_NONE = -1,
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD work could block the workqueue on acquiring a mode
	 * config mutex that userspace may have taken. Meanwhile userspace
	 * is waiting on the DP workqueue to run, which is blocked behind
	 * the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};
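/*
 * Illustrative sketch (hypothetical caller, using only names from this
 * header): the per-pin stats above are typically walked with
 * for_each_hpd_pin(), e.g. to reset irq storm tracking:
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin) {
 *		dev_priv->hotplug.stats[pin].count = 0;
 *		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
 *	}
 */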
#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &(dev)->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		for_each_if ((1 << (domain)) & (mask))

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_engine;
};
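/*
 * Illustrative sketch (hypothetical; the request field names are
 * illustrative, not taken from this header): a throttle implementation
 * built on the limit above would only wait on requests submitted within
 * the last DRM_I915_THROTTLE_JIFFIES:
 *
 *	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
 *	struct drm_i915_gem_request *request;
 *
 *	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
 *		if (time_after_eq(request->emitted_jiffies, recent_enough))
 *			break;
 *
 * Anything older has already been flushed through the GPU.
 */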
/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE	(8 * 1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_fence_reg {
	struct list_head link;
	struct drm_i915_private *i915;
	struct i915_vma *vma;
	int pin_count;
	int id;
	/**
	 * Whether the tiling parameters for the currently
	 * associated fence register have changed. Note that
	 * for the purposes of tracking tiling changes we also
	 * treat the unfenced register, the register slot that
	 * the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	bool dirty;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
	 */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	u64      (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};
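/*
 * Illustrative sketch (hypothetical caller; the get/put helpers are
 * declared elsewhere in the driver, not in this excerpt): code that
 * batches raw MMIO accesses first asks which domains a register needs,
 * then brackets the accesses with an explicit forcewake reference:
 *
 *	enum forcewake_domains fw;
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	... raw register reads ...
 *	intel_uncore_forcewake_put(dev_priv, fw);
 */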
struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func) \
	/* Keep is_* in chronological order */ \
	func(is_mobile); \
	func(is_i85x); \
	func(is_i915g); \
	func(is_i945gm); \
	func(is_g33); \
	func(is_g4x); \
	func(is_pineview); \
	func(is_broadwater); \
	func(is_crestline); \
	func(is_ivybridge); \
	func(is_valleyview); \
	func(is_cherryview); \
	func(is_haswell); \
	func(is_broadwell); \
	func(is_skylake); \
	func(is_broxton); \
	func(is_kabylake); \
	func(is_preliminary); \
	/* Keep has_* in alphabetical order */ \
	func(has_csr); \
	func(has_ddi); \
	func(has_dp_mst); \
	func(has_fbc); \
	func(has_fpga_dbg); \
	func(has_gmbus_irq); \
	func(has_gmch_display); \
	func(has_guc); \
	func(has_hotplug); \
	func(has_hw_contexts); \
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
	func(has_overlay); \
	func(has_pipe_cxsr); \
	func(has_pooled_eu); \
	func(has_psr); \
	func(has_rc6); \
	func(has_rc6p); \
	func(has_resource_streamer); \
	func(has_runtime_pm); \
	func(has_snoop); \
	func(cursor_needs_physical); \
	func(hws_needs_physical); \
	func(overlay_needs_physical); \
	func(supports_tv)
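/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: each expansion site supplies its
 * own per-flag snippet. As a sketch, struct intel_device_info (below)
 * turns every flag into a one-bit bitfield:
 *
 *	#define DEFINE_FLAG(name) u8 name:1
 *	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 *	#undef DEFINE_FLAG
 *
 * which expands to "u8 is_mobile:1; u8 is_i85x:1; ..." and so on, keeping
 * the flag list defined in exactly one place.
 */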
struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u16 gen_mask;
	u8 ring_mask; /* Rings supported by the HW */
	u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
	u16 ddb_size; /* in blocks */
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool waiting;
		int num_waiters;
		int hangcheck_score;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;
		u32 semaphore_seqno[I915_NUM_ENGINES - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[2];

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		   initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct i915_gem_context {
	struct kref ref;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_ppgtt *ppgtt;
	pid_t pid;

	struct i915_ctx_hang_stats hang_stats;

	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)

	/* Unique identifier for this context, used by the hw for tracking */
	unsigned int hw_id;
	u32 user_handle;

	u32 ggtt_alignment;

	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		uint32_t *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];
	u32 ring_size;
	u32 desc_template;
	struct atomic_notifier_head status_notifier;
	bool execlists_force_single_submission;

	struct list_head link;

	u8 remap_slice;
	bool closed:1;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct lock lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			u64 ilk_ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
			unsigned int tiling_mode;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct {
			enum i915_pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			u64 ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intr_keep;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work autoenable_work;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct lock hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct lock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct lock stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set,
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	unsigned long flags;
#define I915_RESET_IN_PROGRESS	0
#define I915_WEDGED		(BITS_PER_LONG - 1)

	/**
	 * Waitqueue to signal when a hang is detected. Used by waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	atomic_t atomic_seq;
	bool suspended;
	bool irqs_enabled;
};
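/*
 * Illustrative sketch (hypothetical caller; the getters/putters are
 * declared elsewhere in the driver, not in this excerpt): hardware access
 * outside of a display power domain is bracketed by a runtime PM wakeref:
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch registers ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Display-only paths instead take intel_display_power_get()/put() with the
 * relevant enum intel_display_power_domain.
 */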

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines, the limit will need to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
	bool active;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct drm_i915_private {
	struct drm_device drm;

	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	uint32_t psr_mmio_base;

	uint32_t pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct i915_gem_context *kernel_context;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct i915_vma *semaphore;
	u32 next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct lock sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	struct {
		unsigned int vco, ref;
	} cdclk_pll;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;
	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* The hw wants to have a stable context identifier for the lifetime
	 * of the context (for OA, PASID, faults, etc). This is limited
	 * in execlists to 21 bits.
	 */
	struct ida context_hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */

	/* Kernel Modesetting */

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct lock dpll_lock;

	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state.
	 * Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct lock av_mutex;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool suspended_to_idle;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};

		uint8_t max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct lock wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 */
		bool distrust_bios_wm;
	} wm;

	struct i915_runtime_pm pm;

	uint32_t bios_vgacntr;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		void (*resume)(struct drm_i915_private *);
		void (*cleanup_engine)(struct intel_engine_cs *engine);

		/**
		 * Is the GPU currently considered idle, or busy executing
		 * userspace requests? Whilst idle, we allow runtime power
		 * management to power down the hardware and display clocks.
		 * In order to reduce the effect on performance, there
		 * is a slight delay before we do so.
2078 */ 2079 unsigned int active_engines; 2080 bool awake; 2081 2082 /** 2083 * We leave the user IRQ off as much as possible, 2084 * but this means that requests will finish and never 2085 * be retired once the system goes idle. Set a timer to 2086 * fire periodically while the ring is running. When it 2087 * fires, go retire requests. 2088 */ 2089 struct delayed_work retire_work; 2090 2091 /** 2092 * When we detect an idle GPU, we want to turn on 2093 * powersaving features. So once we see that there 2094 * are no more requests outstanding and no more 2095 * arrive within a small period of time, we fire 2096 * off the idle_work. 2097 */ 2098 struct delayed_work idle_work; 2099 } gt; 2100 2101 /* perform PHY state sanity checks? */ 2102 bool chv_phy_assert[2]; 2103 2104 /* Used to save the pipe-to-encoder mapping for audio */ 2105 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2106 2107 /* 2108 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2109 * will be rejected. Instead look for a better place. 2110 */ 2111 }; 2112 2113 static inline struct drm_i915_private *to_i915(struct drm_device *dev) 2114 { 2115 return container_of(dev, struct drm_i915_private, drm); 2116 } 2117 2118 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2119 { 2120 return to_i915(dev_get_drvdata(kdev)); 2121 } 2122 2123 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2124 { 2125 return container_of(guc, struct drm_i915_private, guc); 2126 } 2127 2128 /* Simple iterator over all initialised engines */ 2129 #define for_each_engine(engine__, dev_priv__, id__) \ 2130 for ((id__) = 0; \ 2131 (id__) < I915_NUM_ENGINES; \ 2132 (id__)++) \ 2133 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2134 2135 #define __mask_next_bit(mask) ({ \ 2136 int __idx = ffs(mask) - 1; \ 2137 mask &= ~BIT(__idx); \ 2138 __idx; \ 2139 }) 2140 2141 /* Iterator over subset of engines selected by mask */ 2142 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2143 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2144 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2145 2146 enum hdmi_force_audio { 2147 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2148 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2149 HDMI_AUDIO_AUTO, /* trust EDID */ 2150 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2151 }; 2152 2153 #define I915_GTT_OFFSET_NONE ((u32)-1) 2154 2155 struct drm_i915_gem_object_ops { 2156 unsigned int flags; 2157 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 2158 2159 /* Interface between the GEM object and its backing storage. 2160 * get_pages() is called once prior to the use of the associated set 2161 * of pages, before binding them into the GTT, and put_pages() is 2162 * called after we no longer need them. As we expect there to be 2163 * an associated cost with migrating pages between the backing storage 2164 * and making them available for the GPU (e.g. clflush), we may hold 2165 * onto the pages after they are no longer referenced by the GPU 2166 * in case they may be used again shortly (for example migrating the 2167 * pages to a different memory domain within the GTT). put_pages() 2168 * will therefore most likely be called when the object itself is 2169 * being released or under memory pressure (where we attempt to 2170 * reap pages for the shrinker).
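 *
 * A hedged sketch of the expected calling pattern, using the page
 * pinning helpers declared later in this header (illustrative only,
 * not the driver's actual code):
 *
 *	ret = i915_gem_object_get_pages(obj);	// invokes ops->get_pages()
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);		// keep obj->pages resident
 *	// ... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);	// allow put_pages() later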
*/ 2172 int (*get_pages)(struct drm_i915_gem_object *); 2173 void (*put_pages)(struct drm_i915_gem_object *); 2174 2175 int (*dmabuf_export)(struct drm_i915_gem_object *); 2176 void (*release)(struct drm_i915_gem_object *); 2177 }; 2178 2179 /* 2180 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2181 * considered to be the frontbuffer for the given plane interface-wise. This 2182 * doesn't mean that the hw necessarily already scans it out, but that any 2183 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2184 * 2185 * We have one bit per pipe and per scanout plane type. 2186 */ 2187 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2188 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2189 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2190 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2191 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2192 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2193 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2194 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2195 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2196 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2197 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2198 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2199 2200 struct drm_i915_gem_object { 2201 struct drm_gem_object base; 2202 2203 const struct drm_i915_gem_object_ops *ops; 2204 2205 /** List of VMAs backed by this object */ 2206 struct list_head vma_list; 2207 2208 /** Stolen memory for this object, instead of being backed by shmem. */ 2209 struct drm_mm_node *stolen; 2210 struct list_head global_list; 2211 2212 /** Used in execbuf to temporarily hold a ref */ 2213 struct list_head obj_exec_link; 2214 2215 struct list_head batch_pool_link; 2216 2217 unsigned long flags; 2218 /** 2219 * This is set if the object is on the active lists (has pending 2220 * rendering and so a non-zero seqno), and is not set if it is on 2221 * the inactive (ready to be unbound) list. 2222 */ 2223 #define I915_BO_ACTIVE_SHIFT 0 2224 #define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1) 2225 #define __I915_BO_ACTIVE(bo) \ 2226 ((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK) 2227 2228 /** 2229 * This is set if the object has been written to since last bound 2230 * to the GTT 2231 */ 2232 unsigned int dirty:1; 2233 2234 /** 2235 * Advice: are the backing pages purgeable? 2236 */ 2237 unsigned int madv:2; 2238 2239 /** 2240 * Whether the current gtt mapping needs to be mappable (and isn't just 2241 * mappable by accident). Track pin and fault separately for a more 2242 * accurate mappable working set. 2243 */ 2244 unsigned int fault_mappable:1; 2245 2246 /* 2247 * Is the object to be mapped as read-only to the GPU? 2248 * Only honoured if hardware has relevant pte bit 2249 */ 2250 unsigned long gt_ro:1; 2251 unsigned int cache_level:3; 2252 unsigned int cache_dirty:1; 2253 2254 atomic_t frontbuffer_bits; 2255 unsigned int frontbuffer_ggtt_origin; /* write once */ 2256 2257 /** Current tiling stride for the object, if it's tiled.
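 *
 * Tiling mode and stride share this one word: a fenceable stride is a
 * multiple of FENCE_MINIMUM_STRIDE (128) bytes, so the low bits are
 * free to hold the I915_TILING_* value. A sketch of the unpacking,
 * mirroring the accessors defined later in this header:
 *
 *	tiling = obj->tiling_and_stride & TILING_MASK;
 *	stride = obj->tiling_and_stride & STRIDE_MASK;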
*/ 2258 unsigned int tiling_and_stride; 2259 #define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ 2260 #define TILING_MASK (FENCE_MINIMUM_STRIDE-1) 2261 #define STRIDE_MASK (~TILING_MASK) 2262 2263 /** Count of VMA actually bound by this object */ 2264 unsigned int bind_count; 2265 unsigned int pin_display; 2266 2267 struct sg_table *pages; 2268 int pages_pin_count; 2269 struct get_page { 2270 struct scatterlist *sg; 2271 int last; 2272 } get_page; 2273 void *mapping; 2274 2275 /** Breadcrumb of last rendering to the buffer. 2276 * There can only be one writer, but we allow for multiple readers. 2277 * If there is a writer that necessarily implies that all other 2278 * read requests are complete - but we may only be lazily clearing 2279 * the read requests. A read request is naturally the most recent 2280 * request on a ring, so we may have two different write and read 2281 * requests on one ring where the write request is older than the 2282 * read request. This allows for the CPU to read from an active 2283 * buffer by only waiting for the write to complete. 2284 */ 2285 struct i915_gem_active last_read[I915_NUM_ENGINES]; 2286 struct i915_gem_active last_write; 2287 2288 /** References from framebuffers, locks out tiling changes. */ 2289 unsigned long framebuffer_references; 2290 2291 /** Record of address bit 17 of each page at last unbind. */ 2292 unsigned long *bit_17; 2293 2294 struct i915_gem_userptr { 2295 uintptr_t ptr; 2296 unsigned read_only :1; 2297 unsigned workers :4; 2298 #define I915_GEM_USERPTR_MAX_WORKERS 15 2299 2300 struct i915_mm_struct *mm; 2301 struct i915_mmu_object *mmu_object; 2302 struct work_struct *work; 2303 } userptr; 2304 2305 /** for phys allocated objects */ 2306 struct drm_dma_handle *phys_handle; 2307 }; 2308 2309 static inline struct drm_i915_gem_object * 2310 to_intel_bo(struct drm_gem_object *gem) 2311 { 2312 /* Assert that to_intel_bo(NULL) == NULL */ 2313 BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); 2314 2315 return container_of(gem, struct drm_i915_gem_object, base); 2316 } 2317 2318 static inline struct drm_i915_gem_object * 2319 i915_gem_object_lookup(struct drm_file *file, u32 handle) 2320 { 2321 return to_intel_bo(drm_gem_object_lookup(file, handle)); 2322 } 2323 2324 __attribute__((nonnull)) 2325 static inline struct drm_i915_gem_object * 2326 i915_gem_object_get(struct drm_i915_gem_object *obj) 2327 { 2328 drm_gem_object_reference(&obj->base); 2329 return obj; 2330 } 2331 2332 __attribute__((nonnull)) 2333 static inline void 2334 i915_gem_object_put(struct drm_i915_gem_object *obj) 2335 { 2336 drm_gem_object_unreference(&obj->base); 2337 } 2338 2339 __attribute__((nonnull)) 2340 static inline void 2341 i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj) 2342 { 2343 drm_gem_object_unreference_unlocked(&obj->base); 2344 } 2345 2346 static inline bool 2347 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) 2348 { 2349 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; 2350 } 2351 2352 static inline unsigned long 2353 i915_gem_object_get_active(const struct drm_i915_gem_object *obj) 2354 { 2355 return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK; 2356 } 2357 2358 static inline bool 2359 i915_gem_object_is_active(const struct drm_i915_gem_object *obj) 2360 { 2361 return i915_gem_object_get_active(obj); 2362 } 2363 2364 static inline void 2365 i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine) 2366 { 2367 obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT); 
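	/*
	 * Illustrative note (an assumption, not taken from the original
	 * header): this read-modify-write of obj->flags is not atomic,
	 * so callers are expected to serialise set_active/clear_active,
	 * e.g. under struct_mutex.
	 */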
2368 } 2369 2370 static inline void 2371 i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine) 2372 { 2373 obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT); 2374 } 2375 2376 static inline bool 2377 i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj, 2378 int engine) 2379 { 2380 return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT); 2381 } 2382 2383 static inline unsigned int 2384 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj) 2385 { 2386 return obj->tiling_and_stride & TILING_MASK; 2387 } 2388 2389 static inline bool 2390 i915_gem_object_is_tiled(struct drm_i915_gem_object *obj) 2391 { 2392 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; 2393 } 2394 2395 static inline unsigned int 2396 i915_gem_object_get_stride(struct drm_i915_gem_object *obj) 2397 { 2398 return obj->tiling_and_stride & STRIDE_MASK; 2399 } 2400 2401 static inline struct i915_vma *i915_vma_get(struct i915_vma *vma) 2402 { 2403 i915_gem_object_get(vma->obj); 2404 return vma; 2405 } 2406 2407 static inline void i915_vma_put(struct i915_vma *vma) 2408 { 2409 lockdep_assert_held(&vma->vm->dev->struct_mutex); 2410 i915_gem_object_put(vma->obj); 2411 } 2412 2413 /* 2414 * Optimised SGL iterator for GEM objects 2415 */ 2416 static __always_inline struct sgt_iter { 2417 struct scatterlist *sgp; 2418 union { 2419 unsigned long pfn; 2420 dma_addr_t dma; 2421 }; 2422 unsigned int curr; 2423 unsigned int max; 2424 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2425 struct sgt_iter s = { .sgp = sgl }; 2426 2427 if (s.sgp) { 2428 s.max = s.curr = s.sgp->offset; 2429 s.max += s.sgp->length; 2430 if (dma) 2431 s.dma = sg_dma_address(s.sgp); 2432 else 2433 s.pfn = page_to_pfn(sg_page(s.sgp)); 2434 } 2435 2436 return s; 2437 } 2438 2439 /** 2440 * __sg_next - return the next scatterlist entry in a list 2441 * @sg: The current sg entry 2442 * 2443 * Description: 2444 * If the entry is the last, return NULL; otherwise, step to the next 2445 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2446 * otherwise just return the pointer to the current element. 2447 **/ 2448 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2449 { 2450 #ifdef CONFIG_DEBUG_SG 2451 BUG_ON(sg->sg_magic != SG_MAGIC); 2452 #endif 2453 return sg_is_last(sg) ? NULL : 2454 likely(!sg_is_chain(++sg)) ? sg : 2455 sg_chain_ptr(sg); 2456 } 2457 2458 /** 2459 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2460 * @__dmap: DMA address (output) 2461 * @__iter: 'struct sgt_iter' (iterator state, internal) 2462 * @__sgt: sg_table to iterate over (input) 2463 */ 2464 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2465 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2466 ((__dmap) = (__iter).dma + (__iter).curr); \ 2467 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2468 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2469 2470 /** 2471 * for_each_sgt_page - iterate over the pages of the given sg_table 2472 * @__pp: page pointer (output) 2473 * @__iter: 'struct sgt_iter' (iterator state, internal) 2474 * @__sgt: sg_table to iterate over (input) 2475 */ 2476 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2477 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2478 ((__pp) = (__iter).pfn == 0 ? 
NULL : \ 2479 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2480 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2481 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2482 2483 /* 2484 * A command that requires special handling by the command parser. 2485 */ 2486 struct drm_i915_cmd_descriptor { 2487 /* 2488 * Flags describing how the command parser processes the command. 2489 * 2490 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2491 * a length mask if not set 2492 * CMD_DESC_SKIP: The command is allowed but does not follow the 2493 * standard length encoding for the opcode range in 2494 * which it falls 2495 * CMD_DESC_REJECT: The command is never allowed 2496 * CMD_DESC_REGISTER: The command should be checked against the 2497 * register whitelist for the appropriate ring 2498 * CMD_DESC_MASTER: The command is allowed if the submitting process 2499 * is the DRM master 2500 */ 2501 u32 flags; 2502 #define CMD_DESC_FIXED (1<<0) 2503 #define CMD_DESC_SKIP (1<<1) 2504 #define CMD_DESC_REJECT (1<<2) 2505 #define CMD_DESC_REGISTER (1<<3) 2506 #define CMD_DESC_BITMASK (1<<4) 2507 #define CMD_DESC_MASTER (1<<5) 2508 2509 /* 2510 * The command's unique identification bits and the bitmask to get them. 2511 * This isn't strictly the opcode field as defined in the spec and may 2512 * also include type, subtype, and/or subop fields. 2513 */ 2514 struct { 2515 u32 value; 2516 u32 mask; 2517 } cmd; 2518 2519 /* 2520 * The command's length. The command is either fixed length (i.e. does 2521 * not include a length field) or has a length field mask. The flag 2522 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2523 * a length mask. All command entries in a command table must include 2524 * length information. 2525 */ 2526 union { 2527 u32 fixed; 2528 u32 mask; 2529 } length; 2530 2531 /* 2532 * Describes where to find a register address in the command to check 2533 * against the ring's register whitelist. Only valid if flags has the 2534 * CMD_DESC_REGISTER bit set. 2535 * 2536 * A non-zero step value implies that the command may access multiple 2537 * registers in sequence (e.g. LRI), in that case step gives the 2538 * distance in dwords between individual offset fields. 2539 */ 2540 struct { 2541 u32 offset; 2542 u32 mask; 2543 u32 step; 2544 } reg; 2545 2546 #define MAX_CMD_DESC_BITMASKS 3 2547 /* 2548 * Describes command checks where a particular dword is masked and 2549 * compared against an expected value. If the command does not match 2550 * the expected value, the parser rejects it. Only valid if flags has 2551 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2552 * are valid. 2553 * 2554 * If the check specifies a non-zero condition_mask then the parser 2555 * only performs the check when the bits specified by condition_mask 2556 * are non-zero. 2557 */ 2558 struct { 2559 u32 offset; 2560 u32 mask; 2561 u32 expected; 2562 u32 condition_offset; 2563 u32 condition_mask; 2564 } bits[MAX_CMD_DESC_BITMASKS]; 2565 }; 2566 2567 /* 2568 * A table of commands requiring special handling by the command parser. 2569 * 2570 * Each engine has an array of tables. Each table consists of an array of 2571 * command descriptors, which must be sorted with command opcodes in 2572 * ascending order. 2573 */ 2574 struct drm_i915_cmd_table { 2575 const struct drm_i915_cmd_descriptor *table; 2576 int count; 2577 }; 2578 2579 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
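 *
 * Illustrative usage: both of the calls below resolve to the same
 * device info, one through the identity cast and one through
 * to_i915():
 *
 *	const struct intel_device_info *a = INTEL_INFO(dev_priv);
 *	const struct intel_device_info *b = INTEL_INFO(&dev_priv->drm);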
*/ 2580 #define __I915__(p) ({ \ 2581 struct drm_i915_private *__p; \ 2582 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2583 __p = (struct drm_i915_private *)p; \ 2584 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2585 __p = to_i915((struct drm_device *)p); \ 2586 else \ 2587 BUILD_BUG(); \ 2588 __p; \ 2589 }) 2590 #define INTEL_INFO(p) (&__I915__(p)->info) 2591 2592 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) 2593 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) 2594 2595 #define REVID_FOREVER 0xff 2596 #define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) 2597 2598 #define GEN_FOREVER (0) 2599 /* 2600 * Returns true if Gen is in the inclusive range [Start, End]. 2601 * 2602 * Use GEN_FOREVER for unbound start and/or end. 2603 */ 2604 #define IS_GEN(dev_priv, s, e) ({ \ 2605 unsigned int __s = (s), __e = (e); \ 2606 BUILD_BUG_ON(!__builtin_constant_p(s)); \ 2607 BUILD_BUG_ON(!__builtin_constant_p(e)); \ 2608 if ((__s) != GEN_FOREVER) \ 2609 __s = (s) - 1; \ 2610 if ((__e) == GEN_FOREVER) \ 2611 __e = BITS_PER_LONG - 1; \ 2612 else \ 2613 __e = (e) - 1; \ 2614 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \ 2615 }) 2616 2617 /* 2618 * Return true if revision is in range [since,until] inclusive. 2619 * 2620 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2621 */ 2622 #define IS_REVID(p, since, until) \ 2623 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2624 2625 #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) 2626 #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) 2627 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2628 #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) 2629 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2630 #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) 2631 #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) 2632 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2633 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2634 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2635 #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) 2636 #define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x) 2637 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2638 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2639 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2640 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2641 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2642 #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge) 2643 #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ 2644 INTEL_DEVID(dev_priv) == 0x0152 || \ 2645 INTEL_DEVID(dev_priv) == 0x015a) 2646 #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview) 2647 #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview) 2648 #define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell) 2649 #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) 2650 #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) 2651 #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) 2652 #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) 2653 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2654 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2655 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2656 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2657 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ 2658
(INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2659 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2660 /* ULX machines are also considered ULT. */ 2661 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2662 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2663 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2664 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2665 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2666 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2667 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2668 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2669 /* ULX machines are also considered ULT. */ 2670 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2671 INTEL_DEVID(dev_priv) == 0x0A1E) 2672 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2673 INTEL_DEVID(dev_priv) == 0x1913 || \ 2674 INTEL_DEVID(dev_priv) == 0x1916 || \ 2675 INTEL_DEVID(dev_priv) == 0x1921 || \ 2676 INTEL_DEVID(dev_priv) == 0x1926) 2677 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2678 INTEL_DEVID(dev_priv) == 0x1915 || \ 2679 INTEL_DEVID(dev_priv) == 0x191E) 2680 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2681 INTEL_DEVID(dev_priv) == 0x5913 || \ 2682 INTEL_DEVID(dev_priv) == 0x5916 || \ 2683 INTEL_DEVID(dev_priv) == 0x5921 || \ 2684 INTEL_DEVID(dev_priv) == 0x5926) 2685 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2686 INTEL_DEVID(dev_priv) == 0x5915 || \ 2687 INTEL_DEVID(dev_priv) == 0x591E) 2688 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2689 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2690 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2691 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2692 2693 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2694 2695 #define SKL_REVID_A0 0x0 2696 #define SKL_REVID_B0 0x1 2697 #define SKL_REVID_C0 0x2 2698 #define SKL_REVID_D0 0x3 2699 #define SKL_REVID_E0 0x4 2700 #define SKL_REVID_F0 0x5 2701 #define SKL_REVID_G0 0x6 2702 #define SKL_REVID_H0 0x7 2703 2704 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2705 2706 #define BXT_REVID_A0 0x0 2707 #define BXT_REVID_A1 0x1 2708 #define BXT_REVID_B0 0x3 2709 #define BXT_REVID_C0 0x9 2710 2711 #define IS_BXT_REVID(dev_priv, since, until) \ 2712 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2713 2714 #define KBL_REVID_A0 0x0 2715 #define KBL_REVID_B0 0x1 2716 #define KBL_REVID_C0 0x2 2717 #define KBL_REVID_D0 0x3 2718 #define KBL_REVID_E0 0x4 2719 2720 #define IS_KBL_REVID(dev_priv, since, until) \ 2721 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2722 2723 /* 2724 * The genX designation typically refers to the render engine, so render 2725 * capability related checks should use IS_GEN, while display and other checks 2726 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2727 * chips, etc.). 
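 *
 * For example (illustrative only):
 *
 *	if (IS_GEN(dev_priv, 6, GEN_FOREVER))	// render: gen6 and newer
 *		...;
 *	if (HAS_PCH_SPLIT(dev_priv))		// display: ILK+ PCH topology
 *		...;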
2728 */ 2729 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) 2730 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) 2731 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) 2732 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) 2733 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) 2734 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2735 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2736 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2737 2738 #define ENGINE_MASK(id) BIT(id) 2739 #define RENDER_RING ENGINE_MASK(RCS) 2740 #define BSD_RING ENGINE_MASK(VCS) 2741 #define BLT_RING ENGINE_MASK(BCS) 2742 #define VEBOX_RING ENGINE_MASK(VECS) 2743 #define BSD2_RING ENGINE_MASK(VCS2) 2744 #define ALL_ENGINES (~0) 2745 2746 #define HAS_ENGINE(dev_priv, id) \ 2747 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) 2748 2749 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) 2750 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) 2751 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) 2752 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) 2753 2754 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2755 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2756 #define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) 2757 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2758 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2759 #define HWS_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->hws_needs_physical) 2760 2761 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->has_hw_contexts) 2762 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->has_logical_ring_contexts) 2763 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2764 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2765 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2766 2767 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2768 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2769 2770 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2771 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) 2772 2773 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2774 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2775 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ 2776 IS_SKL_GT3(dev_priv) || \ 2777 IS_SKL_GT4(dev_priv)) 2778 2779 /* 2780 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2781 * even when in MSI mode. This results in spurious interrupt warnings if the 2782 * legacy irq no. is shared with another device. The kernel then disables that 2783 * interrupt source and so prevents the other device from working properly. 2784 */ 2785 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2786 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->has_gmbus_irq) 2787 2788 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2789 * rows, which changed the alignment requirements and fence programming. 
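 *
 * Illustrative consequence (the pre-945 geometry is an assumption
 * here; the authoritative logic lives in the fence code): a 4 KiB
 * Y-major tile then spans 32 rows of 128 bytes rather than 8 rows of
 * 512, so fence and alignment setup must use the taller tile shape.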
2790 */ 2791 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2792 !(IS_I915G(dev_priv) || \ 2793 IS_I915GM(dev_priv))) 2794 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2795 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2796 2797 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2798 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2799 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2800 2801 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2802 2803 #define HAS_DP_MST(dev) (INTEL_INFO(dev)->has_dp_mst) 2804 2805 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2806 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2807 #define HAS_PSR(dev) (INTEL_INFO(dev)->has_psr) 2808 #define HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) 2809 #define HAS_RC6p(dev) (INTEL_INFO(dev)->has_rc6p) 2810 2811 #define HAS_CSR(dev) (INTEL_INFO(dev)->has_csr) 2812 2813 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2814 /* 2815 * For now, anything with a GuC requires uCode loading, and then supports 2816 * command submission once loaded. But these are logically independent 2817 * properties, so we have separate macros to test them. 2818 */ 2819 #define HAS_GUC(dev) (INTEL_INFO(dev)->has_guc) 2820 #define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) 2821 #define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) 2822 2823 #define HAS_RESOURCE_STREAMER(dev) (INTEL_INFO(dev)->has_resource_streamer) 2824 2825 #define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu) 2826 2827 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2828 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2829 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2830 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2831 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2832 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2833 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2834 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2835 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2836 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2837 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2838 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2839 2840 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2841 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2842 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2843 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2844 #define HAS_PCH_LPT_LP(dev_priv) \ 2845 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2846 #define HAS_PCH_LPT_H(dev_priv) \ 2847 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2848 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2849 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2850 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2851 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2852 2853 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2854 2855 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 2856 2857 /* DPF == dynamic parity feature */ 2858 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2859 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2860 2 : HAS_L3_DPF(dev_priv)) 2861 2862 #define GT_FREQUENCY_MULTIPLIER 50 2863 #define GEN9_FREQ_SCALER 3 2864 2865 #include "i915_trace.h" 2866 2867 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2868 { 2869 #ifdef CONFIG_INTEL_IOMMU 2870 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2871 return true; 2872 #endif 2873 return false; 2874 } 2875 2876 extern int i915_suspend_switcheroo(device_t kdev); 2877 extern int i915_resume_switcheroo(struct drm_device *dev); 2878 2879 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2880 int enable_ppgtt); 2881 2882 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); 2883 2884 /* i915_drv.c */ 2885 void __printf(3, 4) 2886 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2887 const char *fmt, ...); 2888 2889 #define i915_report_error(dev_priv, fmt, ...) \ 2890 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2891 2892 #ifdef CONFIG_COMPAT 2893 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2894 unsigned long arg); 2895 #endif 2896 extern const struct dev_pm_ops i915_pm_ops; 2897 2898 extern int i915_driver_load(struct pci_dev *pdev, 2899 const struct pci_device_id *ent); 2900 extern void i915_driver_unload(struct drm_device *dev); 2901 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 2902 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 2903 extern void i915_reset(struct drm_i915_private *dev_priv); 2904 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2905 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2906 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2907 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2908 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2909 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2910 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2911 2912 /* intel_hotplug.c */ 2913 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 2914 u32 pin_mask, u32 long_mask); 2915 void intel_hpd_init(struct drm_i915_private *dev_priv); 2916 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2917 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2918 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2919 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2920 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2921 2922 /* i915_irq.c */ 2923 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 2924 { 2925 unsigned long delay; 2926 2927 if (unlikely(!i915.enable_hangcheck)) 2928 return; 2929 2930 /* Don't continually defer the hangcheck so that it is always run at 2931 * least once after work has been scheduled on any ring. Otherwise, 2932 * we will ignore a hung ring if a second ring is kept busy. 
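 *
 * round_jiffies_up_relative() is used below so that hangcheck timers
 * across the system batch onto the same whole-second boundary, which
 * keeps wakeups cheap while never making the delay shorter than
 * requested.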
2933 */ 2934 2935 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 2936 queue_delayed_work(system_long_wq, 2937 &dev_priv->gpu_error.hangcheck_work, delay); 2938 } 2939 2940 __printf(3, 4) 2941 void i915_handle_error(struct drm_i915_private *dev_priv, 2942 u32 engine_mask, 2943 const char *fmt, ...); 2944 2945 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2946 int intel_irq_install(struct drm_i915_private *dev_priv); 2947 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2948 2949 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); 2950 extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, 2951 bool restore_forcewake); 2952 extern void intel_uncore_init(struct drm_i915_private *dev_priv); 2953 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2954 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2955 extern void intel_uncore_fini(struct drm_i915_private *dev_priv); 2956 extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, 2957 bool restore); 2958 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2959 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2960 enum forcewake_domains domains); 2961 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2962 enum forcewake_domains domains); 2963 /* Like above but the caller must manage the uncore.lock itself. 2964 * Must be used with I915_READ_FW and friends. 2965 */ 2966 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2967 enum forcewake_domains domains); 2968 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2969 enum forcewake_domains domains); 2970 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2971 2972 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2973 2974 int intel_wait_for_register(struct drm_i915_private *dev_priv, 2975 i915_reg_t reg, 2976 const u32 mask, 2977 const u32 value, 2978 const unsigned long timeout_ms); 2979 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 2980 i915_reg_t reg, 2981 const u32 mask, 2982 const u32 value, 2983 const unsigned long timeout_ms); 2984 2985 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 2986 { 2987 return dev_priv->gvt; 2988 } 2989 2990 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 2991 { 2992 return dev_priv->vgpu.active; 2993 } 2994 2995 void 2996 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2997 u32 status_mask); 2998 2999 void 3000 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 3001 u32 status_mask); 3002 3003 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 3004 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 3005 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 3006 uint32_t mask, 3007 uint32_t bits); 3008 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 3009 uint32_t interrupt_mask, 3010 uint32_t enabled_irq_mask); 3011 static inline void 3012 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3013 { 3014 ilk_update_display_irq(dev_priv, bits, bits); 3015 } 3016 static inline void 3017 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 3018 { 3019 ilk_update_display_irq(dev_priv, bits, 0); 3020 } 3021 void 
bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 3022 enum i915_pipe pipe, 3023 uint32_t interrupt_mask, 3024 uint32_t enabled_irq_mask); 3025 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 3026 enum i915_pipe pipe, uint32_t bits) 3027 { 3028 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 3029 } 3030 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 3031 enum i915_pipe pipe, uint32_t bits) 3032 { 3033 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 3034 } 3035 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 3036 uint32_t interrupt_mask, 3037 uint32_t enabled_irq_mask); 3038 static inline void 3039 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3040 { 3041 ibx_display_interrupt_update(dev_priv, bits, bits); 3042 } 3043 static inline void 3044 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 3045 { 3046 ibx_display_interrupt_update(dev_priv, bits, 0); 3047 } 3048 3049 /* i915_gem.c */ 3050 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3051 struct drm_file *file_priv); 3052 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 3053 struct drm_file *file_priv); 3054 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 3055 struct drm_file *file_priv); 3056 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 3057 struct drm_file *file_priv); 3058 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 3059 struct drm_file *file_priv); 3060 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 3061 struct drm_file *file_priv); 3062 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 3063 struct drm_file *file_priv); 3064 int i915_gem_execbuffer(struct drm_device *dev, void *data, 3065 struct drm_file *file_priv); 3066 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 3067 struct drm_file *file_priv); 3068 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3069 struct drm_file *file_priv); 3070 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3071 struct drm_file *file); 3072 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3073 struct drm_file *file); 3074 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 3075 struct drm_file *file_priv); 3076 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 3077 struct drm_file *file_priv); 3078 int i915_gem_set_tiling(struct drm_device *dev, void *data, 3079 struct drm_file *file_priv); 3080 int i915_gem_get_tiling(struct drm_device *dev, void *data, 3081 struct drm_file *file_priv); 3082 void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 3083 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3084 struct drm_file *file); 3085 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3086 struct drm_file *file_priv); 3087 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 3088 struct drm_file *file_priv); 3089 void i915_gem_load_init(struct drm_device *dev); 3090 void i915_gem_load_cleanup(struct drm_device *dev); 3091 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3092 int i915_gem_freeze(struct drm_i915_private *dev_priv); 3093 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 3094 3095 void *i915_gem_object_alloc(struct drm_device *dev); 3096 void i915_gem_object_free(struct drm_i915_gem_object *obj); 3097 void i915_gem_object_init(struct drm_i915_gem_object *obj, 3098 const struct 
drm_i915_gem_object_ops *ops); 3099 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, 3100 size_t size); 3101 struct drm_i915_gem_object *i915_gem_object_create_from_data( 3102 struct drm_device *dev, const void *data, size_t size); 3103 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 3104 void i915_gem_free_object(struct drm_gem_object *obj); 3105 3106 struct i915_vma * __must_check 3107 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 3108 const struct i915_ggtt_view *view, 3109 u64 size, 3110 u64 alignment, 3111 u64 flags); 3112 3113 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 3114 u32 flags); 3115 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 3116 int __must_check i915_vma_unbind(struct i915_vma *vma); 3117 void i915_vma_close(struct i915_vma *vma); 3118 void i915_vma_destroy(struct i915_vma *vma); 3119 3120 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 3121 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 3122 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 3123 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 3124 3125 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 3126 3127 static inline int __sg_page_count(struct scatterlist *sg) 3128 { 3129 return sg->length >> PAGE_SHIFT; 3130 } 3131 3132 struct page * 3133 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 3134 3135 static inline dma_addr_t 3136 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n) 3137 { 3138 if (n < obj->get_page.last) { 3139 obj->get_page.sg = obj->pages->sgl; 3140 obj->get_page.last = 0; 3141 } 3142 3143 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 3144 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 3145 if (unlikely(sg_is_chain(obj->get_page.sg))) 3146 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 3147 } 3148 3149 return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT); 3150 } 3151 3152 static inline struct page * 3153 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 3154 { 3155 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 3156 return NULL; 3157 3158 if (n < obj->get_page.last) { 3159 obj->get_page.sg = obj->pages->sgl; 3160 obj->get_page.last = 0; 3161 } 3162 3163 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 3164 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 3165 if (unlikely(sg_is_chain(obj->get_page.sg))) 3166 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 3167 } 3168 3169 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 3170 } 3171 3172 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3173 { 3174 GEM_BUG_ON(obj->pages == NULL); 3175 obj->pages_pin_count++; 3176 } 3177 3178 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3179 { 3180 GEM_BUG_ON(obj->pages_pin_count == 0); 3181 obj->pages_pin_count--; 3182 GEM_BUG_ON(obj->pages_pin_count < obj->bind_count); 3183 } 3184 3185 enum i915_map_type { 3186 I915_MAP_WB = 0, 3187 I915_MAP_WC, 3188 }; 3189 3190 /** 3191 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3192 * @obj - the object to map into kernel address space 3193 * @type - the type of mapping, used to select pgprot_t 3194 * 3195 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3196 * pages and then returns 
a contiguous mapping of the backing storage into 3197 * the kernel address space. Based on the @type of mapping, the PTE will be 3198 * set to either WriteBack or WriteCombine (via pgprot_t). 3199 * 3200 * The caller must hold the struct_mutex, and is responsible for calling 3201 * i915_gem_object_unpin_map() when the mapping is no longer required. 3202 * 3203 * Returns the pointer through which to access the mapped object, or an 3204 * ERR_PTR() on error. 3205 */ 3206 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3207 enum i915_map_type type); 3208 3209 /** 3210 * i915_gem_object_unpin_map - releases an earlier mapping 3211 * @obj - the object to unmap 3212 * 3213 * After pinning the object and mapping its pages, once you are finished 3214 * with your access, call i915_gem_object_unpin_map() to release the pin 3215 * upon the mapping. Once the pin count reaches zero, that mapping may be 3216 * removed. 3217 * 3218 * The caller must hold the struct_mutex. 3219 */ 3220 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3221 { 3222 lockdep_assert_held(&obj->base.dev->struct_mutex); 3223 i915_gem_object_unpin_pages(obj); 3224 } 3225 3226 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3227 unsigned int *needs_clflush); 3228 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3229 unsigned int *needs_clflush); 3230 #define CLFLUSH_BEFORE 0x1 3231 #define CLFLUSH_AFTER 0x2 3232 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3233 3234 static inline void 3235 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3236 { 3237 i915_gem_object_unpin_pages(obj); 3238 } 3239 3240 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3241 void i915_vma_move_to_active(struct i915_vma *vma, 3242 struct drm_i915_gem_request *req, 3243 unsigned int flags); 3244 int i915_gem_dumb_create(struct drm_file *file_priv, 3245 struct drm_device *dev, 3246 struct drm_mode_create_dumb *args); 3247 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3248 uint32_t handle, uint64_t *offset); 3249 int i915_gem_mmap_gtt_version(void); 3250 3251 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3252 struct drm_i915_gem_object *new, 3253 unsigned frontbuffer_bits); 3254 3255 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3256 3257 struct drm_i915_gem_request * 3258 i915_gem_find_active_request(struct intel_engine_cs *engine); 3259 3260 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3261 3262 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3263 { 3264 return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags)); 3265 } 3266 3267 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3268 { 3269 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3270 } 3271 3272 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3273 { 3274 return i915_reset_in_progress(error) | i915_terminally_wedged(error); 3275 } 3276 3277 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3278 { 3279 return READ_ONCE(error->reset_count); 3280 } 3281 3282 void i915_gem_reset(struct drm_i915_private *dev_priv); 3283 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3284 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3285 int __must_check i915_gem_init(struct drm_device *dev); 3286 int __must_check i915_gem_init_hw(struct 
drm_device *dev); 3287 void i915_gem_init_swizzling(struct drm_device *dev); 3288 void i915_gem_cleanup_engines(struct drm_device *dev); 3289 int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, 3290 unsigned int flags); 3291 int __must_check i915_gem_suspend(struct drm_device *dev); 3292 void i915_gem_resume(struct drm_device *dev); 3293 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres); 3294 int __must_check 3295 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 3296 bool readonly); 3297 int __must_check 3298 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3299 bool write); 3300 int __must_check 3301 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3302 struct i915_vma * __must_check 3303 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3304 u32 alignment, 3305 const struct i915_ggtt_view *view); 3306 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3307 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3308 int align); 3309 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3310 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3311 3312 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size, 3313 int tiling_mode); 3314 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, 3315 int tiling_mode, bool fenced); 3316 3317 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3318 enum i915_cache_level cache_level); 3319 3320 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3321 struct dma_buf *dma_buf); 3322 3323 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3324 struct drm_gem_object *gem_obj, int flags); 3325 3326 struct i915_vma * 3327 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3328 struct i915_address_space *vm, 3329 const struct i915_ggtt_view *view); 3330 3331 struct i915_vma * 3332 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3333 struct i915_address_space *vm, 3334 const struct i915_ggtt_view *view); 3335 3336 static inline struct i915_hw_ppgtt * 3337 i915_vm_to_ppgtt(struct i915_address_space *vm) 3338 { 3339 return container_of(vm, struct i915_hw_ppgtt, base); 3340 } 3341 3342 static inline struct i915_vma * 3343 i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, 3344 const struct i915_ggtt_view *view) 3345 { 3346 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3347 } 3348 3349 static inline unsigned long 3350 i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, 3351 const struct i915_ggtt_view *view) 3352 { 3353 return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view)); 3354 } 3355 3356 /* i915_gem_fence.c */ 3357 int __must_check i915_vma_get_fence(struct i915_vma *vma); 3358 int __must_check i915_vma_put_fence(struct i915_vma *vma); 3359 3360 /** 3361 * i915_vma_pin_fence - pin fencing state 3362 * @vma: vma to pin fencing for 3363 * 3364 * This pins the fencing state (whether tiled or untiled) to make sure the 3365 * vma (and its object) is ready to be used as a scanout target. Fencing 3366 * status must be synchronized first by calling i915_vma_get_fence(). 3367 * 3368 * The resulting fence pin reference must be released again with 3369 * i915_vma_unpin_fence(). 3370 * 3371 * Returns: 3372 * 3373 * True if the vma has a fence, false otherwise.
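 *
 * A hedged usage sketch (illustrative only, not the driver's actual
 * code):
 *
 *	ret = i915_vma_get_fence(vma);		// synchronise fence state
 *	if (ret)
 *		return ret;
 *	if (i915_vma_pin_fence(vma)) {
 *		// ... scan out through the fenced GTT range ...
 *		i915_vma_unpin_fence(vma);
 *	}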
3374 */ 3375 static inline bool 3376 i915_vma_pin_fence(struct i915_vma *vma) 3377 { 3378 if (vma->fence) { 3379 vma->fence->pin_count++; 3380 return true; 3381 } else 3382 return false; 3383 } 3384 3385 /** 3386 * i915_vma_unpin_fence - unpin fencing state 3387 * @vma: vma to unpin fencing for 3388 * 3389 * This releases the fence pin reference acquired through 3390 * i915_vma_pin_fence. It will handle both objects with and without an 3391 * attached fence correctly, callers do not need to distinguish this. 3392 */ 3393 static inline void 3394 i915_vma_unpin_fence(struct i915_vma *vma) 3395 { 3396 if (vma->fence) { 3397 GEM_BUG_ON(vma->fence->pin_count <= 0); 3398 vma->fence->pin_count--; 3399 } 3400 } 3401 3402 void i915_gem_restore_fences(struct drm_device *dev); 3403 3404 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3405 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3406 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3407 3408 /* i915_gem_context.c */ 3409 int __must_check i915_gem_context_init(struct drm_device *dev); 3410 void i915_gem_context_lost(struct drm_i915_private *dev_priv); 3411 void i915_gem_context_fini(struct drm_device *dev); 3412 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3413 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3414 int i915_switch_context(struct drm_i915_gem_request *req); 3415 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 3416 void i915_gem_context_free(struct kref *ctx_ref); 3417 struct drm_i915_gem_object * 3418 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3419 struct i915_gem_context * 3420 i915_gem_context_create_gvt(struct drm_device *dev); 3421 3422 static inline struct i915_gem_context * 3423 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3424 { 3425 struct i915_gem_context *ctx; 3426 3427 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); 3428 3429 ctx = idr_find(&file_priv->context_idr, id); 3430 if (!ctx) 3431 return ERR_PTR(-ENOENT); 3432 3433 return ctx; 3434 } 3435 3436 static inline struct i915_gem_context * 3437 i915_gem_context_get(struct i915_gem_context *ctx) 3438 { 3439 kref_get(&ctx->ref); 3440 return ctx; 3441 } 3442 3443 static inline void i915_gem_context_put(struct i915_gem_context *ctx) 3444 { 3445 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 3446 kref_put(&ctx->ref, i915_gem_context_free); 3447 } 3448 3449 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) 3450 { 3451 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3452 } 3453 3454 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3455 struct drm_file *file); 3456 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3457 struct drm_file *file); 3458 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3459 struct drm_file *file_priv); 3460 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3461 struct drm_file *file_priv); 3462 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, 3463 struct drm_file *file); 3464 3465 /* i915_gem_evict.c */ 3466 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3467 u64 min_size, u64 alignment, 3468 unsigned cache_level, 3469 u64 start, u64 end, 3470 unsigned flags); 3471 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3472 int i915_gem_evict_vm(struct 
i915_address_space *vm, bool do_idle); 3473 3474 /* belongs in i915_gem_gtt.h */ 3475 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3476 { 3477 wmb(); 3478 if (INTEL_GEN(dev_priv) < 6) 3479 intel_gtt_chipset_flush(); 3480 } 3481 3482 /* i915_gem_stolen.c */ 3483 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3484 struct drm_mm_node *node, u64 size, 3485 unsigned alignment); 3486 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3487 struct drm_mm_node *node, u64 size, 3488 unsigned alignment, u64 start, 3489 u64 end); 3490 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3491 struct drm_mm_node *node); 3492 int i915_gem_init_stolen(struct drm_device *dev); 3493 void i915_gem_cleanup_stolen(struct drm_device *dev); 3494 struct drm_i915_gem_object * 3495 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3496 struct drm_i915_gem_object * 3497 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3498 u32 stolen_offset, 3499 u32 gtt_offset, 3500 u32 size); 3501 3502 /* i915_gem_shrinker.c */ 3503 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3504 unsigned long target, 3505 unsigned flags); 3506 #define I915_SHRINK_PURGEABLE 0x1 3507 #define I915_SHRINK_UNBOUND 0x2 3508 #define I915_SHRINK_BOUND 0x4 3509 #define I915_SHRINK_ACTIVE 0x8 3510 #define I915_SHRINK_VMAPS 0x10 3511 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3512 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3513 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3514 3515 3516 /* i915_gem_tiling.c */ 3517 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3518 { 3519 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3520 3521 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3522 i915_gem_object_is_tiled(obj); 3523 } 3524 3525 /* i915_debugfs.c */ 3526 #ifdef CONFIG_DEBUG_FS 3527 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3528 void i915_debugfs_unregister(struct drm_i915_private *dev_priv); 3529 int i915_debugfs_connector_add(struct drm_connector *connector); 3530 void intel_display_crc_init(struct drm_i915_private *dev_priv); 3531 #else 3532 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} 3533 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {} 3534 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3535 { return 0; } 3536 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} 3537 #endif 3538 3539 /* i915_gpu_error.c */ 3540 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 3541 3542 __printf(2, 3) 3543 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3544 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3545 const struct i915_error_state_file_priv *error); 3546 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3547 struct drm_i915_private *i915, 3548 size_t count, loff_t pos); 3549 static inline void i915_error_state_buf_release( 3550 struct drm_i915_error_state_buf *eb) 3551 { 3552 kfree(eb->buf); 3553 } 3554 void i915_capture_error_state(struct drm_i915_private *dev_priv, 3555 u32 engine_mask, 3556 const char *error_msg); 3557 void i915_error_state_get(struct drm_device *dev, 3558 struct i915_error_state_file_priv *error_priv); 3559 void 

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      unsigned long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
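
/*
 * Illustrative sketch only: combining the I915_SHRINK_* flags above to
 * reclaim up to @target pages, trying cheap purgeable objects before
 * falling back to unbinding bound objects. This mirrors the intent of
 * the flags, not any specific in-tree caller.
 *
 *	static unsigned long example_reclaim(struct drm_i915_private *i915,
 *					     unsigned long target)
 *	{
 *		unsigned long freed;
 *
 *		freed = i915_gem_shrink(i915, target, I915_SHRINK_PURGEABLE);
 *		if (freed < target)
 *			freed += i915_gem_shrink(i915, target - freed,
 *						 I915_SHRINK_BOUND |
 *						 I915_SHRINK_UNBOUND);
 *		return freed;
 *	}
 */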

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

/* i915_gpu_error.c */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline void i915_destroy_error_state(struct drm_device *dev)
{
}

#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
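
/*
 * Illustrative sketch only: a single-byte read over a GMBUS pin via the
 * standard i2c core, using the adapter returned by
 * intel_gmbus_get_adapter() above. example_gmbus_read_byte() and its
 * parameters are hypothetical; validate @pin first with
 * intel_gmbus_is_valid_pin().
 *
 *	static int example_gmbus_read_byte(struct drm_i915_private *dev_priv,
 *					   unsigned int pin, u16 slave, u8 *val)
 *	{
 *		struct i2c_adapter *adapter =
 *			intel_gmbus_get_adapter(dev_priv, pin);
 *		struct i2c_msg msg = {
 *			.addr = slave,
 *			.flags = I2C_M_RD,
 *			.len = 1,
 *			.buf = val,
 *		};
 *
 *		return i2c_transfer(adapter, &msg, 1) == 1 ? 0 : -EIO;
 *	}
 */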

/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
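
/*
 * Note: the accessors above expand to uses of a local called dev_priv,
 * so the caller must have a struct drm_i915_private *dev_priv in scope.
 * Illustrative sketch only (the helper and register are hypothetical):
 *
 *	static void example_set_bits(struct drm_i915_private *dev_priv,
 *				     i915_reg_t reg, u32 bits)
 *	{
 *		u32 val = I915_READ(reg);
 *
 *		I915_WRITE(reg, val | bits);
 *	}
 */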

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
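
/*
 * Illustrative sketch only: reading a split 64-bit counter with
 * I915_READ64_2x32(), which re-reads the upper half to detect a carry
 * between the two 32-bit reads. EXAMPLE_TS_LOW/EXAMPLE_TS_HIGH are
 * hypothetical placeholder registers, and as above dev_priv must be in
 * scope.
 *
 *	static u64 example_read_counter(struct drm_i915_private *dev_priv)
 *	{
 *		return I915_READ64_2x32(EXAMPLE_TS_LOW, EXAMPLE_TS_HIGH);
 *	}
 */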

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen immediately after event A, record the timestamp (jiffies)
 * of when event A happened; then, just before event B, call this function
 * with that timestamp as the first argument and X as the second.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
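
/*
 * Illustrative sketch only: enforcing a minimum 500ms gap between
 * powering a panel off (event A) and powering it back on (event B).
 * example_panel_off_jiffies is a hypothetical stand-in for wherever the
 * caller stored the event-A timestamp.
 *
 * At event A:
 *	example_panel_off_jiffies = jiffies;
 * Just before event B:
 *	wait_remaining_ms_from_jiffies(example_panel_off_jiffies, 500);
 */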

static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whoever made the change. We only have
			 * to worry about the case where we steal the
			 * irq-posted for ourselves.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (i915_gem_request_completed(req))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

/* Strip any flag bits stashed in the low bits of a page-aligned pointer. */
#define ptr_mask_bits(ptr) ({						\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

/* Split a packed pointer into the pointer and its stashed low bits. */
#define ptr_unpack_bits(ptr, bits) ({					\
	unsigned long __v = (unsigned long)(ptr);			\
	(bits) = __v & ~PAGE_MASK;					\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

/* Stash flag bits in the unused low bits of a page-aligned pointer. */
#define ptr_pack_bits(ptr, bits)					\
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

/* Read *ptr, reset it to zero and return the old value. */
#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})

#endif