/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20161121"
#define DRIVER_TIMESTAMP	1479717903

#undef WARN_ON
/* Many versions of gcc seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
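
/*
 * Illustrative sketch (not from the original source): a hw state checker
 * built on the helpers above might look like
 *
 *	I915_STATE_WARN(crtc->active != crtc_state->base.active,
 *			"CRTC active state mismatch\n");
 *	I915_STATE_WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 *
 * ('crtc'/'crtc_state' are hypothetical locals.) Failing checks either
 * WARN() or DRM_ERROR() depending on i915.verbose_state_checks.
 */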
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
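
/*
 * Illustrative sketch: the *_name() helpers keep debug output uniform, e.g.
 *
 *	DRM_DEBUG_KMS("pipe %c uses transcoder %s\n",
 *		      pipe_name(pipe), transcoder_name(cpu_transcoder));
 *
 * ('pipe' and 'cpu_transcoder' are placeholder locals.)
 */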
/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_NONE = -1,
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
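
/*
 * Illustrative sketch: for_each_hpd_pin() visits every pin after HPD_NONE,
 * e.g. when re-enabling pins disabled by IRQ storm detection (see
 * struct i915_hotplug below):
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin) {
 *		if (dev_priv->hotplug.stats[pin].state == HPD_MARK_DISABLED)
 *			dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
 *	}
 */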
struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP work could block the workqueue while trying to grab a
	 * mode config mutex that userspace has taken, while userspace in
	 * turn waits on the DP workqueue to run, which is blocked behind
	 * the non-DP one. Give DP its own workqueue to break the cycle.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &(dev)->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		for_each_if ((1 << (domain)) & (mask))
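
/*
 * Illustrative sketch: the masked iterators expand to a loop plus a
 * for_each_if() guard, so
 *
 *	enum i915_pipe pipe;
 *
 *	for_each_pipe_masked(dev_priv, pipe, BIT(PIPE_A) | BIT(PIPE_C))
 *		...;
 *
 * visits only pipes A and C, and only if the device actually has them.
 */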
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_engine;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
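
/*
 * Illustrative sketch (values hypothetical): computing the data/link M/N
 * ratios for a 24 bpp, 148.5 MHz mode on a 4-lane 2.7 GHz DP link, with
 * clocks given in kHz:
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 *
 * The resulting tu/gmch_m/gmch_n and link_m/link_n values are what ends up
 * in the pipe's data and link M/N registers.
 */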
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE	(8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum decoupled_power_domain {
	GEN9_DECOUPLED_PD_BLITTER = 0,
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL
};

enum decoupled_ops {
	GEN9_DECOUPLED_OP_WRITE = 0,
	GEN9_DECOUPLED_OP_READ
};

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);
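
/*
 * Illustrative sketch: callers batching several register accesses under one
 * forcewake reference can first ask which domains are needed:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	... raw I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, fw);
 *
 * ('reg' is a placeholder i915_reg_t.)
 */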
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	u64      (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
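
/*
 * Worked example: DMC firmware version 1.26 packs as
 * CSR_VERSION(1, 26) == 0x0001001a, and CSR_VERSION_MAJOR()/
 * CSR_VERSION_MINOR() recover the two halves again.
 */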
struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func) \
	/* Keep is_* in chronological order */ \
	func(is_mobile); \
	func(is_i85x); \
	func(is_i915g); \
	func(is_i945gm); \
	func(is_g33); \
	func(is_g4x); \
	func(is_pineview); \
	func(is_broadwater); \
	func(is_crestline); \
	func(is_ivybridge); \
	func(is_valleyview); \
	func(is_cherryview); \
	func(is_haswell); \
	func(is_broadwell); \
	func(is_skylake); \
	func(is_broxton); \
	func(is_kabylake); \
	func(is_alpha_support); \
	/* Keep has_* in alphabetical order */ \
	func(has_64bit_reloc); \
	func(has_csr); \
	func(has_ddi); \
	func(has_dp_mst); \
	func(has_fbc); \
	func(has_fpga_dbg); \
	func(has_gmbus_irq); \
	func(has_gmch_display); \
	func(has_guc); \
	func(has_hotplug); \
	func(has_hw_contexts); \
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
	func(has_overlay); \
	func(has_pipe_cxsr); \
	func(has_pooled_eu); \
	func(has_psr); \
	func(has_rc6); \
	func(has_rc6p); \
	func(has_resource_streamer); \
	func(has_runtime_pm); \
	func(has_snoop); \
	func(cursor_needs_physical); \
	func(hws_needs_physical); \
	func(overlay_needs_physical); \
	func(supports_tv); \
	func(has_decoupled_mmio)

struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}
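
/*
 * Worked example: slice_mask == 0x1 and subslice_mask == 0x7 (one slice
 * with three subslices) gives sseu_subslice_total() ==
 * hweight8(0x1) * hweight8(0x7) == 1 * 3 == 3.
 */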
struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u16 gen_mask;
	u8 ring_mask; /* Rings supported by the HW */
	u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
	u16 ddb_size; /* in blocks */
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;
	struct timeval boottime;
	struct timeval uptime;

	struct drm_i915_private *i915;

	char error_msg[128];
	bool simulated;
	int iommu;
	u32 reset_count;
	u32 suspend_count;
	struct intel_device_info device_info;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;

	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore;
	struct drm_i915_error_object *guc_log;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool waiting;
		int num_waiters;
		int hangcheck_score;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		int num_requests;

		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 mode;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;

		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			pid_t pid;
			u32 context;
			u32 seqno;
			u32 head;
			u32 tail;
		} *requests, execlist[2];

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		   initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct i915_gem_context {
	struct kref ref;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_ppgtt *ppgtt;
	pid_t pid;
	const char *name;

	struct i915_ctx_hang_stats hang_stats;

	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)

	/* Unique identifier for this context, used by the hw for tracking */
	unsigned int hw_id;
	u32 user_handle;
	int priority; /* greater priorities are serviced first */

	u32 ggtt_alignment;

	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		uint32_t *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];
	u32 ring_size;
	u32 desc_template;
	struct atomic_notifier_head status_notifier;
	bool execlists_force_single_submission;

	struct list_head link;

	u8 remap_slice;
	bool closed:1;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct lock lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	bool underrun_detected;
	struct work_struct underrun_work;

	struct intel_fbc_state_cache {
		struct i915_vma *vma;

		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			uint32_t pixel_format;
			unsigned int stride;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct i915_vma *vma;

		struct {
			enum i915_pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			uint32_t pixel_format;
			unsigned int stride;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID;
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intr_keep;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work autoenable_work;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct lock hw_lock;
};
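
/*
 * Illustrative note (the multiplier is platform dependent): on many gen6+
 * parts one frequency unit is 50 MHz, so e.g. cur_freq == 12 would be a
 * 600 MHz request; conversions to/from MHz go through helpers such as
 * intel_gpu_freq()/intel_freq_opcode().
 */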
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	/* unique identifier for this power well */
	unsigned long id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct lock lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct lock stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count.
	 *
	 * This is a counter which gets incremented when reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;

	unsigned long flags;
#define I915_RESET_IN_PROGRESS	0
#define I915_WEDGED		(BITS_PER_LONG - 1)
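
/*
 * Illustrative sketch: lock-free wait paths poll these bits instead of
 * taking struct_mutex, e.g.
 *
 *	if (test_bit(I915_RESET_IN_PROGRESS, &error->flags))
 *		return -EAGAIN;	(reset pending, back off and retry)
 *	if (test_bit(I915_WEDGED, &error->flags))
 *		return -EIO;	(terminally wedged, no recovery)
 */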
	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		bool support;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
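
/*
 * Worked example: { .start = 8, .end = 40 } describes DDB blocks 8..39,
 * so skl_ddb_entry_size() returns 32; keeping 'end' exclusive makes the
 * size a plain subtraction.
 */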
struct skl_ddb_allocation {
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	bool plane_en;
	uint16_t plane_res_b;
	uint8_t plane_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts
 * nor anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only
 * really suspend if we stay with zero refcount for a certain amount of time.
 * The default value is currently very conservative (see
 * intel_runtime_pm_enable), but it can be changed with the standard runtime
 * PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity
	 * or scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines, the limit will need to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};

struct i915_virtual_gpu {
	bool active;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct drm_i915_private {
	struct drm_device drm;

	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;
	struct kmem_cache *dependencies;

	const struct intel_device_info info;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	uint32_t psr_mmio_base;

	uint32_t pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct i915_gem_context *kernel_context;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct i915_vma *semaphore;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct lock sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_imr;
	u32 pm_ier;
	u32 pm_rps_events;
	u32 pm_guc_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	struct {
		unsigned int vco, ref;
	} cdclk_pll;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;
	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* The hw wants to have a stable context identifier for the lifetime
	 * of the context (for OA, PASID, faults, etc). This is limited
	 * in execlists to 21 bits.
	 */
	struct ida context_hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct lock dpll_lock;

	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct lock av_mutex;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool suspended_to_idle;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};

		uint8_t max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct lock wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB. Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 */
		bool distrust_bios_wm;
	} wm;
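
	/*
	 * Worked example of the latency units above: pri_latency[0] == 7
	 * means 0.7us for WM0, pri_latency[2] == 7 means 3.5us for WM2,
	 * while skl_latency[] values are already whole microseconds.
	 */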
2091 */ 2092 bool distrust_bios_wm; 2093 } wm; 2094 2095 struct i915_runtime_pm pm; 2096 2097 uint32_t bios_vgacntr; 2098 2099 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 2100 struct { 2101 void (*resume)(struct drm_i915_private *); 2102 void (*cleanup_engine)(struct intel_engine_cs *engine); 2103 2104 struct list_head timelines; 2105 struct i915_gem_timeline global_timeline; 2106 u32 active_requests; 2107 2108 /** 2109 * Is the GPU currently considered idle, or busy executing 2110 * userspace requests? Whilst idle, we allow runtime power 2111 * management to power down the hardware and display clocks. 2112 * In order to reduce the effect on performance, there 2113 * is a slight delay before we do so. 2114 */ 2115 bool awake; 2116 2117 /** 2118 * We leave the user IRQ off as much as possible, 2119 * but this means that requests will finish and never 2120 * be retired once the system goes idle. Set a timer to 2121 * fire periodically while the ring is running. When it 2122 * fires, go retire requests. 2123 */ 2124 struct delayed_work retire_work; 2125 2126 /** 2127 * When we detect an idle GPU, we want to turn on 2128 * powersaving features. So once we see that there 2129 * are no more requests outstanding and no more 2130 * arrive within a small period of time, we fire 2131 * off the idle_work. 2132 */ 2133 struct delayed_work idle_work; 2134 2135 ktime_t last_init_time; 2136 } gt; 2137 2138 /* perform PHY state sanity checks? */ 2139 bool chv_phy_assert[2]; 2140 2141 /* Used to save the pipe-to-encoder mapping for audio */ 2142 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2143 2144 /* 2145 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2146 * will be rejected. Instead look for a better place. 2147 */ 2148 }; 2149 2150 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2151 { 2152 return container_of(dev, struct drm_i915_private, drm); 2153 } 2154 2155 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2156 { 2157 return to_i915(dev_get_drvdata(kdev)); 2158 } 2159 2160 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2161 { 2162 return container_of(guc, struct drm_i915_private, guc); 2163 } 2164 2165 /* Simple iterator over all initialised engines */ 2166 #define for_each_engine(engine__, dev_priv__, id__) \ 2167 for ((id__) = 0; \ 2168 (id__) < I915_NUM_ENGINES; \ 2169 (id__)++) \ 2170 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2171 2172 #define __mask_next_bit(mask) ({ \ 2173 int __idx = ffs(mask) - 1; \ 2174 mask &= ~BIT(__idx); \ 2175 __idx; \ 2176 }) 2177 2178 /* Iterator over subset of engines selected by mask */ 2179 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2180 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2181 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2182 2183 enum hdmi_force_audio { 2184 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2185 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2186 HDMI_AUDIO_AUTO, /* trust EDID */ 2187 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2188 }; 2189 2190 #define I915_GTT_OFFSET_NONE ((u32)-1) 2191 2192 /* 2193 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2194 * considered to be the frontbuffer for the given plane interface-wise. 
This 2195 * doesn't mean that the hw necessarily already scans it out, but that any 2196 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2197 * 2198 * We have one bit per pipe and per scanout plane type. 2199 */ 2200 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2201 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2202 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2203 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2204 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2205 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2206 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2207 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2208 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2209 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2210 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2211 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2212 2213 /* 2214 * Optimised SGL iterator for GEM objects 2215 */ 2216 static __always_inline struct sgt_iter { 2217 struct scatterlist *sgp; 2218 union { 2219 unsigned long pfn; 2220 dma_addr_t dma; 2221 }; 2222 unsigned int curr; 2223 unsigned int max; 2224 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2225 struct sgt_iter s = { .sgp = sgl }; 2226 2227 if (s.sgp) { 2228 s.max = s.curr = s.sgp->offset; 2229 s.max += s.sgp->length; 2230 if (dma) 2231 s.dma = sg_dma_address(s.sgp); 2232 else 2233 s.pfn = page_to_pfn(sg_page(s.sgp)); 2234 } 2235 2236 return s; 2237 } 2238 2239 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2240 { 2241 ++sg; 2242 if (unlikely(sg_is_chain(sg))) 2243 sg = sg_chain_ptr(sg); 2244 return sg; 2245 } 2246 2247 /** 2248 * __sg_next - return the next scatterlist entry in a list 2249 * @sg: The current sg entry 2250 * 2251 * Description: 2252 * If the entry is the last, return NULL; otherwise, step to the next 2253 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2254 * otherwise just return the pointer to the current element. 2255 **/ 2256 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2257 { 2258 #ifdef CONFIG_DEBUG_SG 2259 BUG_ON(sg->sg_magic != SG_MAGIC); 2260 #endif 2261 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2262 } 2263 2264 /** 2265 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2266 * @__dmap: DMA address (output) 2267 * @__iter: 'struct sgt_iter' (iterator state, internal) 2268 * @__sgt: sg_table to iterate over (input) 2269 */ 2270 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2271 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2272 ((__dmap) = (__iter).dma + (__iter).curr); \ 2273 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2274 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2275 2276 /** 2277 * for_each_sgt_page - iterate over the pages of the given sg_table 2278 * @__pp: page pointer (output) 2279 * @__iter: 'struct sgt_iter' (iterator state, internal) 2280 * @__sgt: sg_table to iterate over (input) 2281 */ 2282 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2283 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2284 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2285 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2286 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2287 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2288 2289 /* 2290 * A command that requires special handling by the command parser. 
2291 */
2292 struct drm_i915_cmd_descriptor {
2293 /*
2294 * Flags describing how the command parser processes the command.
2295 *
2296 * CMD_DESC_FIXED: The command has a fixed length if this is set,
2297 * a length mask if not set
2298 * CMD_DESC_SKIP: The command is allowed but does not follow the
2299 * standard length encoding for the opcode range in
2300 * which it falls
2301 * CMD_DESC_REJECT: The command is never allowed
2302 * CMD_DESC_REGISTER: The command should be checked against the
2303 * register whitelist for the appropriate ring
 * CMD_DESC_BITMASK: The command is allowed but particular dwords must
 * pass the masked-value checks described by the
 * bits[] field below
2304 * CMD_DESC_MASTER: The command is allowed if the submitting process
2305 * is the DRM master
2306 */
2307 u32 flags;
2308 #define CMD_DESC_FIXED (1<<0)
2309 #define CMD_DESC_SKIP (1<<1)
2310 #define CMD_DESC_REJECT (1<<2)
2311 #define CMD_DESC_REGISTER (1<<3)
2312 #define CMD_DESC_BITMASK (1<<4)
2313 #define CMD_DESC_MASTER (1<<5)
2314
2315 /*
2316 * The command's unique identification bits and the bitmask to get them.
2317 * This isn't strictly the opcode field as defined in the spec and may
2318 * also include type, subtype, and/or subop fields.
2319 */
2320 struct {
2321 u32 value;
2322 u32 mask;
2323 } cmd;
2324
2325 /*
2326 * The command's length. The command is either fixed length (i.e. does
2327 * not include a length field) or has a length field mask. The flag
2328 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
2329 * a length mask. All command entries in a command table must include
2330 * length information.
2331 */
2332 union {
2333 u32 fixed;
2334 u32 mask;
2335 } length;
2336
2337 /*
2338 * Describes where to find a register address in the command to check
2339 * against the ring's register whitelist. Only valid if flags has the
2340 * CMD_DESC_REGISTER bit set.
2341 *
2342 * A non-zero step value implies that the command may access multiple
2343 * registers in sequence (e.g. LRI); in that case step gives the
2344 * distance in dwords between individual offset fields.
2345 */
2346 struct {
2347 u32 offset;
2348 u32 mask;
2349 u32 step;
2350 } reg;
2351
2352 #define MAX_CMD_DESC_BITMASKS 3
2353 /*
2354 * Describes command checks where a particular dword is masked and
2355 * compared against an expected value. If the command does not match
2356 * the expected value, the parser rejects it. Only valid if flags has
2357 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
2358 * are valid.
2359 *
2360 * If the check specifies a non-zero condition_mask then the parser
2361 * only performs the check when the bits specified by condition_mask
2362 * are non-zero.
2363 */
2364 struct {
2365 u32 offset;
2366 u32 mask;
2367 u32 expected;
2368 u32 condition_offset;
2369 u32 condition_mask;
2370 } bits[MAX_CMD_DESC_BITMASKS];
2371 };
2372
2373 /*
2374 * A table of commands requiring special handling by the command parser.
2375 *
2376 * Each engine has an array of tables. Each table consists of an array of
2377 * command descriptors, which must be sorted with command opcodes in
2378 * ascending order.
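 *
 * For example, a per-engine table array could look like the following
 * sketch (hypothetical names, not from this file):
 *
 *	static const struct drm_i915_cmd_table hypothetical_render_cmd_tables[] = {
 *		{ common_cmds, ARRAY_SIZE(common_cmds) },
 *		{ render_cmds, ARRAY_SIZE(render_cmds) },
 *	};
 *
 * where each *_cmds array holds drm_i915_cmd_descriptor entries sorted
 * by ascending command opcode.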
2379 */
2380 struct drm_i915_cmd_table {
2381 const struct drm_i915_cmd_descriptor *table;
2382 int count;
2383 };
2384
2385 static inline const struct intel_device_info *
2386 intel_info(const struct drm_i915_private *dev_priv)
2387 {
2388 return &dev_priv->info;
2389 }
2390
2391 #define INTEL_INFO(dev_priv) intel_info((dev_priv))
2392
2393 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
2394 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
2395
2396 #define REVID_FOREVER 0xff
2397 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
2398
2399 #define GEN_FOREVER (0)
2400 /*
2401 * Returns true if Gen is in the inclusive range [s, e].
2402 *
2403 * Use GEN_FOREVER for an unbound start and/or end.
2404 */
2405 #define IS_GEN(dev_priv, s, e) ({ \
2406 unsigned int __s = (s), __e = (e); \
2407 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2408 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2409 if ((__s) != GEN_FOREVER) \
2410 __s = (s) - 1; \
2411 if ((__e) == GEN_FOREVER) \
2412 __e = BITS_PER_LONG - 1; \
2413 else \
2414 __e = (e) - 1; \
2415 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
2416 })
2417
2418 /*
2419 * Return true if revision is in range [since,until] inclusive.
2420 *
2421 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
2422 */
2423 #define IS_REVID(p, since, until) \
2424 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
2425
2426 #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
2427 #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
2428 #define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
2429 #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
2430 #define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
2431 #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
2432 #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
2433 #define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
2434 #define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
2435 #define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
2436 #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
2437 #define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
2438 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
2439 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
2440 #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
2441 #define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
2442 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
2443 #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
2444 #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
2445 INTEL_DEVID(dev_priv) == 0x0152 || \
2446 INTEL_DEVID(dev_priv) == 0x015a)
2447 #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
2448 #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
2449 #define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
2450 #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
2451 #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
2452 #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
2453 #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
2454 #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
2455 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
2456 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
2457 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
2458 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
2459 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
2460
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2461 /* ULX machines are also considered ULT. */ 2462 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2463 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2464 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2465 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2466 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2467 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2468 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2469 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2470 /* ULX machines are also considered ULT. */ 2471 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2472 INTEL_DEVID(dev_priv) == 0x0A1E) 2473 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2474 INTEL_DEVID(dev_priv) == 0x1913 || \ 2475 INTEL_DEVID(dev_priv) == 0x1916 || \ 2476 INTEL_DEVID(dev_priv) == 0x1921 || \ 2477 INTEL_DEVID(dev_priv) == 0x1926) 2478 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2479 INTEL_DEVID(dev_priv) == 0x1915 || \ 2480 INTEL_DEVID(dev_priv) == 0x191E) 2481 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2482 INTEL_DEVID(dev_priv) == 0x5913 || \ 2483 INTEL_DEVID(dev_priv) == 0x5916 || \ 2484 INTEL_DEVID(dev_priv) == 0x5921 || \ 2485 INTEL_DEVID(dev_priv) == 0x5926) 2486 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2487 INTEL_DEVID(dev_priv) == 0x5915 || \ 2488 INTEL_DEVID(dev_priv) == 0x591E) 2489 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2490 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2491 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2492 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2493 2494 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2495 2496 #define SKL_REVID_A0 0x0 2497 #define SKL_REVID_B0 0x1 2498 #define SKL_REVID_C0 0x2 2499 #define SKL_REVID_D0 0x3 2500 #define SKL_REVID_E0 0x4 2501 #define SKL_REVID_F0 0x5 2502 #define SKL_REVID_G0 0x6 2503 #define SKL_REVID_H0 0x7 2504 2505 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2506 2507 #define BXT_REVID_A0 0x0 2508 #define BXT_REVID_A1 0x1 2509 #define BXT_REVID_B0 0x3 2510 #define BXT_REVID_C0 0x9 2511 2512 #define IS_BXT_REVID(dev_priv, since, until) \ 2513 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2514 2515 #define KBL_REVID_A0 0x0 2516 #define KBL_REVID_B0 0x1 2517 #define KBL_REVID_C0 0x2 2518 #define KBL_REVID_D0 0x3 2519 #define KBL_REVID_E0 0x4 2520 2521 #define IS_KBL_REVID(dev_priv, since, until) \ 2522 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2523 2524 /* 2525 * The genX designation typically refers to the render engine, so render 2526 * capability related checks should use IS_GEN, while display and other checks 2527 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2528 * chips, etc.). 
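 *
 * For example (illustrative): a render-engine workaround would be gated
 * on IS_GEN9(dev_priv), while an ILK+ display quirk would test
 * HAS_PCH_SPLIT(dev_priv) and a single-chip quirk IS_IVYBRIDGE(dev_priv).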
2529 */
2530 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
2531 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
2532 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
2533 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
2534 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
2535 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
2536 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
2537 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
2538
2539 #define ENGINE_MASK(id) BIT(id)
2540 #define RENDER_RING ENGINE_MASK(RCS)
2541 #define BSD_RING ENGINE_MASK(VCS)
2542 #define BLT_RING ENGINE_MASK(BCS)
2543 #define VEBOX_RING ENGINE_MASK(VECS)
2544 #define BSD2_RING ENGINE_MASK(VCS2)
2545 #define ALL_ENGINES (~0)
2546
2547 #define HAS_ENGINE(dev_priv, id) \
2548 (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
2549
2550 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2551 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2552 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2553 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2554
2555 #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
2556 #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
2557 #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
2558 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
2559 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
2560
2561 #define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
2562
2563 #define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts)
2564 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
2565 ((dev_priv)->info.has_logical_ring_contexts)
2566 #define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
2567 #define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
2568 #define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
2569
2570 #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
2571 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
2572 ((dev_priv)->info.overlay_needs_physical)
2573
2574 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
2575 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
2576
2577 /* WaRsDisableCoarsePowerGating:skl,bxt */
2578 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2579 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2580 IS_SKL_GT3(dev_priv) || \
2581 IS_SKL_GT4(dev_priv))
2582
2583 /*
2584 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
2585 * even when in MSI mode. This results in spurious interrupt warnings if the
2586 * legacy irq no. is shared with another device. The kernel then disables that
2587 * interrupt source and so prevents the other device from working properly.
2588 */
2589 #define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
2590 #define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
2591
2592 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
2593 * rows, which changed the alignment requirements and fence programming.
2594 */ 2595 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2596 !(IS_I915G(dev_priv) || \ 2597 IS_I915GM(dev_priv))) 2598 #define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) 2599 #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) 2600 2601 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2602 #define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr) 2603 #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) 2604 2605 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2606 2607 #define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) 2608 2609 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2610 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) 2611 #define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) 2612 #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) 2613 #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) 2614 2615 #define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) 2616 2617 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2618 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 2619 2620 /* 2621 * For now, anything with a GuC requires uCode loading, and then supports 2622 * command submission once loaded. But these are logically independent 2623 * properties, so we have separate macros to test them. 2624 */ 2625 #define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) 2626 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2627 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) 2628 2629 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) 2630 2631 #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) 2632 2633 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2634 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2635 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2636 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2637 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2638 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2639 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2640 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2641 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2642 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2643 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2644 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2645 2646 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2647 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2648 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2649 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2650 #define HAS_PCH_LPT_LP(dev_priv) \ 2651 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2652 #define HAS_PCH_LPT_H(dev_priv) \ 2653 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2654 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2655 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2656 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2657 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2658 2659 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2660 2661 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 2662 2663 /* DPF == dynamic parity feature */ 2664 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2665 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2666 2 : HAS_L3_DPF(dev_priv)) 2667 2668 #define GT_FREQUENCY_MULTIPLIER 50 2669 #define GEN9_FREQ_SCALER 3 2670 2671 #define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio) 2672 2673 #include "i915_trace.h" 2674 2675 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2676 { 2677 #ifdef CONFIG_INTEL_IOMMU 2678 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2679 return true; 2680 #endif 2681 return false; 2682 } 2683 2684 extern int i915_suspend_switcheroo(device_t kdev); 2685 extern int i915_resume_switcheroo(struct drm_device *dev); 2686 2687 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2688 int enable_ppgtt); 2689 2690 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); 2691 2692 /* i915_drv.c */ 2693 void __printf(3, 4) 2694 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2695 const char *fmt, ...); 2696 2697 #define i915_report_error(dev_priv, fmt, ...) \ 2698 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2699 2700 #ifdef CONFIG_COMPAT 2701 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2702 unsigned long arg); 2703 #endif 2704 extern const struct dev_pm_ops i915_pm_ops; 2705 2706 extern int i915_driver_load(struct pci_dev *pdev, 2707 const struct pci_device_id *ent); 2708 extern void i915_driver_unload(struct drm_device *dev); 2709 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 2710 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 2711 extern void i915_reset(struct drm_i915_private *dev_priv); 2712 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2713 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2714 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 2715 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2716 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2717 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2718 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2719 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2720 2721 /* intel_hotplug.c */ 2722 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 2723 u32 pin_mask, u32 long_mask); 2724 void intel_hpd_init(struct drm_i915_private *dev_priv); 2725 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2726 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2727 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2728 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2729 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2730 2731 /* i915_irq.c */ 2732 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 2733 { 2734 unsigned long delay; 2735 2736 if (unlikely(!i915.enable_hangcheck)) 2737 return; 2738 2739 /* Don't continually defer the hangcheck so that it is always run at 2740 * least once after work has been scheduled on any ring. Otherwise, 2741 * we will ignore a hung ring if a second ring is kept busy. 
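 *
 * (Clarifying note, relying only on standard workqueue semantics:
 * queue_delayed_work() below is a no-op while the work is still
 * pending, so a busy second ring cannot keep pushing the check out
 * past a hung first one.)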
2742 */ 2743 2744 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 2745 queue_delayed_work(system_long_wq, 2746 &dev_priv->gpu_error.hangcheck_work, delay); 2747 } 2748 2749 __printf(3, 4) 2750 void i915_handle_error(struct drm_i915_private *dev_priv, 2751 u32 engine_mask, 2752 const char *fmt, ...); 2753 2754 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2755 int intel_irq_install(struct drm_i915_private *dev_priv); 2756 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2757 2758 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); 2759 extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, 2760 bool restore_forcewake); 2761 extern void intel_uncore_init(struct drm_i915_private *dev_priv); 2762 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2763 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2764 extern void intel_uncore_fini(struct drm_i915_private *dev_priv); 2765 extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, 2766 bool restore); 2767 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2768 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2769 enum forcewake_domains domains); 2770 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2771 enum forcewake_domains domains); 2772 /* Like above but the caller must manage the uncore.lock itself. 2773 * Must be used with I915_READ_FW and friends. 2774 */ 2775 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2776 enum forcewake_domains domains); 2777 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2778 enum forcewake_domains domains); 2779 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2780 2781 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2782 2783 int intel_wait_for_register(struct drm_i915_private *dev_priv, 2784 i915_reg_t reg, 2785 const u32 mask, 2786 const u32 value, 2787 const unsigned long timeout_ms); 2788 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 2789 i915_reg_t reg, 2790 const u32 mask, 2791 const u32 value, 2792 const unsigned long timeout_ms); 2793 2794 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 2795 { 2796 return dev_priv->gvt; 2797 } 2798 2799 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 2800 { 2801 return dev_priv->vgpu.active; 2802 } 2803 2804 void 2805 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2806 u32 status_mask); 2807 2808 void 2809 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2810 u32 status_mask); 2811 2812 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2813 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2814 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2815 uint32_t mask, 2816 uint32_t bits); 2817 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2818 uint32_t interrupt_mask, 2819 uint32_t enabled_irq_mask); 2820 static inline void 2821 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2822 { 2823 ilk_update_display_irq(dev_priv, bits, bits); 2824 } 2825 static inline void 2826 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2827 { 2828 ilk_update_display_irq(dev_priv, bits, 0); 2829 } 2830 void 
bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2831 enum i915_pipe pipe, 2832 uint32_t interrupt_mask, 2833 uint32_t enabled_irq_mask); 2834 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2835 enum i915_pipe pipe, uint32_t bits) 2836 { 2837 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2838 } 2839 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2840 enum i915_pipe pipe, uint32_t bits) 2841 { 2842 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2843 } 2844 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2845 uint32_t interrupt_mask, 2846 uint32_t enabled_irq_mask); 2847 static inline void 2848 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2849 { 2850 ibx_display_interrupt_update(dev_priv, bits, bits); 2851 } 2852 static inline void 2853 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2854 { 2855 ibx_display_interrupt_update(dev_priv, bits, 0); 2856 } 2857 2858 /* i915_gem.c */ 2859 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2860 struct drm_file *file_priv); 2861 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2862 struct drm_file *file_priv); 2863 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2864 struct drm_file *file_priv); 2865 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2866 struct drm_file *file_priv); 2867 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2868 struct drm_file *file_priv); 2869 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2870 struct drm_file *file_priv); 2871 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2872 struct drm_file *file_priv); 2873 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2874 struct drm_file *file_priv); 2875 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2876 struct drm_file *file_priv); 2877 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2878 struct drm_file *file_priv); 2879 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2880 struct drm_file *file); 2881 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2882 struct drm_file *file); 2883 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2884 struct drm_file *file_priv); 2885 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2886 struct drm_file *file_priv); 2887 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2888 struct drm_file *file_priv); 2889 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2890 struct drm_file *file_priv); 2891 void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 2892 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2893 struct drm_file *file); 2894 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2895 struct drm_file *file_priv); 2896 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2897 struct drm_file *file_priv); 2898 int i915_gem_load_init(struct drm_device *dev); 2899 void i915_gem_load_cleanup(struct drm_device *dev); 2900 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2901 int i915_gem_freeze(struct drm_i915_private *dev_priv); 2902 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 2903 2904 void *i915_gem_object_alloc(struct drm_device *dev); 2905 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2906 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2907 const struct 
drm_i915_gem_object_ops *ops); 2908 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, 2909 u64 size); 2910 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2911 struct drm_device *dev, const void *data, size_t size); 2912 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 2913 void i915_gem_free_object(struct drm_gem_object *obj); 2914 2915 struct i915_vma * __must_check 2916 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2917 const struct i915_ggtt_view *view, 2918 u64 size, 2919 u64 alignment, 2920 u64 flags); 2921 2922 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 2923 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2924 2925 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 2926 2927 static inline int __sg_page_count(const struct scatterlist *sg) 2928 { 2929 return sg->length >> PAGE_SHIFT; 2930 } 2931 2932 struct scatterlist * 2933 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 2934 unsigned int n, unsigned int *offset); 2935 2936 struct page * 2937 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 2938 unsigned int n); 2939 2940 struct page * 2941 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 2942 unsigned int n); 2943 2944 dma_addr_t 2945 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 2946 unsigned long n); 2947 2948 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2949 struct sg_table *pages); 2950 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2951 2952 static inline int __must_check 2953 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2954 { 2955 might_lock(&obj->mm.lock); 2956 2957 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 2958 return 0; 2959 2960 return __i915_gem_object_get_pages(obj); 2961 } 2962 2963 static inline void 2964 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2965 { 2966 GEM_BUG_ON(!obj->mm.pages); 2967 2968 atomic_inc(&obj->mm.pages_pin_count); 2969 } 2970 2971 static inline bool 2972 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 2973 { 2974 return atomic_read(&obj->mm.pages_pin_count); 2975 } 2976 2977 static inline void 2978 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2979 { 2980 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 2981 GEM_BUG_ON(!obj->mm.pages); 2982 2983 atomic_dec(&obj->mm.pages_pin_count); 2984 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 2985 } 2986 2987 static inline void 2988 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2989 { 2990 __i915_gem_object_unpin_pages(obj); 2991 } 2992 2993 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ 2994 I915_MM_NORMAL = 0, 2995 I915_MM_SHRINKER 2996 }; 2997 2998 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2999 enum i915_mm_subclass subclass); 3000 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 3001 3002 enum i915_map_type { 3003 I915_MAP_WB = 0, 3004 I915_MAP_WC, 3005 }; 3006 3007 /** 3008 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3009 * @obj - the object to map into kernel address space 3010 * @type - the type of mapping, used to select pgprot_t 3011 * 3012 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3013 * pages and then returns a contiguous mapping of the backing storage into 3014 * the kernel address space. 
Based on the @type of mapping, the PTE will be 3015 * set to either WriteBack or WriteCombine (via pgprot_t). 3016 * 3017 * The caller is responsible for calling i915_gem_object_unpin_map() when the 3018 * mapping is no longer required. 3019 * 3020 * Returns the pointer through which to access the mapped object, or an 3021 * ERR_PTR() on error. 3022 */ 3023 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3024 enum i915_map_type type); 3025 3026 /** 3027 * i915_gem_object_unpin_map - releases an earlier mapping 3028 * @obj - the object to unmap 3029 * 3030 * After pinning the object and mapping its pages, once you are finished 3031 * with your access, call i915_gem_object_unpin_map() to release the pin 3032 * upon the mapping. Once the pin count reaches zero, that mapping may be 3033 * removed. 3034 */ 3035 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3036 { 3037 i915_gem_object_unpin_pages(obj); 3038 } 3039 3040 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3041 unsigned int *needs_clflush); 3042 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3043 unsigned int *needs_clflush); 3044 #define CLFLUSH_BEFORE 0x1 3045 #define CLFLUSH_AFTER 0x2 3046 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3047 3048 static inline void 3049 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3050 { 3051 i915_gem_object_unpin_pages(obj); 3052 } 3053 3054 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3055 void i915_vma_move_to_active(struct i915_vma *vma, 3056 struct drm_i915_gem_request *req, 3057 unsigned int flags); 3058 int i915_gem_dumb_create(struct drm_file *file_priv, 3059 struct drm_device *dev, 3060 struct drm_mode_create_dumb *args); 3061 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3062 uint32_t handle, uint64_t *offset); 3063 int i915_gem_mmap_gtt_version(void); 3064 3065 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3066 struct drm_i915_gem_object *new, 3067 unsigned frontbuffer_bits); 3068 3069 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3070 3071 struct drm_i915_gem_request * 3072 i915_gem_find_active_request(struct intel_engine_cs *engine); 3073 3074 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3075 3076 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3077 { 3078 return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags)); 3079 } 3080 3081 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3082 { 3083 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3084 } 3085 3086 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3087 { 3088 return i915_reset_in_progress(error) | i915_terminally_wedged(error); 3089 } 3090 3091 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3092 { 3093 return READ_ONCE(error->reset_count); 3094 } 3095 3096 void i915_gem_reset(struct drm_i915_private *dev_priv); 3097 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3098 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3099 int __must_check i915_gem_init(struct drm_device *dev); 3100 int __must_check i915_gem_init_hw(struct drm_device *dev); 3101 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); 3102 void i915_gem_cleanup_engines(struct drm_device *dev); 3103 int __must_check i915_gem_wait_for_idle(struct drm_i915_private 
*dev_priv, 3104 unsigned int flags); 3105 int __must_check i915_gem_suspend(struct drm_device *dev); 3106 void i915_gem_resume(struct drm_device *dev); 3107 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres); 3108 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3109 unsigned int flags, 3110 long timeout, 3111 struct intel_rps_client *rps); 3112 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3113 unsigned int flags, 3114 int priority); 3115 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3116 3117 int __must_check 3118 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3119 bool write); 3120 int __must_check 3121 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3122 struct i915_vma * __must_check 3123 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3124 u32 alignment, 3125 const struct i915_ggtt_view *view); 3126 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3127 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3128 int align); 3129 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3130 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3131 3132 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size, 3133 int tiling_mode); 3134 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, 3135 int tiling_mode, bool fenced); 3136 3137 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3138 enum i915_cache_level cache_level); 3139 3140 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3141 struct dma_buf *dma_buf); 3142 3143 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3144 struct drm_gem_object *gem_obj, int flags); 3145 3146 struct i915_vma * 3147 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3148 struct i915_address_space *vm, 3149 const struct i915_ggtt_view *view); 3150 3151 struct i915_vma * 3152 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3153 struct i915_address_space *vm, 3154 const struct i915_ggtt_view *view); 3155 3156 static inline struct i915_hw_ppgtt * 3157 i915_vm_to_ppgtt(struct i915_address_space *vm) 3158 { 3159 return container_of(vm, struct i915_hw_ppgtt, base); 3160 } 3161 3162 static inline struct i915_vma * 3163 i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, 3164 const struct i915_ggtt_view *view) 3165 { 3166 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3167 } 3168 3169 /* i915_gem_fence_reg.c */ 3170 int __must_check i915_vma_get_fence(struct i915_vma *vma); 3171 int __must_check i915_vma_put_fence(struct i915_vma *vma); 3172 3173 void i915_gem_restore_fences(struct drm_i915_private *dev_priv); 3174 3175 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); 3176 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, 3177 struct sg_table *pages); 3178 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3179 struct sg_table *pages); 3180 3181 /* i915_gem_context.c */ 3182 int __must_check i915_gem_context_init(struct drm_device *dev); 3183 void i915_gem_context_lost(struct drm_i915_private *dev_priv); 3184 void i915_gem_context_fini(struct drm_device *dev); 3185 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3186 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3187 int i915_switch_context(struct 
drm_i915_gem_request *req); 3188 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 3189 struct i915_vma * 3190 i915_gem_context_pin_legacy(struct i915_gem_context *ctx, 3191 unsigned int flags); 3192 void i915_gem_context_free(struct kref *ctx_ref); 3193 struct drm_i915_gem_object * 3194 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3195 struct i915_gem_context * 3196 i915_gem_context_create_gvt(struct drm_device *dev); 3197 3198 static inline struct i915_gem_context * 3199 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3200 { 3201 struct i915_gem_context *ctx; 3202 3203 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); 3204 3205 ctx = idr_find(&file_priv->context_idr, id); 3206 if (!ctx) 3207 return ERR_PTR(-ENOENT); 3208 3209 return ctx; 3210 } 3211 3212 static inline struct i915_gem_context * 3213 i915_gem_context_get(struct i915_gem_context *ctx) 3214 { 3215 kref_get(&ctx->ref); 3216 return ctx; 3217 } 3218 3219 static inline void i915_gem_context_put(struct i915_gem_context *ctx) 3220 { 3221 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 3222 kref_put(&ctx->ref, i915_gem_context_free); 3223 } 3224 3225 static inline struct intel_timeline * 3226 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, 3227 struct intel_engine_cs *engine) 3228 { 3229 struct i915_address_space *vm; 3230 3231 vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; 3232 return &vm->timeline.engine[engine->id]; 3233 } 3234 3235 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) 3236 { 3237 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3238 } 3239 3240 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3241 struct drm_file *file); 3242 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3243 struct drm_file *file); 3244 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3245 struct drm_file *file_priv); 3246 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3247 struct drm_file *file_priv); 3248 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, 3249 struct drm_file *file); 3250 3251 /* i915_gem_evict.c */ 3252 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3253 u64 min_size, u64 alignment, 3254 unsigned cache_level, 3255 u64 start, u64 end, 3256 unsigned flags); 3257 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3258 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3259 3260 /* belongs in i915_gem_gtt.h */ 3261 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3262 { 3263 wmb(); 3264 if (INTEL_GEN(dev_priv) < 6) 3265 intel_gtt_chipset_flush(); 3266 } 3267 3268 /* i915_gem_stolen.c */ 3269 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3270 struct drm_mm_node *node, u64 size, 3271 unsigned alignment); 3272 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3273 struct drm_mm_node *node, u64 size, 3274 unsigned alignment, u64 start, 3275 u64 end); 3276 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3277 struct drm_mm_node *node); 3278 int i915_gem_init_stolen(struct drm_i915_private *dev_priv); 3279 void i915_gem_cleanup_stolen(struct drm_device *dev); 3280 struct drm_i915_gem_object * 3281 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3282 struct drm_i915_gem_object * 3283 
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3284 u32 stolen_offset, 3285 u32 gtt_offset, 3286 u32 size); 3287 3288 /* i915_gem_internal.c */ 3289 struct drm_i915_gem_object * 3290 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3291 unsigned int size); 3292 3293 /* i915_gem_shrinker.c */ 3294 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3295 unsigned long target, 3296 unsigned flags); 3297 #define I915_SHRINK_PURGEABLE 0x1 3298 #define I915_SHRINK_UNBOUND 0x2 3299 #define I915_SHRINK_BOUND 0x4 3300 #define I915_SHRINK_ACTIVE 0x8 3301 #define I915_SHRINK_VMAPS 0x10 3302 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3303 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3304 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3305 3306 3307 /* i915_gem_tiling.c */ 3308 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3309 { 3310 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3311 3312 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3313 i915_gem_object_is_tiled(obj); 3314 } 3315 3316 /* i915_debugfs.c */ 3317 #ifdef CONFIG_DEBUG_FS 3318 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3319 void i915_debugfs_unregister(struct drm_i915_private *dev_priv); 3320 int i915_debugfs_connector_add(struct drm_connector *connector); 3321 void intel_display_crc_init(struct drm_i915_private *dev_priv); 3322 #else 3323 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} 3324 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {} 3325 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3326 { return 0; } 3327 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} 3328 #endif 3329 3330 /* i915_gpu_error.c */ 3331 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 3332 3333 __printf(2, 3) 3334 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3335 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3336 const struct i915_error_state_file_priv *error); 3337 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3338 struct drm_i915_private *i915, 3339 size_t count, loff_t pos); 3340 static inline void i915_error_state_buf_release( 3341 struct drm_i915_error_state_buf *eb) 3342 { 3343 kfree(eb->buf); 3344 } 3345 void i915_capture_error_state(struct drm_i915_private *dev_priv, 3346 u32 engine_mask, 3347 const char *error_msg); 3348 void i915_error_state_get(struct drm_device *dev, 3349 struct i915_error_state_file_priv *error_priv); 3350 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3351 void i915_destroy_error_state(struct drm_device *dev); 3352 3353 #else 3354 3355 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, 3356 u32 engine_mask, 3357 const char *error_msg) 3358 { 3359 } 3360 3361 static inline void i915_destroy_error_state(struct drm_device *dev) 3362 { 3363 } 3364 3365 #endif 3366 3367 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3368 3369 /* i915_cmd_parser.c */ 3370 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); 3371 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); 3372 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); 3373 bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); 
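/*
 * Illustrative call flow for the command parser entry points declared
 * around here (a sketch, not code from this file): a submission path
 * would typically do
 *
 *	if (intel_engine_needs_cmd_parser(engine))
 *		ret = intel_engine_cmd_parser(engine, batch_obj,
 *					      shadow_batch_obj,
 *					      batch_start_offset,
 *					      batch_len, is_master);
 *
 * with the caller deciding from the return value whether the batch may
 * be executed.
 */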
3374 int intel_engine_cmd_parser(struct intel_engine_cs *engine, 3375 struct drm_i915_gem_object *batch_obj, 3376 struct drm_i915_gem_object *shadow_batch_obj, 3377 u32 batch_start_offset, 3378 u32 batch_len, 3379 bool is_master); 3380 3381 /* i915_suspend.c */ 3382 extern int i915_save_state(struct drm_device *dev); 3383 extern int i915_restore_state(struct drm_device *dev); 3384 3385 /* i915_sysfs.c */ 3386 void i915_setup_sysfs(struct drm_i915_private *dev_priv); 3387 void i915_teardown_sysfs(struct drm_i915_private *dev_priv); 3388 3389 /* intel_i2c.c */ 3390 extern int intel_setup_gmbus(struct drm_device *dev); 3391 extern void intel_teardown_gmbus(struct drm_device *dev); 3392 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3393 unsigned int pin); 3394 3395 extern struct i2c_adapter * 3396 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3397 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3398 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3399 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3400 { 3401 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3402 } 3403 extern void intel_i2c_reset(struct drm_device *dev); 3404 3405 /* intel_bios.c */ 3406 int intel_bios_init(struct drm_i915_private *dev_priv); 3407 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3408 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3409 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3410 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3411 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3412 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3413 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3414 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3415 enum port port); 3416 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3417 enum port port); 3418 3419 3420 /* intel_opregion.c */ 3421 #ifdef CONFIG_ACPI 3422 extern int intel_opregion_setup(struct drm_i915_private *dev_priv); 3423 extern void intel_opregion_register(struct drm_i915_private *dev_priv); 3424 extern void intel_opregion_unregister(struct drm_i915_private *dev_priv); 3425 extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); 3426 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3427 bool enable); 3428 extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, 3429 pci_power_t state); 3430 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); 3431 #else 3432 static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } 3433 static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { } 3434 static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { } 3435 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) 3436 { 3437 } 3438 static inline int 3439 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3440 { 3441 return 0; 3442 } 3443 static inline int 3444 intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) 3445 { 3446 return 0; 3447 } 3448 static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) 
3449 { 3450 return -ENODEV; 3451 } 3452 #endif 3453 3454 /* intel_acpi.c */ 3455 #ifdef CONFIG_ACPI 3456 extern void intel_register_dsm_handler(void); 3457 extern void intel_unregister_dsm_handler(void); 3458 #else 3459 static inline void intel_register_dsm_handler(void) { return; } 3460 static inline void intel_unregister_dsm_handler(void) { return; } 3461 #endif /* CONFIG_ACPI */ 3462 3463 /* intel_device_info.c */ 3464 static inline struct intel_device_info * 3465 mkwrite_device_info(struct drm_i915_private *dev_priv) 3466 { 3467 return (struct intel_device_info *)&dev_priv->info; 3468 } 3469 3470 void intel_device_info_runtime_init(struct drm_i915_private *dev_priv); 3471 void intel_device_info_dump(struct drm_i915_private *dev_priv); 3472 3473 /* modesetting */ 3474 extern void intel_modeset_init_hw(struct drm_device *dev); 3475 extern int intel_modeset_init(struct drm_device *dev); 3476 extern void intel_modeset_gem_init(struct drm_device *dev); 3477 extern void intel_modeset_cleanup(struct drm_device *dev); 3478 extern int intel_connector_register(struct drm_connector *); 3479 extern void intel_connector_unregister(struct drm_connector *); 3480 extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, 3481 bool state); 3482 extern void intel_display_resume(struct drm_device *dev); 3483 extern void i915_redisable_vga(struct drm_i915_private *dev_priv); 3484 extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); 3485 extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); 3486 extern void intel_init_pch_refclk(struct drm_device *dev); 3487 extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); 3488 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3489 bool enable); 3490 3491 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3492 struct drm_file *file); 3493 3494 /* overlay */ 3495 extern struct intel_overlay_error_state * 3496 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); 3497 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3498 struct intel_overlay_error_state *error); 3499 3500 extern struct intel_display_error_state * 3501 intel_display_capture_error_state(struct drm_i915_private *dev_priv); 3502 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3503 struct drm_i915_private *dev_priv, 3504 struct intel_display_error_state *error); 3505 3506 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3507 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3508 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, 3509 u32 reply_mask, u32 reply, int timeout_base_ms); 3510 3511 /* intel_sideband.c */ 3512 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3513 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3514 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3515 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); 3516 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); 3517 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3518 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3519 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3520 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3521 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, 
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
					     uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using two 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
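/*
 * Example (a minimal sketch, with placeholder register names): reading a
 * 64-bit counter exposed as two 32-bit halves, using the retry loop above
 * to guard against a carry between the lower and upper reads. TS_LO and
 * TS_HI are not real registers, and as with all of these accessor macros
 * a local dev_priv must be in scope:
 *
 *	u64 ts = I915_READ64_2x32(TS_LO, TS_HI);
 */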
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
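/*
 * Example (a minimal sketch; HYPOTHETICAL_REG is a placeholder, and the
 * forcewake domains actually required depend on the register): the pattern
 * described above, with the untraced accessors bracketed by an explicitly
 * held uncore lock and forcewake reference:
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *
 *	I915_WRITE_FW(HYPOTHETICAL_REG, val);
 *	POSTING_READ_FW(HYPOTHETICAL_REG);
 *
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */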
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
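/*
 * Example (a minimal sketch with placeholder names): guaranteeing at least
 * 300ms between switching some block off (event A) and back on (event B),
 * per the description above:
 *
 *	power_off_jiffies = jiffies;		(just after event A)
 *	...
 *	wait_remaining_ms_from_jiffies(power_off_jiffies, 300);
 *	(event B may now proceed)
 */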
static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whoever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourselves.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (__i915_gem_request_completed(req))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

#define ptr_mask_bits(ptr) ({						\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v & LINUX_PAGE_MASK);				\
})

#define ptr_unpack_bits(ptr, bits) ({					\
	unsigned long __v = (unsigned long)(ptr);			\
	(bits) = __v & ~LINUX_PAGE_MASK;				\
	(typeof(ptr))(__v & LINUX_PAGE_MASK);				\
})

#define ptr_pack_bits(ptr, bits)					\
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})

#endif