1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi/drm/i915_drm.h> 34 #include <uapi/drm/drm_fourcc.h> 35 36 #include <drm/drmP.h> 37 #include <linux/io-mapping.h> 38 #include <linux/i2c.h> 39 #include <linux/i2c-algo-bit.h> 40 #include <linux/backlight.h> 41 #include <linux/hashtable.h> 42 #include <linux/kref.h> 43 #include <linux/pm_qos.h> 44 #include <linux/shmem_fs.h> 45 46 #include <drm/drmP.h> 47 #include <drm/intel-gtt.h> 48 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 49 #include <drm/drm_gem.h> 50 51 #include "i915_params.h" 52 #include "i915_reg.h" 53 54 #include "intel_bios.h" 55 #include "intel_dpll_mgr.h" 56 #include "intel_guc.h" 57 #include "intel_lrc.h" 58 #include "intel_ringbuffer.h" 59 60 #include "i915_gem.h" 61 #include "i915_gem_gtt.h" 62 #include "i915_gem_render_state.h" 63 64 /* General customization: 65 */ 66 67 #define DRIVER_NAME "i915" 68 #define DRIVER_DESC "Intel Graphics" 69 #define DRIVER_DATE "20160425" 70 71 #undef WARN_ON 72 /* Many gcc seem to no see through this and fall over :( */ 73 #if 0 74 #define WARN_ON(x) ({ \ 75 bool __i915_warn_cond = (x); \ 76 if (__builtin_constant_p(__i915_warn_cond)) \ 77 BUILD_BUG_ON(__i915_warn_cond); \ 78 WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) 79 #else 80 #define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 81 #endif 82 83 #undef WARN_ON_ONCE 84 #define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") 85 86 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ 87 (long) (x), __func__); 88 89 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 90 * WARN_ON()) for hw state sanity checks to check for unexpected conditions 91 * which may not necessarily be a user visible problem. This will either 92 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to 93 * enable distros and users to tailor their preferred amount of i915 abrt 94 * spam. 95 */ 96 #define I915_STATE_WARN(condition, format...) ({ \ 97 int __ret_warn_on = !!(condition); \ 98 if (unlikely(__ret_warn_on)) \ 99 if (!WARN(i915.verbose_state_checks, format)) \ 100 DRM_ERROR(format); \ 101 unlikely(__ret_warn_on); \ 102 }) 103 104 #define I915_STATE_WARN_ON(x) \ 105 I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") 106 107 bool __i915_inject_load_failure(const char *func, int line); 108 #define i915_inject_load_failure() \ 109 __i915_inject_load_failure(__func__, __LINE__) 110 111 static inline const char *yesno(bool v) 112 { 113 return v ? "yes" : "no"; 114 } 115 116 static inline const char *onoff(bool v) 117 { 118 return v ? "on" : "off"; 119 } 120 121 enum i915_pipe { 122 INVALID_PIPE = -1, 123 PIPE_A = 0, 124 PIPE_B, 125 PIPE_C, 126 _PIPE_EDP, 127 I915_MAX_PIPES = _PIPE_EDP 128 }; 129 #define pipe_name(p) ((p) + 'A') 130 131 enum transcoder { 132 TRANSCODER_A = 0, 133 TRANSCODER_B, 134 TRANSCODER_C, 135 TRANSCODER_EDP, 136 TRANSCODER_DSI_A, 137 TRANSCODER_DSI_C, 138 I915_MAX_TRANSCODERS 139 }; 140 141 static inline const char *transcoder_name(enum transcoder transcoder) 142 { 143 switch (transcoder) { 144 case TRANSCODER_A: 145 return "A"; 146 case TRANSCODER_B: 147 return "B"; 148 case TRANSCODER_C: 149 return "C"; 150 case TRANSCODER_EDP: 151 return "EDP"; 152 case TRANSCODER_DSI_A: 153 return "DSI A"; 154 case TRANSCODER_DSI_C: 155 return "DSI C"; 156 default: 157 return "<invalid>"; 158 } 159 } 160 161 static inline bool transcoder_is_dsi(enum transcoder transcoder) 162 { 163 return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; 164 } 165 166 /* 167 * I915_MAX_PLANES in the enum below is the maximum (across all platforms) 168 * number of planes per CRTC. Not all platforms really have this many planes, 169 * which means some arrays of size I915_MAX_PLANES may have unused entries 170 * between the topmost sprite plane and the cursor plane. 171 */ 172 enum plane { 173 PLANE_A = 0, 174 PLANE_B, 175 PLANE_C, 176 PLANE_CURSOR, 177 I915_MAX_PLANES, 178 }; 179 #define plane_name(p) ((p) + 'A') 180 181 #define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A') 182 183 enum port { 184 PORT_A = 0, 185 PORT_B, 186 PORT_C, 187 PORT_D, 188 PORT_E, 189 I915_MAX_PORTS 190 }; 191 #define port_name(p) ((p) + 'A') 192 193 #define I915_NUM_PHYS_VLV 2 194 195 enum dpio_channel { 196 DPIO_CH0, 197 DPIO_CH1 198 }; 199 200 enum dpio_phy { 201 DPIO_PHY0, 202 DPIO_PHY1 203 }; 204 205 enum intel_display_power_domain { 206 POWER_DOMAIN_PIPE_A, 207 POWER_DOMAIN_PIPE_B, 208 POWER_DOMAIN_PIPE_C, 209 POWER_DOMAIN_PIPE_A_PANEL_FITTER, 210 POWER_DOMAIN_PIPE_B_PANEL_FITTER, 211 POWER_DOMAIN_PIPE_C_PANEL_FITTER, 212 POWER_DOMAIN_TRANSCODER_A, 213 POWER_DOMAIN_TRANSCODER_B, 214 POWER_DOMAIN_TRANSCODER_C, 215 POWER_DOMAIN_TRANSCODER_EDP, 216 POWER_DOMAIN_TRANSCODER_DSI_A, 217 POWER_DOMAIN_TRANSCODER_DSI_C, 218 POWER_DOMAIN_PORT_DDI_A_LANES, 219 POWER_DOMAIN_PORT_DDI_B_LANES, 220 POWER_DOMAIN_PORT_DDI_C_LANES, 221 POWER_DOMAIN_PORT_DDI_D_LANES, 222 POWER_DOMAIN_PORT_DDI_E_LANES, 223 POWER_DOMAIN_PORT_DSI, 224 POWER_DOMAIN_PORT_CRT, 225 POWER_DOMAIN_PORT_OTHER, 226 POWER_DOMAIN_VGA, 227 POWER_DOMAIN_AUDIO, 228 POWER_DOMAIN_PLLS, 229 POWER_DOMAIN_AUX_A, 230 POWER_DOMAIN_AUX_B, 231 POWER_DOMAIN_AUX_C, 232 POWER_DOMAIN_AUX_D, 233 POWER_DOMAIN_GMBUS, 234 POWER_DOMAIN_MODESET, 235 POWER_DOMAIN_INIT, 236 237 POWER_DOMAIN_NUM, 238 }; 239 240 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 241 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 242 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 243 #define POWER_DOMAIN_TRANSCODER(tran) \ 244 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 245 (tran) + POWER_DOMAIN_TRANSCODER_A) 246 247 enum hpd_pin { 248 HPD_NONE = 0, 249 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 250 HPD_CRT, 251 HPD_SDVO_B, 252 HPD_SDVO_C, 253 HPD_PORT_A, 254 HPD_PORT_B, 255 HPD_PORT_C, 256 HPD_PORT_D, 257 HPD_PORT_E, 258 HPD_NUM_PINS 259 }; 260 261 #define for_each_hpd_pin(__pin) \ 262 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 263 264 struct i915_hotplug { 265 struct work_struct hotplug_work; 266 267 struct { 268 unsigned long last_jiffies; 269 int count; 270 enum { 271 HPD_ENABLED = 0, 272 HPD_DISABLED = 1, 273 HPD_MARK_DISABLED = 2 274 } state; 275 } stats[HPD_NUM_PINS]; 276 u32 event_bits; 277 struct delayed_work reenable_work; 278 279 struct intel_digital_port *irq_port[I915_MAX_PORTS]; 280 u32 long_port_mask; 281 u32 short_port_mask; 282 struct work_struct dig_port_work; 283 284 struct work_struct poll_init_work; 285 bool poll_enabled; 286 287 /* 288 * if we get a HPD irq from DP and a HPD irq from non-DP 289 * the non-DP HPD could block the workqueue on a mode config 290 * mutex getting, that userspace may have taken. However 291 * userspace is waiting on the DP workqueue to run which is 292 * blocked behind the non-DP one. 293 */ 294 struct workqueue_struct *dp_wq; 295 }; 296 297 #define I915_GEM_GPU_DOMAINS \ 298 (I915_GEM_DOMAIN_RENDER | \ 299 I915_GEM_DOMAIN_SAMPLER | \ 300 I915_GEM_DOMAIN_COMMAND | \ 301 I915_GEM_DOMAIN_INSTRUCTION | \ 302 I915_GEM_DOMAIN_VERTEX) 303 304 #define for_each_pipe(__dev_priv, __p) \ 305 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) 306 #define for_each_pipe_masked(__dev_priv, __p, __mask) \ 307 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ 308 for_each_if ((__mask) & (1 << (__p))) 309 #define for_each_plane(__dev_priv, __pipe, __p) \ 310 for ((__p) = 0; \ 311 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 312 (__p)++) 313 #define for_each_sprite(__dev_priv, __p, __s) \ 314 for ((__s) = 0; \ 315 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ 316 (__s)++) 317 318 #define for_each_port_masked(__port, __ports_mask) \ 319 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ 320 for_each_if ((__ports_mask) & (1 << (__port))) 321 322 #define for_each_crtc(dev, crtc) \ 323 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 324 325 #define for_each_intel_plane(dev, intel_plane) \ 326 list_for_each_entry(intel_plane, \ 327 &dev->mode_config.plane_list, \ 328 base.head) 329 330 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 331 list_for_each_entry(intel_plane, \ 332 &(dev)->mode_config.plane_list, \ 333 base.head) \ 334 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 335 336 #define for_each_intel_crtc(dev, intel_crtc) \ 337 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 338 339 #define for_each_intel_encoder(dev, intel_encoder) \ 340 list_for_each_entry(intel_encoder, \ 341 &(dev)->mode_config.encoder_list, \ 342 base.head) 343 344 #define for_each_intel_connector(dev, intel_connector) \ 345 list_for_each_entry(intel_connector, \ 346 &dev->mode_config.connector_list, \ 347 base.head) 348 349 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 350 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 351 for_each_if ((intel_encoder)->base.crtc == (__crtc)) 352 353 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ 354 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 355 for_each_if ((intel_connector)->base.encoder == (__encoder)) 356 357 #define for_each_power_domain(domain, mask) \ 358 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 359 for_each_if ((1 << (domain)) & (mask)) 360 361 struct drm_i915_private; 362 struct i915_mm_struct; 363 struct i915_mmu_object; 364 365 struct drm_i915_file_private { 366 struct drm_i915_private *dev_priv; 367 struct drm_file *file; 368 369 struct { 370 struct spinlock lock; 371 struct list_head request_list; 372 /* 20ms is a fairly arbitrary limit (greater than the average frame time) 373 * chosen to prevent the CPU getting more than a frame ahead of the GPU 374 * (when using lax throttling for the frontbuffer). We also use it to 375 * offer free GPU waitboosts for severely congested workloads. 376 */ 377 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) 378 } mm; 379 struct idr context_idr; 380 381 struct intel_rps_client { 382 struct list_head link; 383 unsigned boosts; 384 } rps; 385 386 unsigned int bsd_ring; 387 }; 388 389 /* Used by dp and fdi links */ 390 struct intel_link_m_n { 391 uint32_t tu; 392 uint32_t gmch_m; 393 uint32_t gmch_n; 394 uint32_t link_m; 395 uint32_t link_n; 396 }; 397 398 void intel_link_compute_m_n(int bpp, int nlanes, 399 int pixel_clock, int link_clock, 400 struct intel_link_m_n *m_n); 401 402 /* Interface history: 403 * 404 * 1.1: Original. 405 * 1.2: Add Power Management 406 * 1.3: Add vblank support 407 * 1.4: Fix cmdbuffer path, add heap destroy 408 * 1.5: Add vblank pipe configuration 409 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 410 * - Support vertical blank on secondary display pipe 411 */ 412 #define DRIVER_MAJOR 1 413 #define DRIVER_MINOR 6 414 #define DRIVER_PATCHLEVEL 0 415 416 #define WATCH_LISTS 0 417 418 struct opregion_header; 419 struct opregion_acpi; 420 struct opregion_swsci; 421 struct opregion_asle; 422 423 struct intel_opregion { 424 struct opregion_header *header; 425 struct opregion_acpi *acpi; 426 struct opregion_swsci *swsci; 427 u32 swsci_gbda_sub_functions; 428 u32 swsci_sbcb_sub_functions; 429 struct opregion_asle *asle; 430 void *rvda; 431 const void *vbt; 432 u32 vbt_size; 433 u32 *lid_state; 434 struct work_struct asle_work; 435 }; 436 #define OPREGION_SIZE (8*1024) 437 438 struct intel_overlay; 439 struct intel_overlay_error_state; 440 441 #define I915_FENCE_REG_NONE -1 442 #define I915_MAX_NUM_FENCES 32 443 /* 32 fences + sign bit for FENCE_REG_NONE */ 444 #define I915_MAX_NUM_FENCE_BITS 6 445 446 struct drm_i915_fence_reg { 447 struct list_head lru_list; 448 struct drm_i915_gem_object *obj; 449 int pin_count; 450 }; 451 452 struct sdvo_device_mapping { 453 u8 initialized; 454 u8 dvo_port; 455 u8 slave_addr; 456 u8 dvo_wiring; 457 u8 i2c_pin; 458 u8 ddc_pin; 459 }; 460 461 struct intel_display_error_state; 462 463 struct drm_i915_error_state { 464 struct kref ref; 465 struct timeval time; 466 467 char error_msg[128]; 468 int iommu; 469 u32 reset_count; 470 u32 suspend_count; 471 472 /* Generic register state */ 473 u32 eir; 474 u32 pgtbl_er; 475 u32 ier; 476 u32 gtier[4]; 477 u32 ccid; 478 u32 derrmr; 479 u32 forcewake; 480 u32 error; /* gen6+ */ 481 u32 err_int; /* gen7 */ 482 u32 fault_data0; /* gen8, gen9 */ 483 u32 fault_data1; /* gen8, gen9 */ 484 u32 done_reg; 485 u32 gac_eco; 486 u32 gam_ecochk; 487 u32 gab_ctl; 488 u32 gfx_mode; 489 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 490 u64 fence[I915_MAX_NUM_FENCES]; 491 struct intel_overlay_error_state *overlay; 492 struct intel_display_error_state *display; 493 struct drm_i915_error_object *semaphore_obj; 494 495 struct drm_i915_error_ring { 496 bool valid; 497 /* Software tracked state */ 498 bool waiting; 499 int hangcheck_score; 500 enum intel_ring_hangcheck_action hangcheck_action; 501 int num_requests; 502 503 /* our own tracking of ring head and tail */ 504 u32 cpu_ring_head; 505 u32 cpu_ring_tail; 506 507 u32 last_seqno; 508 u32 semaphore_seqno[I915_NUM_ENGINES - 1]; 509 510 /* Register state */ 511 u32 start; 512 u32 tail; 513 u32 head; 514 u32 ctl; 515 u32 hws; 516 u32 ipeir; 517 u32 ipehr; 518 u32 instdone; 519 u32 bbstate; 520 u32 instpm; 521 u32 instps; 522 u32 seqno; 523 u64 bbaddr; 524 u64 acthd; 525 u32 fault_reg; 526 u64 faddr; 527 u32 rc_psmi; /* sleep state */ 528 u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; 529 530 struct drm_i915_error_object { 531 int page_count; 532 u64 gtt_offset; 533 u32 *pages[0]; 534 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; 535 536 struct drm_i915_error_object *wa_ctx; 537 538 struct drm_i915_error_request { 539 long jiffies; 540 u32 seqno; 541 u32 tail; 542 } *requests; 543 544 struct { 545 u32 gfx_mode; 546 union { 547 u64 pdp[4]; 548 u32 pp_dir_base; 549 }; 550 } vm_info; 551 552 pid_t pid; 553 char comm[TASK_COMM_LEN]; 554 } ring[I915_NUM_ENGINES]; 555 556 struct drm_i915_error_buffer { 557 u32 size; 558 u32 name; 559 u32 rseqno[I915_NUM_ENGINES], wseqno; 560 u64 gtt_offset; 561 u32 read_domains; 562 u32 write_domain; 563 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 564 s32 pinned:2; 565 u32 tiling:2; 566 u32 dirty:1; 567 u32 purgeable:1; 568 u32 userptr:1; 569 s32 ring:4; 570 u32 cache_level:3; 571 } **active_bo, **pinned_bo; 572 573 u32 *active_bo_count, *pinned_bo_count; 574 u32 vm_count; 575 }; 576 577 struct intel_connector; 578 struct intel_encoder; 579 struct intel_crtc_state; 580 struct intel_initial_plane_config; 581 struct intel_crtc; 582 struct intel_limit; 583 struct dpll; 584 585 struct drm_i915_display_funcs { 586 int (*get_display_clock_speed)(struct drm_device *dev); 587 int (*get_fifo_size)(struct drm_device *dev, int plane); 588 int (*compute_pipe_wm)(struct intel_crtc_state *cstate); 589 int (*compute_intermediate_wm)(struct drm_device *dev, 590 struct intel_crtc *intel_crtc, 591 struct intel_crtc_state *newstate); 592 void (*initial_watermarks)(struct intel_crtc_state *cstate); 593 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 594 void (*update_wm)(struct drm_crtc *crtc); 595 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 596 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 597 /* Returns the active state of the crtc, and if the crtc is active, 598 * fills out the pipe-config with the hw state. */ 599 bool (*get_pipe_config)(struct intel_crtc *, 600 struct intel_crtc_state *); 601 void (*get_initial_plane_config)(struct intel_crtc *, 602 struct intel_initial_plane_config *); 603 int (*crtc_compute_clock)(struct intel_crtc *crtc, 604 struct intel_crtc_state *crtc_state); 605 void (*crtc_enable)(struct drm_crtc *crtc); 606 void (*crtc_disable)(struct drm_crtc *crtc); 607 void (*audio_codec_enable)(struct drm_connector *connector, 608 struct intel_encoder *encoder, 609 const struct drm_display_mode *adjusted_mode); 610 void (*audio_codec_disable)(struct intel_encoder *encoder); 611 void (*fdi_link_train)(struct drm_crtc *crtc); 612 void (*init_clock_gating)(struct drm_device *dev); 613 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 614 struct drm_framebuffer *fb, 615 struct drm_i915_gem_object *obj, 616 struct drm_i915_gem_request *req, 617 uint32_t flags); 618 void (*hpd_irq_setup)(struct drm_device *dev); 619 /* clock updates for mode set */ 620 /* cursor updates */ 621 /* render clock increase/decrease */ 622 /* display clock increase/decrease */ 623 /* pll clock increase/decrease */ 624 625 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); 626 void (*load_luts)(struct drm_crtc_state *crtc_state); 627 }; 628 629 enum forcewake_domain_id { 630 FW_DOMAIN_ID_RENDER = 0, 631 FW_DOMAIN_ID_BLITTER, 632 FW_DOMAIN_ID_MEDIA, 633 634 FW_DOMAIN_ID_COUNT 635 }; 636 637 enum forcewake_domains { 638 FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER), 639 FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER), 640 FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA), 641 FORCEWAKE_ALL = (FORCEWAKE_RENDER | 642 FORCEWAKE_BLITTER | 643 FORCEWAKE_MEDIA) 644 }; 645 646 #define FW_REG_READ (1) 647 #define FW_REG_WRITE (2) 648 649 enum forcewake_domains 650 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 651 i915_reg_t reg, unsigned int op); 652 653 struct intel_uncore_funcs { 654 void (*force_wake_get)(struct drm_i915_private *dev_priv, 655 enum forcewake_domains domains); 656 void (*force_wake_put)(struct drm_i915_private *dev_priv, 657 enum forcewake_domains domains); 658 659 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 660 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 661 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 662 u64 (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 663 664 void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r, 665 uint8_t val, bool trace); 666 void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r, 667 uint16_t val, bool trace); 668 void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r, 669 uint32_t val, bool trace); 670 void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r, 671 u64 val, bool trace); 672 }; 673 674 struct intel_uncore { 675 struct lock lock; /** lock is also taken in irq contexts. */ 676 677 struct intel_uncore_funcs funcs; 678 679 unsigned fifo_count; 680 enum forcewake_domains fw_domains; 681 682 struct intel_uncore_forcewake_domain { 683 struct drm_i915_private *i915; 684 enum forcewake_domain_id id; 685 enum forcewake_domains mask; 686 unsigned wake_count; 687 struct hrtimer timer; 688 i915_reg_t reg_set; 689 u32 val_set; 690 u32 val_clear; 691 i915_reg_t reg_ack; 692 i915_reg_t reg_post; 693 u32 val_reset; 694 } fw_domain[FW_DOMAIN_ID_COUNT]; 695 696 int unclaimed_mmio_check; 697 }; 698 699 /* Iterate over initialised fw domains */ 700 #define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \ 701 for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ 702 (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \ 703 (domain__)++) \ 704 for_each_if ((mask__) & (domain__)->mask) 705 706 #define for_each_fw_domain(domain__, dev_priv__) \ 707 for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__) 708 709 #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) 710 #define CSR_VERSION_MAJOR(version) ((version) >> 16) 711 #define CSR_VERSION_MINOR(version) ((version) & 0xffff) 712 713 struct intel_csr { 714 struct work_struct work; 715 const char *fw_path; 716 uint32_t *dmc_payload; 717 uint32_t dmc_fw_size; 718 uint32_t version; 719 uint32_t mmio_count; 720 i915_reg_t mmioaddr[8]; 721 uint32_t mmiodata[8]; 722 uint32_t dc_state; 723 uint32_t allowed_dc_mask; 724 }; 725 726 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 727 func(is_mobile) sep \ 728 func(is_i85x) sep \ 729 func(is_i915g) sep \ 730 func(is_i945gm) sep \ 731 func(is_g33) sep \ 732 func(need_gfx_hws) sep \ 733 func(is_g4x) sep \ 734 func(is_pineview) sep \ 735 func(is_broadwater) sep \ 736 func(is_crestline) sep \ 737 func(is_ivybridge) sep \ 738 func(is_valleyview) sep \ 739 func(is_cherryview) sep \ 740 func(is_haswell) sep \ 741 func(is_skylake) sep \ 742 func(is_broxton) sep \ 743 func(is_kabylake) sep \ 744 func(is_preliminary) sep \ 745 func(has_fbc) sep \ 746 func(has_pipe_cxsr) sep \ 747 func(has_hotplug) sep \ 748 func(cursor_needs_physical) sep \ 749 func(has_overlay) sep \ 750 func(overlay_needs_physical) sep \ 751 func(supports_tv) sep \ 752 func(has_llc) sep \ 753 func(has_snoop) sep \ 754 func(has_ddi) sep \ 755 func(has_fpga_dbg) 756 757 #define DEFINE_FLAG(name) u8 name:1 758 #define SEP_SEMICOLON ; 759 760 struct intel_device_info { 761 u32 display_mmio_offset; 762 u16 device_id; 763 u8 num_pipes:3; 764 u8 num_sprites[I915_MAX_PIPES]; 765 u8 gen; 766 u8 ring_mask; /* Rings supported by the HW */ 767 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 768 /* Register offsets for the various display pipes and transcoders */ 769 int pipe_offsets[I915_MAX_TRANSCODERS]; 770 int trans_offsets[I915_MAX_TRANSCODERS]; 771 int palette_offsets[I915_MAX_PIPES]; 772 int cursor_offsets[I915_MAX_PIPES]; 773 774 /* Slice/subslice/EU info */ 775 u8 slice_total; 776 u8 subslice_total; 777 u8 subslice_per_slice; 778 u8 eu_total; 779 u8 eu_per_subslice; 780 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ 781 u8 subslice_7eu[3]; 782 u8 has_slice_pg:1; 783 u8 has_subslice_pg:1; 784 u8 has_eu_pg:1; 785 786 struct color_luts { 787 u16 degamma_lut_size; 788 u16 gamma_lut_size; 789 } color; 790 }; 791 792 #undef DEFINE_FLAG 793 #undef SEP_SEMICOLON 794 795 enum i915_cache_level { 796 I915_CACHE_NONE = 0, 797 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ 798 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc 799 caches, eg sampler/render caches, and the 800 large Last-Level-Cache. LLC is coherent with 801 the CPU, but L3 is only visible to the GPU. */ 802 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ 803 }; 804 805 struct i915_ctx_hang_stats { 806 /* This context had batch pending when hang was declared */ 807 unsigned batch_pending; 808 809 /* This context had batch active when hang was declared */ 810 unsigned batch_active; 811 812 /* Time when this context was last blamed for a GPU reset */ 813 unsigned long guilty_ts; 814 815 /* If the contexts causes a second GPU hang within this time, 816 * it is permanently banned from submitting any more work. 817 */ 818 unsigned long ban_period_seconds; 819 820 /* This context is banned to submit more work */ 821 bool banned; 822 }; 823 824 /* This must match up with the value previously used for execbuf2.rsvd1. */ 825 #define DEFAULT_CONTEXT_HANDLE 0 826 827 #define CONTEXT_NO_ZEROMAP (1<<0) 828 /** 829 * struct intel_context - as the name implies, represents a context. 830 * @ref: reference count. 831 * @user_handle: userspace tracking identity for this context. 832 * @remap_slice: l3 row remapping information. 833 * @flags: context specific flags: 834 * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0. 835 * @file_priv: filp associated with this context (NULL for global default 836 * context). 837 * @hang_stats: information about the role of this context in possible GPU 838 * hangs. 839 * @ppgtt: virtual memory space used by this context. 840 * @legacy_hw_ctx: render context backing object and whether it is correctly 841 * initialized (legacy ring submission mechanism only). 842 * @link: link in the global list of contexts. 843 * 844 * Contexts are memory images used by the hardware to store copies of their 845 * internal state. 846 */ 847 struct intel_context { 848 struct kref ref; 849 int user_handle; 850 uint8_t remap_slice; 851 struct drm_i915_private *i915; 852 int flags; 853 struct drm_i915_file_private *file_priv; 854 struct i915_ctx_hang_stats hang_stats; 855 struct i915_hw_ppgtt *ppgtt; 856 857 /* Legacy ring buffer submission */ 858 struct { 859 struct drm_i915_gem_object *rcs_state; 860 bool initialized; 861 } legacy_hw_ctx; 862 863 /* Execlists */ 864 struct { 865 struct drm_i915_gem_object *state; 866 struct intel_ringbuffer *ringbuf; 867 int pin_count; 868 struct i915_vma *lrc_vma; 869 u64 lrc_desc; 870 uint32_t *lrc_reg_state; 871 } engine[I915_NUM_ENGINES]; 872 873 struct list_head link; 874 }; 875 876 enum fb_op_origin { 877 ORIGIN_GTT, 878 ORIGIN_CPU, 879 ORIGIN_CS, 880 ORIGIN_FLIP, 881 ORIGIN_DIRTYFB, 882 }; 883 884 struct intel_fbc { 885 /* This is always the inner lock when overlapping with struct_mutex and 886 * it's the outer lock when overlapping with stolen_lock. */ 887 struct lock lock; 888 unsigned threshold; 889 unsigned int possible_framebuffer_bits; 890 unsigned int busy_bits; 891 unsigned int visible_pipes_mask; 892 struct intel_crtc *crtc; 893 894 struct drm_mm_node compressed_fb; 895 struct drm_mm_node *compressed_llb; 896 897 bool false_color; 898 899 bool enabled; 900 bool active; 901 902 struct intel_fbc_state_cache { 903 struct { 904 unsigned int mode_flags; 905 uint32_t hsw_bdw_pixel_rate; 906 } crtc; 907 908 struct { 909 unsigned int rotation; 910 int src_w; 911 int src_h; 912 bool visible; 913 } plane; 914 915 struct { 916 u64 ilk_ggtt_offset; 917 uint32_t pixel_format; 918 unsigned int stride; 919 int fence_reg; 920 unsigned int tiling_mode; 921 } fb; 922 } state_cache; 923 924 struct intel_fbc_reg_params { 925 struct { 926 enum i915_pipe pipe; 927 enum plane plane; 928 unsigned int fence_y_offset; 929 } crtc; 930 931 struct { 932 u64 ggtt_offset; 933 uint32_t pixel_format; 934 unsigned int stride; 935 int fence_reg; 936 } fb; 937 938 int cfb_size; 939 } params; 940 941 struct intel_fbc_work { 942 bool scheduled; 943 u32 scheduled_vblank; 944 struct work_struct work; 945 } work; 946 947 const char *no_fbc_reason; 948 }; 949 950 /** 951 * HIGH_RR is the highest eDP panel refresh rate read from EDID 952 * LOW_RR is the lowest eDP panel refresh rate found from EDID 953 * parsing for same resolution. 954 */ 955 enum drrs_refresh_rate_type { 956 DRRS_HIGH_RR, 957 DRRS_LOW_RR, 958 DRRS_MAX_RR, /* RR count */ 959 }; 960 961 enum drrs_support_type { 962 DRRS_NOT_SUPPORTED = 0, 963 STATIC_DRRS_SUPPORT = 1, 964 SEAMLESS_DRRS_SUPPORT = 2 965 }; 966 967 struct intel_dp; 968 struct i915_drrs { 969 struct lock mutex; 970 struct delayed_work work; 971 struct intel_dp *dp; 972 unsigned busy_frontbuffer_bits; 973 enum drrs_refresh_rate_type refresh_rate_type; 974 enum drrs_support_type type; 975 }; 976 977 struct i915_psr { 978 struct lock lock; 979 bool sink_support; 980 bool source_ok; 981 struct intel_dp *enabled; 982 bool active; 983 struct delayed_work work; 984 unsigned busy_frontbuffer_bits; 985 bool psr2_support; 986 bool aux_frame_sync; 987 bool link_standby; 988 }; 989 990 enum intel_pch { 991 PCH_NONE = 0, /* No PCH present */ 992 PCH_IBX, /* Ibexpeak PCH */ 993 PCH_CPT, /* Cougarpoint PCH */ 994 PCH_LPT, /* Lynxpoint PCH */ 995 PCH_SPT, /* Sunrisepoint PCH */ 996 PCH_KBP, /* Kabypoint PCH */ 997 PCH_NOP, 998 }; 999 1000 enum intel_sbi_destination { 1001 SBI_ICLK, 1002 SBI_MPHY, 1003 }; 1004 1005 #define QUIRK_PIPEA_FORCE (1<<0) 1006 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 1007 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 1008 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 1009 #define QUIRK_PIPEB_FORCE (1<<4) 1010 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 1011 1012 struct intel_fbdev; 1013 struct intel_fbc_work; 1014 1015 struct intel_gmbus { 1016 struct i2c_adapter adapter; 1017 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 1018 u32 force_bit; 1019 u32 reg0; 1020 i915_reg_t gpio_reg; 1021 struct i2c_algo_bit_data bit_algo; 1022 struct drm_i915_private *dev_priv; 1023 }; 1024 1025 struct i915_suspend_saved_registers { 1026 u32 saveDSPARB; 1027 u32 saveLVDS; 1028 u32 savePP_ON_DELAYS; 1029 u32 savePP_OFF_DELAYS; 1030 u32 savePP_ON; 1031 u32 savePP_OFF; 1032 u32 savePP_CONTROL; 1033 u32 savePP_DIVISOR; 1034 u32 saveFBC_CONTROL; 1035 u32 saveCACHE_MODE_0; 1036 u32 saveMI_ARB_STATE; 1037 u32 saveSWF0[16]; 1038 u32 saveSWF1[16]; 1039 u32 saveSWF3[3]; 1040 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 1041 u32 savePCH_PORT_HOTPLUG; 1042 u16 saveGCDGMBUS; 1043 }; 1044 1045 struct vlv_s0ix_state { 1046 /* GAM */ 1047 u32 wr_watermark; 1048 u32 gfx_prio_ctrl; 1049 u32 arb_mode; 1050 u32 gfx_pend_tlb0; 1051 u32 gfx_pend_tlb1; 1052 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; 1053 u32 media_max_req_count; 1054 u32 gfx_max_req_count; 1055 u32 render_hwsp; 1056 u32 ecochk; 1057 u32 bsd_hwsp; 1058 u32 blt_hwsp; 1059 u32 tlb_rd_addr; 1060 1061 /* MBC */ 1062 u32 g3dctl; 1063 u32 gsckgctl; 1064 u32 mbctl; 1065 1066 /* GCP */ 1067 u32 ucgctl1; 1068 u32 ucgctl3; 1069 u32 rcgctl1; 1070 u32 rcgctl2; 1071 u32 rstctl; 1072 u32 misccpctl; 1073 1074 /* GPM */ 1075 u32 gfxpause; 1076 u32 rpdeuhwtc; 1077 u32 rpdeuc; 1078 u32 ecobus; 1079 u32 pwrdwnupctl; 1080 u32 rp_down_timeout; 1081 u32 rp_deucsw; 1082 u32 rcubmabdtmr; 1083 u32 rcedata; 1084 u32 spare2gh; 1085 1086 /* Display 1 CZ domain */ 1087 u32 gt_imr; 1088 u32 gt_ier; 1089 u32 pm_imr; 1090 u32 pm_ier; 1091 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; 1092 1093 /* GT SA CZ domain */ 1094 u32 tilectl; 1095 u32 gt_fifoctl; 1096 u32 gtlc_wake_ctrl; 1097 u32 gtlc_survive; 1098 u32 pmwgicz; 1099 1100 /* Display 2 CZ domain */ 1101 u32 gu_ctl0; 1102 u32 gu_ctl1; 1103 u32 pcbr; 1104 u32 clock_gate_dis2; 1105 }; 1106 1107 struct intel_rps_ei { 1108 u32 cz_clock; 1109 u32 render_c0; 1110 u32 media_c0; 1111 }; 1112 1113 struct intel_gen6_power_mgmt { 1114 /* 1115 * work, interrupts_enabled and pm_iir are protected by 1116 * dev_priv->irq_lock 1117 */ 1118 struct work_struct work; 1119 bool interrupts_enabled; 1120 u32 pm_iir; 1121 1122 /* Frequencies are stored in potentially platform dependent multiples. 1123 * In other words, *_freq needs to be multiplied by X to be interesting. 1124 * Soft limits are those which are used for the dynamic reclocking done 1125 * by the driver (raise frequencies under heavy loads, and lower for 1126 * lighter loads). Hard limits are those imposed by the hardware. 1127 * 1128 * A distinction is made for overclocking, which is never enabled by 1129 * default, and is considered to be above the hard limit if it's 1130 * possible at all. 1131 */ 1132 u8 cur_freq; /* Current frequency (cached, may not == HW) */ 1133 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ 1134 u8 max_freq_softlimit; /* Max frequency permitted by the driver */ 1135 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ 1136 u8 min_freq; /* AKA RPn. Minimum frequency */ 1137 u8 idle_freq; /* Frequency to request when we are idle */ 1138 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1139 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1140 u8 rp0_freq; /* Non-overclocked max frequency. */ 1141 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ 1142 1143 u8 up_threshold; /* Current %busy required to uplock */ 1144 u8 down_threshold; /* Current %busy required to downclock */ 1145 1146 int last_adj; 1147 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1148 1149 struct lock client_lock; 1150 struct list_head clients; 1151 bool client_boost; 1152 1153 bool enabled; 1154 struct delayed_work delayed_resume_work; 1155 unsigned boosts; 1156 1157 struct intel_rps_client semaphores, mmioflips; 1158 1159 /* manual wa residency calculations */ 1160 struct intel_rps_ei up_ei, down_ei; 1161 1162 /* 1163 * Protects RPS/RC6 register access and PCU communication. 1164 * Must be taken after struct_mutex if nested. Note that 1165 * this lock may be held for long periods of time when 1166 * talking to hw - so only take it when talking to hw! 1167 */ 1168 struct lock hw_lock; 1169 }; 1170 1171 /* defined intel_pm.c */ 1172 extern struct lock mchdev_lock; 1173 1174 struct intel_ilk_power_mgmt { 1175 u8 cur_delay; 1176 u8 min_delay; 1177 u8 max_delay; 1178 u8 fmax; 1179 u8 fstart; 1180 1181 u64 last_count1; 1182 unsigned long last_time1; 1183 unsigned long chipset_power; 1184 u64 last_count2; 1185 u64 last_time2; 1186 unsigned long gfx_power; 1187 u8 corr; 1188 1189 int c_m; 1190 int r_t; 1191 }; 1192 1193 struct drm_i915_private; 1194 struct i915_power_well; 1195 1196 struct i915_power_well_ops { 1197 /* 1198 * Synchronize the well's hw state to match the current sw state, for 1199 * example enable/disable it based on the current refcount. Called 1200 * during driver init and resume time, possibly after first calling 1201 * the enable/disable handlers. 1202 */ 1203 void (*sync_hw)(struct drm_i915_private *dev_priv, 1204 struct i915_power_well *power_well); 1205 /* 1206 * Enable the well and resources that depend on it (for example 1207 * interrupts located on the well). Called after the 0->1 refcount 1208 * transition. 1209 */ 1210 void (*enable)(struct drm_i915_private *dev_priv, 1211 struct i915_power_well *power_well); 1212 /* 1213 * Disable the well and resources that depend on it. Called after 1214 * the 1->0 refcount transition. 1215 */ 1216 void (*disable)(struct drm_i915_private *dev_priv, 1217 struct i915_power_well *power_well); 1218 /* Returns the hw enabled state. */ 1219 bool (*is_enabled)(struct drm_i915_private *dev_priv, 1220 struct i915_power_well *power_well); 1221 }; 1222 1223 /* Power well structure for haswell */ 1224 struct i915_power_well { 1225 const char *name; 1226 bool always_on; 1227 /* power well enable/disable usage count */ 1228 int count; 1229 /* cached hw enabled state */ 1230 bool hw_enabled; 1231 unsigned long domains; 1232 unsigned long data; 1233 const struct i915_power_well_ops *ops; 1234 }; 1235 1236 struct i915_power_domains { 1237 /* 1238 * Power wells needed for initialization at driver init and suspend 1239 * time are on. They are kept on until after the first modeset. 1240 */ 1241 bool init_power_on; 1242 bool initializing; 1243 int power_well_count; 1244 1245 struct lock lock; 1246 int domain_use_count[POWER_DOMAIN_NUM]; 1247 struct i915_power_well *power_wells; 1248 }; 1249 1250 #define MAX_L3_SLICES 2 1251 struct intel_l3_parity { 1252 u32 *remap_info[MAX_L3_SLICES]; 1253 struct work_struct error_work; 1254 int which_slice; 1255 }; 1256 1257 struct i915_gem_mm { 1258 /** Memory allocator for GTT stolen memory */ 1259 struct drm_mm stolen; 1260 /** Protects the usage of the GTT stolen memory allocator. This is 1261 * always the inner lock when overlapping with struct_mutex. */ 1262 struct lock stolen_lock; 1263 1264 /** List of all objects in gtt_space. Used to restore gtt 1265 * mappings on resume */ 1266 struct list_head bound_list; 1267 /** 1268 * List of objects which are not bound to the GTT (thus 1269 * are idle and not used by the GPU) but still have 1270 * (presumably uncached) pages still attached. 1271 */ 1272 struct list_head unbound_list; 1273 1274 /** Usable portion of the GTT for GEM */ 1275 unsigned long stolen_base; /* limited to low memory (32-bit) */ 1276 1277 /** PPGTT used for aliasing the PPGTT with the GTT */ 1278 struct i915_hw_ppgtt *aliasing_ppgtt; 1279 1280 struct notifier_block oom_notifier; 1281 struct notifier_block vmap_notifier; 1282 #if 0 1283 struct shrinker shrinker; 1284 #endif 1285 bool shrinker_no_lock_stealing; 1286 1287 /** LRU list of objects with fence regs on them. */ 1288 struct list_head fence_list; 1289 1290 /** 1291 * We leave the user IRQ off as much as possible, 1292 * but this means that requests will finish and never 1293 * be retired once the system goes idle. Set a timer to 1294 * fire periodically while the ring is running. When it 1295 * fires, go retire requests. 1296 */ 1297 struct delayed_work retire_work; 1298 1299 /** 1300 * When we detect an idle GPU, we want to turn on 1301 * powersaving features. So once we see that there 1302 * are no more requests outstanding and no more 1303 * arrive within a small period of time, we fire 1304 * off the idle_work. 1305 */ 1306 struct delayed_work idle_work; 1307 1308 /** 1309 * Are we in a non-interruptible section of code like 1310 * modesetting? 1311 */ 1312 bool interruptible; 1313 1314 /** 1315 * Is the GPU currently considered idle, or busy executing userspace 1316 * requests? Whilst idle, we attempt to power down the hardware and 1317 * display clocks. In order to reduce the effect on performance, there 1318 * is a slight delay before we do so. 1319 */ 1320 bool busy; 1321 1322 /* the indicator for dispatch video commands on two BSD rings */ 1323 unsigned int bsd_ring_dispatch_index; 1324 1325 /** Bit 6 swizzling required for X tiling */ 1326 uint32_t bit_6_swizzle_x; 1327 /** Bit 6 swizzling required for Y tiling */ 1328 uint32_t bit_6_swizzle_y; 1329 1330 /* accounting, useful for userland debugging */ 1331 struct spinlock object_stat_lock; 1332 size_t object_memory; 1333 u32 object_count; 1334 }; 1335 1336 struct drm_i915_error_state_buf { 1337 struct drm_i915_private *i915; 1338 unsigned bytes; 1339 unsigned size; 1340 int err; 1341 u8 *buf; 1342 loff_t start; 1343 loff_t pos; 1344 }; 1345 1346 struct i915_error_state_file_priv { 1347 struct drm_device *dev; 1348 struct drm_i915_error_state *error; 1349 }; 1350 1351 struct i915_gpu_error { 1352 /* For hangcheck timer */ 1353 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1354 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1355 /* Hang gpu twice in this window and your context gets banned */ 1356 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1357 1358 struct workqueue_struct *hangcheck_wq; 1359 struct delayed_work hangcheck_work; 1360 1361 /* For reset and error_state handling. */ 1362 struct lock lock; 1363 /* Protected by the above dev->gpu_error.lock. */ 1364 struct drm_i915_error_state *first_error; 1365 1366 unsigned long missed_irq_rings; 1367 1368 /** 1369 * State variable controlling the reset flow and count 1370 * 1371 * This is a counter which gets incremented when reset is triggered, 1372 * and again when reset has been handled. So odd values (lowest bit set) 1373 * means that reset is in progress and even values that 1374 * (reset_counter >> 1):th reset was successfully completed. 1375 * 1376 * If reset is not completed succesfully, the I915_WEDGE bit is 1377 * set meaning that hardware is terminally sour and there is no 1378 * recovery. All waiters on the reset_queue will be woken when 1379 * that happens. 1380 * 1381 * This counter is used by the wait_seqno code to notice that reset 1382 * event happened and it needs to restart the entire ioctl (since most 1383 * likely the seqno it waited for won't ever signal anytime soon). 1384 * 1385 * This is important for lock-free wait paths, where no contended lock 1386 * naturally enforces the correct ordering between the bail-out of the 1387 * waiter and the gpu reset work code. 1388 */ 1389 atomic_t reset_counter; 1390 1391 #define I915_RESET_IN_PROGRESS_FLAG 1 1392 #define I915_WEDGED (1 << 31) 1393 1394 /** 1395 * Waitqueue to signal when the reset has completed. Used by clients 1396 * that wait for dev_priv->mm.wedged to settle. 1397 */ 1398 wait_queue_head_t reset_queue; 1399 1400 /* Userspace knobs for gpu hang simulation; 1401 * combines both a ring mask, and extra flags 1402 */ 1403 u32 stop_rings; 1404 #define I915_STOP_RING_ALLOW_BAN (1 << 31) 1405 #define I915_STOP_RING_ALLOW_WARN (1 << 30) 1406 1407 /* For missed irq/seqno simulation. */ 1408 unsigned int test_irq_rings; 1409 }; 1410 1411 enum modeset_restore { 1412 MODESET_ON_LID_OPEN, 1413 MODESET_DONE, 1414 MODESET_SUSPENDED, 1415 }; 1416 1417 #define DP_AUX_A 0x40 1418 #define DP_AUX_B 0x10 1419 #define DP_AUX_C 0x20 1420 #define DP_AUX_D 0x30 1421 1422 #define DDC_PIN_B 0x05 1423 #define DDC_PIN_C 0x04 1424 #define DDC_PIN_D 0x06 1425 1426 struct ddi_vbt_port_info { 1427 /* 1428 * This is an index in the HDMI/DVI DDI buffer translation table. 1429 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1430 * populate this field. 1431 */ 1432 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1433 uint8_t hdmi_level_shift; 1434 1435 uint8_t supports_dvi:1; 1436 uint8_t supports_hdmi:1; 1437 uint8_t supports_dp:1; 1438 1439 uint8_t alternate_aux_channel; 1440 uint8_t alternate_ddc_pin; 1441 1442 uint8_t dp_boost_level; 1443 uint8_t hdmi_boost_level; 1444 }; 1445 1446 enum psr_lines_to_wait { 1447 PSR_0_LINES_TO_WAIT = 0, 1448 PSR_1_LINE_TO_WAIT, 1449 PSR_4_LINES_TO_WAIT, 1450 PSR_8_LINES_TO_WAIT 1451 }; 1452 1453 struct intel_vbt_data { 1454 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1455 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1456 1457 /* Feature bits */ 1458 unsigned int int_tv_support:1; 1459 unsigned int lvds_dither:1; 1460 unsigned int lvds_vbt:1; 1461 unsigned int int_crt_support:1; 1462 unsigned int lvds_use_ssc:1; 1463 unsigned int display_clock_mode:1; 1464 unsigned int fdi_rx_polarity_inverted:1; 1465 unsigned int panel_type:4; 1466 int lvds_ssc_freq; 1467 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1468 1469 enum drrs_support_type drrs_type; 1470 1471 struct { 1472 int rate; 1473 int lanes; 1474 int preemphasis; 1475 int vswing; 1476 bool low_vswing; 1477 bool initialized; 1478 bool support; 1479 int bpp; 1480 struct edp_power_seq pps; 1481 } edp; 1482 1483 struct { 1484 bool full_link; 1485 bool require_aux_wakeup; 1486 int idle_frames; 1487 enum psr_lines_to_wait lines_to_wait; 1488 int tp1_wakeup_time; 1489 int tp2_tp3_wakeup_time; 1490 } psr; 1491 1492 struct { 1493 u16 pwm_freq_hz; 1494 bool present; 1495 bool active_low_pwm; 1496 u8 min_brightness; /* min_brightness/255 of max */ 1497 } backlight; 1498 1499 /* MIPI DSI */ 1500 struct { 1501 u16 panel_id; 1502 struct mipi_config *config; 1503 struct mipi_pps_data *pps; 1504 u8 seq_version; 1505 u32 size; 1506 u8 *data; 1507 const u8 *sequence[MIPI_SEQ_MAX]; 1508 } dsi; 1509 1510 int crt_ddc_pin; 1511 1512 int child_dev_num; 1513 union child_device_config *child_dev; 1514 1515 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1516 struct sdvo_device_mapping sdvo_mappings[2]; 1517 }; 1518 1519 enum intel_ddb_partitioning { 1520 INTEL_DDB_PART_1_2, 1521 INTEL_DDB_PART_5_6, /* IVB+ */ 1522 }; 1523 1524 struct intel_wm_level { 1525 bool enable; 1526 uint32_t pri_val; 1527 uint32_t spr_val; 1528 uint32_t cur_val; 1529 uint32_t fbc_val; 1530 }; 1531 1532 struct ilk_wm_values { 1533 uint32_t wm_pipe[3]; 1534 uint32_t wm_lp[3]; 1535 uint32_t wm_lp_spr[3]; 1536 uint32_t wm_linetime[3]; 1537 bool enable_fbc_wm; 1538 enum intel_ddb_partitioning partitioning; 1539 }; 1540 1541 struct vlv_pipe_wm { 1542 uint16_t primary; 1543 uint16_t sprite[2]; 1544 uint8_t cursor; 1545 }; 1546 1547 struct vlv_sr_wm { 1548 uint16_t plane; 1549 uint8_t cursor; 1550 }; 1551 1552 struct vlv_wm_values { 1553 struct vlv_pipe_wm pipe[3]; 1554 struct vlv_sr_wm sr; 1555 struct { 1556 uint8_t cursor; 1557 uint8_t sprite[2]; 1558 uint8_t primary; 1559 } ddl[3]; 1560 uint8_t level; 1561 bool cxsr; 1562 }; 1563 1564 struct skl_ddb_entry { 1565 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1566 }; 1567 1568 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1569 { 1570 return entry->end - entry->start; 1571 } 1572 1573 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1574 const struct skl_ddb_entry *e2) 1575 { 1576 if (e1->start == e2->start && e1->end == e2->end) 1577 return true; 1578 1579 return false; 1580 } 1581 1582 struct skl_ddb_allocation { 1583 struct skl_ddb_entry pipe[I915_MAX_PIPES]; 1584 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1585 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1586 }; 1587 1588 struct skl_wm_values { 1589 bool dirty[I915_MAX_PIPES]; 1590 struct skl_ddb_allocation ddb; 1591 uint32_t wm_linetime[I915_MAX_PIPES]; 1592 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1593 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; 1594 }; 1595 1596 struct skl_wm_level { 1597 bool plane_en[I915_MAX_PLANES]; 1598 uint16_t plane_res_b[I915_MAX_PLANES]; 1599 uint8_t plane_res_l[I915_MAX_PLANES]; 1600 }; 1601 1602 /* 1603 * This struct helps tracking the state needed for runtime PM, which puts the 1604 * device in PCI D3 state. Notice that when this happens, nothing on the 1605 * graphics device works, even register access, so we don't get interrupts nor 1606 * anything else. 1607 * 1608 * Every piece of our code that needs to actually touch the hardware needs to 1609 * either call intel_runtime_pm_get or call intel_display_power_get with the 1610 * appropriate power domain. 1611 * 1612 * Our driver uses the autosuspend delay feature, which means we'll only really 1613 * suspend if we stay with zero refcount for a certain amount of time. The 1614 * default value is currently very conservative (see intel_runtime_pm_enable), but 1615 * it can be changed with the standard runtime PM files from sysfs. 1616 * 1617 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1618 * goes back to false exactly before we reenable the IRQs. We use this variable 1619 * to check if someone is trying to enable/disable IRQs while they're supposed 1620 * to be disabled. This shouldn't happen and we'll print some error messages in 1621 * case it happens. 1622 * 1623 * For more, read the Documentation/power/runtime_pm.txt. 1624 */ 1625 struct i915_runtime_pm { 1626 atomic_t wakeref_count; 1627 atomic_t atomic_seq; 1628 bool suspended; 1629 bool irqs_enabled; 1630 }; 1631 1632 enum intel_pipe_crc_source { 1633 INTEL_PIPE_CRC_SOURCE_NONE, 1634 INTEL_PIPE_CRC_SOURCE_PLANE1, 1635 INTEL_PIPE_CRC_SOURCE_PLANE2, 1636 INTEL_PIPE_CRC_SOURCE_PF, 1637 INTEL_PIPE_CRC_SOURCE_PIPE, 1638 /* TV/DP on pre-gen5/vlv can't use the pipe source. */ 1639 INTEL_PIPE_CRC_SOURCE_TV, 1640 INTEL_PIPE_CRC_SOURCE_DP_B, 1641 INTEL_PIPE_CRC_SOURCE_DP_C, 1642 INTEL_PIPE_CRC_SOURCE_DP_D, 1643 INTEL_PIPE_CRC_SOURCE_AUTO, 1644 INTEL_PIPE_CRC_SOURCE_MAX, 1645 }; 1646 1647 struct intel_pipe_crc_entry { 1648 uint32_t frame; 1649 uint32_t crc[5]; 1650 }; 1651 1652 #define INTEL_PIPE_CRC_ENTRIES_NR 128 1653 struct intel_pipe_crc { 1654 struct spinlock lock; 1655 bool opened; /* exclusive access to the result file */ 1656 struct intel_pipe_crc_entry *entries; 1657 enum intel_pipe_crc_source source; 1658 int head, tail; 1659 wait_queue_head_t wq; 1660 }; 1661 1662 struct i915_frontbuffer_tracking { 1663 struct lock lock; 1664 1665 /* 1666 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 1667 * scheduled flips. 1668 */ 1669 unsigned busy_bits; 1670 unsigned flip_bits; 1671 }; 1672 1673 struct i915_wa_reg { 1674 i915_reg_t addr; 1675 u32 value; 1676 /* bitmask representing WA bits */ 1677 u32 mask; 1678 }; 1679 1680 /* 1681 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only 1682 * allowing it for RCS as we don't foresee any requirement of having 1683 * a whitelist for other engines. When it is really required for 1684 * other engines then the limit need to be increased. 1685 */ 1686 #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) 1687 1688 struct i915_workarounds { 1689 struct i915_wa_reg reg[I915_MAX_WA_REGS]; 1690 u32 count; 1691 u32 hw_whitelist_count[I915_NUM_ENGINES]; 1692 }; 1693 1694 struct i915_virtual_gpu { 1695 bool active; 1696 }; 1697 1698 struct i915_execbuffer_params { 1699 struct drm_device *dev; 1700 struct drm_file *file; 1701 uint32_t dispatch_flags; 1702 uint32_t args_batch_start_offset; 1703 uint64_t batch_obj_vm_offset; 1704 struct intel_engine_cs *engine; 1705 struct drm_i915_gem_object *batch_obj; 1706 struct intel_context *ctx; 1707 struct drm_i915_gem_request *request; 1708 }; 1709 1710 /* used in computing the new watermarks state */ 1711 struct intel_wm_config { 1712 unsigned int num_pipes_active; 1713 bool sprites_enabled; 1714 bool sprites_scaled; 1715 }; 1716 1717 struct drm_i915_private { 1718 struct drm_device *dev; 1719 struct kmem_cache *objects; 1720 struct kmem_cache *vmas; 1721 struct kmem_cache *requests; 1722 1723 struct intel_device_info info; 1724 1725 int relative_constants_mode; 1726 1727 char __iomem *regs; 1728 1729 struct intel_uncore uncore; 1730 1731 struct i915_virtual_gpu vgpu; 1732 1733 struct intel_guc guc; 1734 1735 struct intel_csr csr; 1736 1737 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 1738 1739 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 1740 * controller on different i2c buses. */ 1741 struct lock gmbus_mutex; 1742 1743 /** 1744 * Base address of the gmbus and gpio block. 1745 */ 1746 uint32_t gpio_mmio_base; 1747 1748 /* MMIO base address for MIPI regs */ 1749 uint32_t mipi_mmio_base; 1750 1751 uint32_t psr_mmio_base; 1752 1753 wait_queue_head_t gmbus_wait_queue; 1754 1755 struct pci_dev *bridge_dev; 1756 struct intel_engine_cs engine[I915_NUM_ENGINES]; 1757 struct drm_i915_gem_object *semaphore_obj; 1758 uint32_t last_seqno, next_seqno; 1759 1760 struct drm_dma_handle *status_page_dmah; 1761 struct resource *mch_res; 1762 int mch_res_rid; 1763 1764 /* protects the irq masks */ 1765 struct lock irq_lock; 1766 1767 /* protects the mmio flip data */ 1768 struct spinlock mmio_flip_lock; 1769 1770 bool display_irqs_enabled; 1771 1772 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1773 struct pm_qos_request pm_qos; 1774 1775 /* Sideband mailbox protection */ 1776 struct lock sb_lock; 1777 1778 /** Cached value of IMR to avoid reads in updating the bitfield */ 1779 union { 1780 u32 irq_mask; 1781 u32 de_irq_mask[I915_MAX_PIPES]; 1782 }; 1783 u32 gt_irq_mask; 1784 u32 pm_irq_mask; 1785 u32 pm_rps_events; 1786 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1787 1788 struct i915_hotplug hotplug; 1789 struct intel_fbc fbc; 1790 struct i915_drrs drrs; 1791 struct intel_opregion opregion; 1792 struct intel_vbt_data vbt; 1793 1794 bool preserve_bios_swizzle; 1795 1796 /* overlay */ 1797 struct intel_overlay *overlay; 1798 1799 /* backlight registers and fields in struct intel_panel */ 1800 struct lock backlight_lock; 1801 1802 /* LVDS info */ 1803 bool no_aux_handshake; 1804 1805 /* protects panel power sequencer state */ 1806 struct lock pps_mutex; 1807 1808 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1809 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1810 1811 unsigned int fsb_freq, mem_freq, is_ddr3; 1812 unsigned int skl_boot_cdclk; 1813 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1814 unsigned int max_dotclk_freq; 1815 unsigned int rawclk_freq; 1816 unsigned int hpll_freq; 1817 unsigned int czclk_freq; 1818 1819 /** 1820 * wq - Driver workqueue for GEM. 1821 * 1822 * NOTE: Work items scheduled here are not allowed to grab any modeset 1823 * locks, for otherwise the flushing done in the pageflip code will 1824 * result in deadlocks. 1825 */ 1826 struct workqueue_struct *wq; 1827 1828 /* Display functions */ 1829 struct drm_i915_display_funcs display; 1830 1831 /* PCH chipset type */ 1832 enum intel_pch pch_type; 1833 unsigned short pch_id; 1834 1835 unsigned long quirks; 1836 1837 enum modeset_restore modeset_restore; 1838 struct lock modeset_restore_lock; 1839 struct drm_atomic_state *modeset_restore_state; 1840 1841 struct list_head vm_list; /* Global list of all address spaces */ 1842 struct i915_ggtt ggtt; /* VM representing the global address space */ 1843 1844 struct i915_gem_mm mm; 1845 DECLARE_HASHTABLE(mm_structs, 7); 1846 struct lock mm_lock; 1847 1848 /* Kernel Modesetting */ 1849 1850 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1851 struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1852 wait_queue_head_t pending_flip_queue; 1853 1854 #ifdef CONFIG_DEBUG_FS 1855 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1856 #endif 1857 1858 /* dpll and cdclk state is protected by connection_mutex */ 1859 int num_shared_dpll; 1860 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1861 const struct intel_dpll_mgr *dpll_mgr; 1862 1863 /* 1864 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 1865 * Must be global rather than per dpll, because on some platforms 1866 * plls share registers. 1867 */ 1868 struct lock dpll_lock; 1869 1870 unsigned int active_crtcs; 1871 unsigned int min_pixclk[I915_MAX_PIPES]; 1872 1873 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1874 1875 struct i915_workarounds workarounds; 1876 1877 struct i915_frontbuffer_tracking fb_tracking; 1878 1879 u16 orig_clock; 1880 1881 bool mchbar_need_disable; 1882 1883 struct intel_l3_parity l3_parity; 1884 1885 /* Cannot be determined by PCIID. You must always read a register. */ 1886 u32 edram_cap; 1887 1888 /* gen6+ rps state */ 1889 struct intel_gen6_power_mgmt rps; 1890 1891 /* ilk-only ips/rps state. Everything in here is protected by the global 1892 * mchdev_lock in intel_pm.c */ 1893 struct intel_ilk_power_mgmt ips; 1894 1895 struct i915_power_domains power_domains; 1896 1897 struct i915_psr psr; 1898 1899 struct i915_gpu_error gpu_error; 1900 1901 struct drm_i915_gem_object *vlv_pctx; 1902 1903 #ifdef CONFIG_DRM_FBDEV_EMULATION 1904 /* list of fbdev register on this device */ 1905 struct intel_fbdev *fbdev; 1906 struct work_struct fbdev_suspend_work; 1907 #endif 1908 1909 struct drm_property *broadcast_rgb_property; 1910 struct drm_property *force_audio_property; 1911 1912 /* hda/i915 audio component */ 1913 struct i915_audio_component *audio_component; 1914 bool audio_component_registered; 1915 /** 1916 * av_mutex - mutex for audio/video sync 1917 * 1918 */ 1919 struct lock av_mutex; 1920 1921 uint32_t hw_context_size; 1922 struct list_head context_list; 1923 1924 u32 fdi_rx_config; 1925 1926 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 1927 u32 chv_phy_control; 1928 /* 1929 * Shadows for CHV DPLL_MD regs to keep the state 1930 * checker somewhat working in the presence hardware 1931 * crappiness (can't read out DPLL_MD for pipes B & C). 1932 */ 1933 u32 chv_dpll_md[I915_MAX_PIPES]; 1934 u32 bxt_phy_grc; 1935 1936 u32 suspend_count; 1937 bool suspended_to_idle; 1938 struct i915_suspend_saved_registers regfile; 1939 struct vlv_s0ix_state vlv_s0ix_state; 1940 1941 struct { 1942 /* 1943 * Raw watermark latency values: 1944 * in 0.1us units for WM0, 1945 * in 0.5us units for WM1+. 1946 */ 1947 /* primary */ 1948 uint16_t pri_latency[5]; 1949 /* sprite */ 1950 uint16_t spr_latency[5]; 1951 /* cursor */ 1952 uint16_t cur_latency[5]; 1953 /* 1954 * Raw watermark memory latency values 1955 * for SKL for all 8 levels 1956 * in 1us units. 1957 */ 1958 uint16_t skl_latency[8]; 1959 1960 /* Committed wm config */ 1961 struct intel_wm_config config; 1962 1963 /* 1964 * The skl_wm_values structure is a bit too big for stack 1965 * allocation, so we keep the staging struct where we store 1966 * intermediate results here instead. 1967 */ 1968 struct skl_wm_values skl_results; 1969 1970 /* current hardware state */ 1971 union { 1972 struct ilk_wm_values hw; 1973 struct skl_wm_values skl_hw; 1974 struct vlv_wm_values vlv; 1975 }; 1976 1977 uint8_t max_level; 1978 1979 /* 1980 * Should be held around atomic WM register writing; also 1981 * protects * intel_crtc->wm.active and 1982 * cstate->wm.need_postvbl_update. 1983 */ 1984 struct lock wm_mutex; 1985 } wm; 1986 1987 struct i915_runtime_pm pm; 1988 1989 uint32_t bios_vgacntr; 1990 1991 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1992 struct { 1993 int (*execbuf_submit)(struct i915_execbuffer_params *params, 1994 struct drm_i915_gem_execbuffer2 *args, 1995 struct list_head *vmas); 1996 int (*init_engines)(struct drm_device *dev); 1997 void (*cleanup_engine)(struct intel_engine_cs *engine); 1998 void (*stop_engine)(struct intel_engine_cs *engine); 1999 } gt; 2000 2001 struct intel_context *kernel_context; 2002 2003 /* perform PHY state sanity checks? */ 2004 bool chv_phy_assert[2]; 2005 2006 struct intel_encoder *dig_port_map[I915_MAX_PORTS]; 2007 2008 /* 2009 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2010 * will be rejected. Instead look for a better place. 2011 */ 2012 }; 2013 2014 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2015 { 2016 return dev->dev_private; 2017 } 2018 2019 static inline struct drm_i915_private *dev_to_i915(struct device *dev) 2020 { 2021 return to_i915(device_get_softc(dev->bsddev)); 2022 } 2023 2024 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2025 { 2026 return container_of(guc, struct drm_i915_private, guc); 2027 } 2028 2029 /* Simple iterator over all initialised engines */ 2030 #define for_each_engine(engine__, dev_priv__) \ 2031 for ((engine__) = &(dev_priv__)->engine[0]; \ 2032 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2033 (engine__)++) \ 2034 for_each_if (intel_engine_initialized(engine__)) 2035 2036 /* Iterator with engine_id */ 2037 #define for_each_engine_id(engine__, dev_priv__, id__) \ 2038 for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \ 2039 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2040 (engine__)++) \ 2041 for_each_if (((id__) = (engine__)->id, \ 2042 intel_engine_initialized(engine__))) 2043 2044 /* Iterator over subset of engines selected by mask */ 2045 #define for_each_engine_masked(engine__, dev_priv__, mask__) \ 2046 for ((engine__) = &(dev_priv__)->engine[0]; \ 2047 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ 2048 (engine__)++) \ 2049 for_each_if (((mask__) & intel_engine_flag(engine__)) && \ 2050 intel_engine_initialized(engine__)) 2051 2052 enum hdmi_force_audio { 2053 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2054 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2055 HDMI_AUDIO_AUTO, /* trust EDID */ 2056 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2057 }; 2058 2059 #define I915_GTT_OFFSET_NONE ((u32)-1) 2060 2061 struct drm_i915_gem_object_ops { 2062 unsigned int flags; 2063 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 2064 2065 /* Interface between the GEM object and its backing storage. 2066 * get_pages() is called once prior to the use of the associated set 2067 * of pages before to binding them into the GTT, and put_pages() is 2068 * called after we no longer need them. As we expect there to be 2069 * associated cost with migrating pages between the backing storage 2070 * and making them available for the GPU (e.g. clflush), we may hold 2071 * onto the pages after they are no longer referenced by the GPU 2072 * in case they may be used again shortly (for example migrating the 2073 * pages to a different memory domain within the GTT). put_pages() 2074 * will therefore most likely be called when the object itself is 2075 * being released or under memory pressure (where we attempt to 2076 * reap pages for the shrinker). 2077 */ 2078 int (*get_pages)(struct drm_i915_gem_object *); 2079 void (*put_pages)(struct drm_i915_gem_object *); 2080 2081 int (*dmabuf_export)(struct drm_i915_gem_object *); 2082 void (*release)(struct drm_i915_gem_object *); 2083 }; 2084 2085 /* 2086 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2087 * considered to be the frontbuffer for the given plane interface-wise. This 2088 * doesn't mean that the hw necessarily already scans it out, but that any 2089 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 2090 * 2091 * We have one bit per pipe and per scanout plane type. 2092 */ 2093 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2094 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2095 #define INTEL_FRONTBUFFER_BITS \ 2096 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) 2097 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2098 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2099 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2100 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2101 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2102 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2103 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2104 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2105 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2106 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2107 2108 struct drm_i915_gem_object { 2109 struct drm_gem_object base; 2110 2111 const struct drm_i915_gem_object_ops *ops; 2112 2113 /** List of VMAs backed by this object */ 2114 struct list_head vma_list; 2115 2116 /** Stolen memory for this object, instead of being backed by shmem. */ 2117 struct drm_mm_node *stolen; 2118 struct list_head global_list; 2119 2120 struct list_head engine_list[I915_NUM_ENGINES]; 2121 /** Used in execbuf to temporarily hold a ref */ 2122 struct list_head obj_exec_link; 2123 2124 struct list_head batch_pool_link; 2125 2126 /** 2127 * This is set if the object is on the active lists (has pending 2128 * rendering and so a non-zero seqno), and is not set if it i s on 2129 * inactive (ready to be unbound) list. 2130 */ 2131 unsigned int active:I915_NUM_ENGINES; 2132 2133 /** 2134 * This is set if the object has been written to since last bound 2135 * to the GTT 2136 */ 2137 unsigned int dirty:1; 2138 2139 /** 2140 * Fence register bits (if any) for this object. Will be set 2141 * as needed when mapped into the GTT. 2142 * Protected by dev->struct_mutex. 2143 */ 2144 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 2145 2146 /** 2147 * Advice: are the backing pages purgeable? 2148 */ 2149 unsigned int madv:2; 2150 2151 /** 2152 * Current tiling mode for the object. 2153 */ 2154 unsigned int tiling_mode:2; 2155 /** 2156 * Whether the tiling parameters for the currently associated fence 2157 * register have changed. Note that for the purposes of tracking 2158 * tiling changes we also treat the unfenced register, the register 2159 * slot that the object occupies whilst it executes a fenced 2160 * command (such as BLT on gen2/3), as a "fence". 2161 */ 2162 unsigned int fence_dirty:1; 2163 2164 /** 2165 * Is the object at the current location in the gtt mappable and 2166 * fenceable? Used to avoid costly recalculations. 2167 */ 2168 unsigned int map_and_fenceable:1; 2169 2170 /** 2171 * Whether the current gtt mapping needs to be mappable (and isn't just 2172 * mappable by accident). Track pin and fault separate for a more 2173 * accurate mappable working set. 2174 */ 2175 unsigned int fault_mappable:1; 2176 2177 /* 2178 * Is the object to be mapped as read-only to the GPU 2179 * Only honoured if hardware has relevant pte bit 2180 */ 2181 unsigned long gt_ro:1; 2182 unsigned int cache_level:3; 2183 unsigned int cache_dirty:1; 2184 2185 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2186 2187 unsigned int pin_display; 2188 2189 struct sg_table *pages; 2190 int pages_pin_count; 2191 struct get_page { 2192 struct scatterlist *sg; 2193 int last; 2194 } get_page; 2195 void *mapping; 2196 2197 /** Breadcrumb of last rendering to the buffer. 2198 * There can only be one writer, but we allow for multiple readers. 2199 * If there is a writer that necessarily implies that all other 2200 * read requests are complete - but we may only be lazily clearing 2201 * the read requests. A read request is naturally the most recent 2202 * request on a ring, so we may have two different write and read 2203 * requests on one ring where the write request is older than the 2204 * read request. This allows for the CPU to read from an active 2205 * buffer by only waiting for the write to complete. 2206 * */ 2207 struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; 2208 struct drm_i915_gem_request *last_write_req; 2209 /** Breadcrumb of last fenced GPU access to the buffer. */ 2210 struct drm_i915_gem_request *last_fenced_req; 2211 2212 /** Current tiling stride for the object, if it's tiled. */ 2213 uint32_t stride; 2214 2215 /** References from framebuffers, locks out tiling changes. */ 2216 unsigned long framebuffer_references; 2217 2218 /** Record of address bit 17 of each page at last unbind. */ 2219 unsigned long *bit_17; 2220 2221 union { 2222 /** for phy allocated objects */ 2223 struct drm_dma_handle *phys_handle; 2224 2225 struct i915_gem_userptr { 2226 uintptr_t ptr; 2227 unsigned read_only :1; 2228 unsigned workers :4; 2229 #define I915_GEM_USERPTR_MAX_WORKERS 15 2230 2231 struct i915_mm_struct *mm; 2232 struct i915_mmu_object *mmu_object; 2233 struct work_struct *work; 2234 } userptr; 2235 }; 2236 }; 2237 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2238 2239 void i915_gem_track_fb(struct drm_i915_gem_object *old, 2240 struct drm_i915_gem_object *new, 2241 unsigned frontbuffer_bits); 2242 2243 /** 2244 * Request queue structure. 2245 * 2246 * The request queue allows us to note sequence numbers that have been emitted 2247 * and may be associated with active buffers to be retired. 2248 * 2249 * By keeping this list, we can avoid having to do questionable sequence 2250 * number comparisons on buffer last_read|write_seqno. It also allows an 2251 * emission time to be associated with the request for tracking how far ahead 2252 * of the GPU the submission is. 2253 * 2254 * The requests are reference counted, so upon creation they should have an 2255 * initial reference taken using kref_init 2256 */ 2257 struct drm_i915_gem_request { 2258 struct kref ref; 2259 2260 /** On Which ring this request was generated */ 2261 struct drm_i915_private *i915; 2262 struct intel_engine_cs *engine; 2263 unsigned reset_counter; 2264 2265 /** GEM sequence number associated with the previous request, 2266 * when the HWS breadcrumb is equal to this the GPU is processing 2267 * this request. 2268 */ 2269 u32 previous_seqno; 2270 2271 /** GEM sequence number associated with this request, 2272 * when the HWS breadcrumb is equal or greater than this the GPU 2273 * has finished processing this request. 2274 */ 2275 u32 seqno; 2276 2277 /** Position in the ringbuffer of the start of the request */ 2278 u32 head; 2279 2280 /** 2281 * Position in the ringbuffer of the start of the postfix. 2282 * This is required to calculate the maximum available ringbuffer 2283 * space without overwriting the postfix. 2284 */ 2285 u32 postfix; 2286 2287 /** Position in the ringbuffer of the end of the whole request */ 2288 u32 tail; 2289 2290 /** 2291 * Context and ring buffer related to this request 2292 * Contexts are refcounted, so when this request is associated with a 2293 * context, we must increment the context's refcount, to guarantee that 2294 * it persists while any request is linked to it. Requests themselves 2295 * are also refcounted, so the request will only be freed when the last 2296 * reference to it is dismissed, and the code in 2297 * i915_gem_request_free() will then decrement the refcount on the 2298 * context. 2299 */ 2300 struct intel_context *ctx; 2301 struct intel_ringbuffer *ringbuf; 2302 2303 /** Batch buffer related to this request if any (used for 2304 error state dump only) */ 2305 struct drm_i915_gem_object *batch_obj; 2306 2307 /** Time at which this request was emitted, in jiffies. */ 2308 unsigned long emitted_jiffies; 2309 2310 /** global list entry for this request */ 2311 struct list_head list; 2312 2313 struct drm_i915_file_private *file_priv; 2314 /** file_priv list entry for this request */ 2315 struct list_head client_list; 2316 2317 /** process identifier submitting this request */ 2318 pid_t pid; 2319 2320 /** 2321 * The ELSP only accepts two elements at a time, so we queue 2322 * context/tail pairs on a given queue (ring->execlist_queue) until the 2323 * hardware is available. The queue serves a double purpose: we also use 2324 * it to keep track of the up to 2 contexts currently in the hardware 2325 * (usually one in execution and the other queued up by the GPU): We 2326 * only remove elements from the head of the queue when the hardware 2327 * informs us that an element has been completed. 2328 * 2329 * All accesses to the queue are mediated by a spinlock 2330 * (ring->execlist_lock). 2331 */ 2332 2333 /** Execlist link in the submission queue.*/ 2334 struct list_head execlist_link; 2335 2336 /** Execlists no. of times this request has been sent to the ELSP */ 2337 int elsp_submitted; 2338 2339 }; 2340 2341 struct drm_i915_gem_request * __must_check 2342 i915_gem_request_alloc(struct intel_engine_cs *engine, 2343 struct intel_context *ctx); 2344 void i915_gem_request_free(struct kref *req_ref); 2345 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2346 struct drm_file *file); 2347 2348 static inline uint32_t 2349 i915_gem_request_get_seqno(struct drm_i915_gem_request *req) 2350 { 2351 return req ? req->seqno : 0; 2352 } 2353 2354 static inline struct intel_engine_cs * 2355 i915_gem_request_get_engine(struct drm_i915_gem_request *req) 2356 { 2357 return req ? req->engine : NULL; 2358 } 2359 2360 static inline struct drm_i915_gem_request * 2361 i915_gem_request_reference(struct drm_i915_gem_request *req) 2362 { 2363 if (req) 2364 kref_get(&req->ref); 2365 return req; 2366 } 2367 2368 static inline void 2369 i915_gem_request_unreference(struct drm_i915_gem_request *req) 2370 { 2371 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); 2372 kref_put(&req->ref, i915_gem_request_free); 2373 } 2374 2375 static inline void 2376 i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) 2377 { 2378 struct drm_device *dev; 2379 2380 if (!req) 2381 return; 2382 2383 dev = req->engine->dev; 2384 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) 2385 mutex_unlock(&dev->struct_mutex); 2386 } 2387 2388 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2389 struct drm_i915_gem_request *src) 2390 { 2391 if (src) 2392 i915_gem_request_reference(src); 2393 2394 if (*pdst) 2395 i915_gem_request_unreference(*pdst); 2396 2397 *pdst = src; 2398 } 2399 2400 /* 2401 * XXX: i915_gem_request_completed should be here but currently needs the 2402 * definition of i915_seqno_passed() which is below. It will be moved in 2403 * a later patch when the call to i915_seqno_passed() is obsoleted... 2404 */ 2405 2406 /* 2407 * A command that requires special handling by the command parser. 2408 */ 2409 struct drm_i915_cmd_descriptor { 2410 /* 2411 * Flags describing how the command parser processes the command. 2412 * 2413 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2414 * a length mask if not set 2415 * CMD_DESC_SKIP: The command is allowed but does not follow the 2416 * standard length encoding for the opcode range in 2417 * which it falls 2418 * CMD_DESC_REJECT: The command is never allowed 2419 * CMD_DESC_REGISTER: The command should be checked against the 2420 * register whitelist for the appropriate ring 2421 * CMD_DESC_MASTER: The command is allowed if the submitting process 2422 * is the DRM master 2423 */ 2424 u32 flags; 2425 #define CMD_DESC_FIXED (1<<0) 2426 #define CMD_DESC_SKIP (1<<1) 2427 #define CMD_DESC_REJECT (1<<2) 2428 #define CMD_DESC_REGISTER (1<<3) 2429 #define CMD_DESC_BITMASK (1<<4) 2430 #define CMD_DESC_MASTER (1<<5) 2431 2432 /* 2433 * The command's unique identification bits and the bitmask to get them. 2434 * This isn't strictly the opcode field as defined in the spec and may 2435 * also include type, subtype, and/or subop fields. 2436 */ 2437 struct { 2438 u32 value; 2439 u32 mask; 2440 } cmd; 2441 2442 /* 2443 * The command's length. The command is either fixed length (i.e. does 2444 * not include a length field) or has a length field mask. The flag 2445 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2446 * a length mask. All command entries in a command table must include 2447 * length information. 2448 */ 2449 union { 2450 u32 fixed; 2451 u32 mask; 2452 } length; 2453 2454 /* 2455 * Describes where to find a register address in the command to check 2456 * against the ring's register whitelist. Only valid if flags has the 2457 * CMD_DESC_REGISTER bit set. 2458 * 2459 * A non-zero step value implies that the command may access multiple 2460 * registers in sequence (e.g. LRI), in that case step gives the 2461 * distance in dwords between individual offset fields. 2462 */ 2463 struct { 2464 u32 offset; 2465 u32 mask; 2466 u32 step; 2467 } reg; 2468 2469 #define MAX_CMD_DESC_BITMASKS 3 2470 /* 2471 * Describes command checks where a particular dword is masked and 2472 * compared against an expected value. If the command does not match 2473 * the expected value, the parser rejects it. Only valid if flags has 2474 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2475 * are valid. 2476 * 2477 * If the check specifies a non-zero condition_mask then the parser 2478 * only performs the check when the bits specified by condition_mask 2479 * are non-zero. 2480 */ 2481 struct { 2482 u32 offset; 2483 u32 mask; 2484 u32 expected; 2485 u32 condition_offset; 2486 u32 condition_mask; 2487 } bits[MAX_CMD_DESC_BITMASKS]; 2488 }; 2489 2490 /* 2491 * A table of commands requiring special handling by the command parser. 2492 * 2493 * Each ring has an array of tables. Each table consists of an array of command 2494 * descriptors, which must be sorted with command opcodes in ascending order. 2495 */ 2496 struct drm_i915_cmd_table { 2497 const struct drm_i915_cmd_descriptor *table; 2498 int count; 2499 }; 2500 2501 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */ 2502 #define __I915__(p) ({ \ 2503 const struct drm_i915_private *__p; \ 2504 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2505 __p = (const struct drm_i915_private *)p; \ 2506 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2507 __p = to_i915((const struct drm_device *)p); \ 2508 __p; \ 2509 }) 2510 #define INTEL_INFO(p) (&__I915__(p)->info) 2511 #define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2512 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2513 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2514 2515 #define REVID_FOREVER 0xff 2516 /* 2517 * Return true if revision is in range [since,until] inclusive. 2518 * 2519 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2520 */ 2521 #define IS_REVID(p, since, until) \ 2522 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2523 2524 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2525 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2526 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2527 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2528 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2529 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2530 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2531 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2532 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2533 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2534 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2535 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2536 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2537 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2538 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2539 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2540 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2541 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2542 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2543 INTEL_DEVID(dev) == 0x0152 || \ 2544 INTEL_DEVID(dev) == 0x015a) 2545 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2546 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2547 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2548 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2549 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2550 #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2551 #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2552 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2553 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2554 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2555 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2556 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2557 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2558 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2559 /* ULX machines are also considered ULT. */ 2560 #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ 2561 (INTEL_DEVID(dev) & 0xf) == 0xe) 2562 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2563 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2564 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2565 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2566 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2567 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2568 /* ULX machines are also considered ULT. */ 2569 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2570 INTEL_DEVID(dev) == 0x0A1E) 2571 #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ 2572 INTEL_DEVID(dev) == 0x1913 || \ 2573 INTEL_DEVID(dev) == 0x1916 || \ 2574 INTEL_DEVID(dev) == 0x1921 || \ 2575 INTEL_DEVID(dev) == 0x1926) 2576 #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2577 INTEL_DEVID(dev) == 0x1915 || \ 2578 INTEL_DEVID(dev) == 0x191E) 2579 #define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ 2580 INTEL_DEVID(dev) == 0x5913 || \ 2581 INTEL_DEVID(dev) == 0x5916 || \ 2582 INTEL_DEVID(dev) == 0x5921 || \ 2583 INTEL_DEVID(dev) == 0x5926) 2584 #define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ 2585 INTEL_DEVID(dev) == 0x5915 || \ 2586 INTEL_DEVID(dev) == 0x591E) 2587 #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ 2588 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2589 #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ 2590 (INTEL_DEVID(dev) & 0x00F0) == 0x0030) 2591 2592 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2593 2594 #define SKL_REVID_A0 0x0 2595 #define SKL_REVID_B0 0x1 2596 #define SKL_REVID_C0 0x2 2597 #define SKL_REVID_D0 0x3 2598 #define SKL_REVID_E0 0x4 2599 #define SKL_REVID_F0 0x5 2600 #define SKL_REVID_G0 0x6 2601 #define SKL_REVID_H0 0x7 2602 2603 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2604 2605 #define BXT_REVID_A0 0x0 2606 #define BXT_REVID_A1 0x1 2607 #define BXT_REVID_B0 0x3 2608 #define BXT_REVID_C0 0x9 2609 2610 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) 2611 2612 #define KBL_REVID_A0 0x0 2613 #define KBL_REVID_B0 0x1 2614 #define KBL_REVID_C0 0x2 2615 #define KBL_REVID_D0 0x3 2616 #define KBL_REVID_E0 0x4 2617 2618 #define IS_KBL_REVID(p, since, until) \ 2619 (IS_KABYLAKE(p) && IS_REVID(p, since, until)) 2620 2621 /* 2622 * The genX designation typically refers to the render engine, so render 2623 * capability related checks should use IS_GEN, while display and other checks 2624 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2625 * chips, etc.). 2626 */ 2627 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2628 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2629 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2630 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2631 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2632 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2633 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2634 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2635 2636 #define RENDER_RING (1<<RCS) 2637 #define BSD_RING (1<<VCS) 2638 #define BLT_RING (1<<BCS) 2639 #define VEBOX_RING (1<<VECS) 2640 #define BSD2_RING (1<<VCS2) 2641 #define ALL_ENGINES (~0) 2642 2643 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2644 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2645 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2646 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2647 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2648 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2649 #define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) 2650 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2651 HAS_EDRAM(dev)) 2652 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2653 2654 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2655 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2656 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2657 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2658 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2659 2660 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2661 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2662 2663 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2664 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2665 2666 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2667 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2668 IS_SKL_GT3(dev) || \ 2669 IS_SKL_GT4(dev)) 2670 2671 /* 2672 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2673 * even when in MSI mode. This results in spurious interrupt warnings if the 2674 * legacy irq no. is shared with another device. The kernel then disables that 2675 * interrupt source and so prevents the other device from working properly. 2676 */ 2677 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2678 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2679 2680 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2681 * rows, which changed the alignment requirements and fence programming. 2682 */ 2683 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2684 IS_I915GM(dev))) 2685 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2686 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2687 2688 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2689 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2690 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2691 2692 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2693 2694 #define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2695 INTEL_INFO(dev)->gen >= 9) 2696 2697 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2698 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2699 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2700 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2701 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 2702 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2703 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2704 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2705 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2706 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2707 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2708 2709 #define HAS_CSR(dev) (IS_GEN9(dev)) 2710 2711 #define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2712 #define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2713 2714 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2715 INTEL_INFO(dev)->gen >= 8) 2716 2717 #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2718 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ 2719 !IS_BROXTON(dev)) 2720 2721 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2722 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2723 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2724 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2725 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2726 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2727 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2728 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2729 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2730 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2731 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2732 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2733 2734 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2735 #define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP) 2736 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2737 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2738 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2739 #define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2740 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2741 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2742 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2743 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2744 2745 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \ 2746 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 2747 2748 /* DPF == dynamic parity feature */ 2749 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2750 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) 2751 2752 #define GT_FREQUENCY_MULTIPLIER 50 2753 #define GEN9_FREQ_SCALER 3 2754 2755 #include "i915_trace.h" 2756 2757 extern const struct drm_ioctl_desc i915_ioctls[]; 2758 extern int i915_max_ioctl; 2759 2760 extern int i915_suspend_switcheroo(device_t kdev); 2761 extern int i915_resume_switcheroo(struct drm_device *dev); 2762 2763 /* i915_dma.c */ 2764 void __printf(3, 4) 2765 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2766 const char *fmt, ...); 2767 2768 #define i915_report_error(dev_priv, fmt, ...) \ 2769 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2770 2771 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2772 extern int i915_driver_unload(struct drm_device *); 2773 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2774 extern void i915_driver_lastclose(struct drm_device * dev); 2775 extern void i915_driver_preclose(struct drm_device *dev, 2776 struct drm_file *file); 2777 extern void i915_driver_postclose(struct drm_device *dev, 2778 struct drm_file *file); 2779 #ifdef CONFIG_COMPAT 2780 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2781 unsigned long arg); 2782 #endif 2783 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2784 extern bool intel_has_gpu_reset(struct drm_device *dev); 2785 extern int i915_reset(struct drm_device *dev); 2786 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2787 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2788 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2789 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2790 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2791 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2792 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2793 2794 /* intel_hotplug.c */ 2795 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2796 void intel_hpd_init(struct drm_i915_private *dev_priv); 2797 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2798 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2799 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2800 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2801 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2802 2803 /* i915_irq.c */ 2804 void i915_queue_hangcheck(struct drm_device *dev); 2805 __printf(3, 4) 2806 void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2807 const char *fmt, ...); 2808 2809 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2810 int intel_irq_install(struct drm_i915_private *dev_priv); 2811 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2812 2813 extern void intel_uncore_sanitize(struct drm_device *dev); 2814 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2815 bool restore_forcewake); 2816 extern void intel_uncore_init(struct drm_device *dev); 2817 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2818 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2819 extern void intel_uncore_fini(struct drm_device *dev); 2820 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2821 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2822 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2823 enum forcewake_domains domains); 2824 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2825 enum forcewake_domains domains); 2826 /* Like above but the caller must manage the uncore.lock itself. 2827 * Must be used with I915_READ_FW and friends. 2828 */ 2829 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2830 enum forcewake_domains domains); 2831 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2832 enum forcewake_domains domains); 2833 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2834 2835 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2836 static inline bool intel_vgpu_active(struct drm_device *dev) 2837 { 2838 return to_i915(dev)->vgpu.active; 2839 } 2840 2841 void 2842 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2843 u32 status_mask); 2844 2845 void 2846 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2847 u32 status_mask); 2848 2849 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2850 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2851 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2852 uint32_t mask, 2853 uint32_t bits); 2854 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2855 uint32_t interrupt_mask, 2856 uint32_t enabled_irq_mask); 2857 static inline void 2858 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2859 { 2860 ilk_update_display_irq(dev_priv, bits, bits); 2861 } 2862 static inline void 2863 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2864 { 2865 ilk_update_display_irq(dev_priv, bits, 0); 2866 } 2867 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2868 enum i915_pipe pipe, 2869 uint32_t interrupt_mask, 2870 uint32_t enabled_irq_mask); 2871 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2872 enum i915_pipe pipe, uint32_t bits) 2873 { 2874 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2875 } 2876 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2877 enum i915_pipe pipe, uint32_t bits) 2878 { 2879 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2880 } 2881 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2882 uint32_t interrupt_mask, 2883 uint32_t enabled_irq_mask); 2884 static inline void 2885 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2886 { 2887 ibx_display_interrupt_update(dev_priv, bits, bits); 2888 } 2889 static inline void 2890 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2891 { 2892 ibx_display_interrupt_update(dev_priv, bits, 0); 2893 } 2894 2895 2896 /* i915_gem.c */ 2897 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2898 struct drm_file *file_priv); 2899 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2900 struct drm_file *file_priv); 2901 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2902 struct drm_file *file_priv); 2903 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2904 struct drm_file *file_priv); 2905 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2906 struct drm_file *file_priv); 2907 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2908 struct drm_file *file_priv); 2909 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2910 struct drm_file *file_priv); 2911 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2912 struct drm_i915_gem_request *req); 2913 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, 2914 struct drm_i915_gem_execbuffer2 *args, 2915 struct list_head *vmas); 2916 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2917 struct drm_file *file_priv); 2918 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2919 struct drm_file *file_priv); 2920 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2921 struct drm_file *file_priv); 2922 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2923 struct drm_file *file); 2924 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2925 struct drm_file *file); 2926 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2927 struct drm_file *file_priv); 2928 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2929 struct drm_file *file_priv); 2930 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2931 struct drm_file *file_priv); 2932 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2933 struct drm_file *file_priv); 2934 int i915_gem_init_userptr(struct drm_device *dev); 2935 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2936 struct drm_file *file); 2937 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2938 struct drm_file *file_priv); 2939 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2940 struct drm_file *file_priv); 2941 void i915_gem_load_init(struct drm_device *dev); 2942 void i915_gem_load_cleanup(struct drm_device *dev); 2943 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2944 void *i915_gem_object_alloc(struct drm_device *dev); 2945 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2946 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2947 const struct drm_i915_gem_object_ops *ops); 2948 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2949 size_t size); 2950 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2951 struct drm_device *dev, const void *data, size_t size); 2952 void i915_gem_free_object(struct drm_gem_object *obj); 2953 void i915_gem_vma_destroy(struct i915_vma *vma); 2954 2955 /* Flags used by pin/bind&friends. */ 2956 #define PIN_MAPPABLE (1<<0) 2957 #define PIN_NONBLOCK (1<<1) 2958 #define PIN_GLOBAL (1<<2) 2959 #define PIN_OFFSET_BIAS (1<<3) 2960 #define PIN_USER (1<<4) 2961 #define PIN_UPDATE (1<<5) 2962 #define PIN_ZONE_4G (1<<6) 2963 #define PIN_HIGH (1<<7) 2964 #define PIN_OFFSET_FIXED (1<<8) 2965 #define PIN_OFFSET_MASK (~4095) 2966 int __must_check 2967 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2968 struct i915_address_space *vm, 2969 uint32_t alignment, 2970 uint64_t flags); 2971 int __must_check 2972 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2973 const struct i915_ggtt_view *view, 2974 uint32_t alignment, 2975 uint64_t flags); 2976 2977 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2978 u32 flags); 2979 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 2980 int __must_check i915_vma_unbind(struct i915_vma *vma); 2981 /* 2982 * BEWARE: Do not use the function below unless you can _absolutely_ 2983 * _guarantee_ VMA in question is _not in use_ anywhere. 2984 */ 2985 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); 2986 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2987 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2988 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2989 2990 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2991 int *needs_clflush); 2992 2993 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2994 2995 static inline int __sg_page_count(struct scatterlist *sg) 2996 { 2997 return sg->length >> PAGE_SHIFT; 2998 } 2999 3000 struct page * 3001 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 3002 3003 static inline struct page * 3004 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 3005 { 3006 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 3007 return NULL; 3008 3009 if (n < obj->get_page.last) { 3010 obj->get_page.sg = obj->pages->sgl; 3011 obj->get_page.last = 0; 3012 } 3013 3014 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 3015 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 3016 #if 0 3017 if (unlikely(sg_is_chain(obj->get_page.sg))) 3018 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 3019 #endif 3020 } 3021 3022 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 3023 } 3024 3025 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3026 { 3027 BUG_ON(obj->pages == NULL); 3028 obj->pages_pin_count++; 3029 } 3030 3031 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3032 { 3033 BUG_ON(obj->pages_pin_count == 0); 3034 obj->pages_pin_count--; 3035 } 3036 3037 /** 3038 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3039 * @obj - the object to map into kernel address space 3040 * 3041 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3042 * pages and then returns a contiguous mapping of the backing storage into 3043 * the kernel address space. 3044 * 3045 * The caller must hold the struct_mutex, and is responsible for calling 3046 * i915_gem_object_unpin_map() when the mapping is no longer required. 3047 * 3048 * Returns the pointer through which to access the mapped object, or an 3049 * ERR_PTR() on error. 3050 */ 3051 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); 3052 3053 /** 3054 * i915_gem_object_unpin_map - releases an earlier mapping 3055 * @obj - the object to unmap 3056 * 3057 * After pinning the object and mapping its pages, once you are finished 3058 * with your access, call i915_gem_object_unpin_map() to release the pin 3059 * upon the mapping. Once the pin count reaches zero, that mapping may be 3060 * removed. 3061 * 3062 * The caller must hold the struct_mutex. 3063 */ 3064 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3065 { 3066 lockdep_assert_held(&obj->base.dev->struct_mutex); 3067 i915_gem_object_unpin_pages(obj); 3068 } 3069 3070 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3071 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 3072 struct intel_engine_cs *to, 3073 struct drm_i915_gem_request **to_req); 3074 void i915_vma_move_to_active(struct i915_vma *vma, 3075 struct drm_i915_gem_request *req); 3076 int i915_gem_dumb_create(struct drm_file *file_priv, 3077 struct drm_device *dev, 3078 struct drm_mode_create_dumb *args); 3079 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3080 uint32_t handle, uint64_t *offset); 3081 /** 3082 * Returns true if seq1 is later than seq2. 3083 */ 3084 static inline bool 3085 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 3086 { 3087 return (int32_t)(seq1 - seq2) >= 0; 3088 } 3089 3090 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 3091 bool lazy_coherency) 3092 { 3093 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3094 req->engine->irq_seqno_barrier(req->engine); 3095 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3096 req->previous_seqno); 3097 } 3098 3099 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 3100 bool lazy_coherency) 3101 { 3102 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3103 req->engine->irq_seqno_barrier(req->engine); 3104 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3105 req->seqno); 3106 } 3107 3108 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 3109 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3110 3111 struct drm_i915_gem_request * 3112 i915_gem_find_active_request(struct intel_engine_cs *engine); 3113 3114 bool i915_gem_retire_requests(struct drm_device *dev); 3115 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3116 3117 static inline u32 i915_reset_counter(struct i915_gpu_error *error) 3118 { 3119 return atomic_read(&error->reset_counter); 3120 } 3121 3122 static inline bool __i915_reset_in_progress(u32 reset) 3123 { 3124 return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG); 3125 } 3126 3127 static inline bool __i915_reset_in_progress_or_wedged(u32 reset) 3128 { 3129 return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 3130 } 3131 3132 static inline bool __i915_terminally_wedged(u32 reset) 3133 { 3134 return unlikely(reset & I915_WEDGED); 3135 } 3136 3137 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3138 { 3139 return __i915_reset_in_progress(i915_reset_counter(error)); 3140 } 3141 3142 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3143 { 3144 return __i915_reset_in_progress_or_wedged(i915_reset_counter(error)); 3145 } 3146 3147 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3148 { 3149 return __i915_terminally_wedged(i915_reset_counter(error)); 3150 } 3151 3152 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3153 { 3154 return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; 3155 } 3156 3157 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 3158 { 3159 return dev_priv->gpu_error.stop_rings == 0 || 3160 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 3161 } 3162 3163 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 3164 { 3165 return dev_priv->gpu_error.stop_rings == 0 || 3166 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 3167 } 3168 3169 void i915_gem_reset(struct drm_device *dev); 3170 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3171 int __must_check i915_gem_init(struct drm_device *dev); 3172 int i915_gem_init_engines(struct drm_device *dev); 3173 int __must_check i915_gem_init_hw(struct drm_device *dev); 3174 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 3175 void i915_gem_init_swizzling(struct drm_device *dev); 3176 void i915_gem_cleanup_engines(struct drm_device *dev); 3177 int __must_check i915_gpu_idle(struct drm_device *dev); 3178 int __must_check i915_gem_suspend(struct drm_device *dev); 3179 void __i915_add_request(struct drm_i915_gem_request *req, 3180 struct drm_i915_gem_object *batch_obj, 3181 bool flush_caches); 3182 #define i915_add_request(req) \ 3183 __i915_add_request(req, NULL, true) 3184 #define i915_add_request_no_flush(req) \ 3185 __i915_add_request(req, NULL, false) 3186 int __i915_wait_request(struct drm_i915_gem_request *req, 3187 bool interruptible, 3188 s64 *timeout, 3189 struct intel_rps_client *rps); 3190 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 3191 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres); 3192 int __must_check 3193 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 3194 bool readonly); 3195 int __must_check 3196 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3197 bool write); 3198 int __must_check 3199 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3200 int __must_check 3201 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3202 u32 alignment, 3203 const struct i915_ggtt_view *view); 3204 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3205 const struct i915_ggtt_view *view); 3206 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3207 int align); 3208 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3209 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3210 3211 uint32_t 3212 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 3213 uint32_t 3214 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 3215 int tiling_mode, bool fenced); 3216 3217 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3218 enum i915_cache_level cache_level); 3219 3220 #if 0 3221 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3222 struct dma_buf *dma_buf); 3223 3224 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3225 struct drm_gem_object *gem_obj, int flags); 3226 #endif 3227 3228 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 3229 const struct i915_ggtt_view *view); 3230 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 3231 struct i915_address_space *vm); 3232 static inline u64 3233 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 3234 { 3235 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 3236 } 3237 3238 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 3239 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 3240 const struct i915_ggtt_view *view); 3241 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3242 struct i915_address_space *vm); 3243 3244 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 3245 struct i915_address_space *vm); 3246 struct i915_vma * 3247 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3248 struct i915_address_space *vm); 3249 struct i915_vma * 3250 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 3251 const struct i915_ggtt_view *view); 3252 3253 struct i915_vma * 3254 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3255 struct i915_address_space *vm); 3256 struct i915_vma * 3257 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3258 const struct i915_ggtt_view *view); 3259 3260 static inline struct i915_vma * 3261 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 3262 { 3263 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 3264 } 3265 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); 3266 3267 /* Some GGTT VM helpers */ 3268 static inline struct i915_hw_ppgtt * 3269 i915_vm_to_ppgtt(struct i915_address_space *vm) 3270 { 3271 return container_of(vm, struct i915_hw_ppgtt, base); 3272 } 3273 3274 3275 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 3276 { 3277 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3278 } 3279 3280 static inline unsigned long 3281 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3282 { 3283 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3284 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3285 3286 return i915_gem_obj_size(obj, &ggtt->base); 3287 } 3288 3289 static inline int __must_check 3290 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3291 uint32_t alignment, 3292 unsigned flags) 3293 { 3294 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3295 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3296 3297 return i915_gem_object_pin(obj, &ggtt->base, 3298 alignment, flags | PIN_GLOBAL); 3299 } 3300 3301 static inline int 3302 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 3303 { 3304 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 3305 } 3306 3307 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3308 const struct i915_ggtt_view *view); 3309 static inline void 3310 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 3311 { 3312 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 3313 } 3314 3315 /* i915_gem_fence.c */ 3316 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 3317 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 3318 3319 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 3320 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 3321 3322 void i915_gem_restore_fences(struct drm_device *dev); 3323 3324 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3325 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3326 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3327 3328 /* i915_gem_context.c */ 3329 int __must_check i915_gem_context_init(struct drm_device *dev); 3330 void i915_gem_context_fini(struct drm_device *dev); 3331 void i915_gem_context_reset(struct drm_device *dev); 3332 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3333 int i915_gem_context_enable(struct drm_i915_gem_request *req); 3334 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3335 int i915_switch_context(struct drm_i915_gem_request *req); 3336 struct intel_context * 3337 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 3338 void i915_gem_context_free(struct kref *ctx_ref); 3339 struct drm_i915_gem_object * 3340 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3341 static inline void i915_gem_context_reference(struct intel_context *ctx) 3342 { 3343 kref_get(&ctx->ref); 3344 } 3345 3346 static inline void i915_gem_context_unreference(struct intel_context *ctx) 3347 { 3348 kref_put(&ctx->ref, i915_gem_context_free); 3349 } 3350 3351 static inline bool i915_gem_context_is_default(const struct intel_context *c) 3352 { 3353 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3354 } 3355 3356 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3357 struct drm_file *file); 3358 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3359 struct drm_file *file); 3360 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3361 struct drm_file *file_priv); 3362 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3363 struct drm_file *file_priv); 3364 3365 /* i915_gem_evict.c */ 3366 int __must_check i915_gem_evict_something(struct drm_device *dev, 3367 struct i915_address_space *vm, 3368 int min_size, 3369 unsigned alignment, 3370 unsigned cache_level, 3371 unsigned long start, 3372 unsigned long end, 3373 unsigned flags); 3374 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3375 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3376 3377 /* belongs in i915_gem_gtt.h */ 3378 static inline void i915_gem_chipset_flush(struct drm_device *dev) 3379 { 3380 if (INTEL_INFO(dev)->gen < 6) 3381 intel_gtt_chipset_flush(); 3382 } 3383 3384 /* i915_gem_stolen.c */ 3385 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3386 struct drm_mm_node *node, u64 size, 3387 unsigned alignment); 3388 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3389 struct drm_mm_node *node, u64 size, 3390 unsigned alignment, u64 start, 3391 u64 end); 3392 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3393 struct drm_mm_node *node); 3394 int i915_gem_init_stolen(struct drm_device *dev); 3395 void i915_gem_cleanup_stolen(struct drm_device *dev); 3396 struct drm_i915_gem_object * 3397 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3398 struct drm_i915_gem_object * 3399 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3400 u32 stolen_offset, 3401 u32 gtt_offset, 3402 u32 size); 3403 3404 /* i915_gem_shrinker.c */ 3405 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3406 unsigned long target, 3407 unsigned flags); 3408 #define I915_SHRINK_PURGEABLE 0x1 3409 #define I915_SHRINK_UNBOUND 0x2 3410 #define I915_SHRINK_BOUND 0x4 3411 #define I915_SHRINK_ACTIVE 0x8 3412 #define I915_SHRINK_VMAPS 0x10 3413 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3414 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3415 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3416 3417 3418 /* i915_gem_tiling.c */ 3419 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3420 { 3421 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3422 3423 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3424 obj->tiling_mode != I915_TILING_NONE; 3425 } 3426 3427 /* i915_gem_debug.c */ 3428 #if WATCH_LISTS 3429 int i915_verify_lists(struct drm_device *dev); 3430 #else 3431 #define i915_verify_lists(dev) 0 3432 #endif 3433 3434 /* i915_debugfs.c */ 3435 int i915_debugfs_init(struct drm_minor *minor); 3436 void i915_debugfs_cleanup(struct drm_minor *minor); 3437 #ifdef CONFIG_DEBUG_FS 3438 int i915_debugfs_connector_add(struct drm_connector *connector); 3439 void intel_display_crc_init(struct drm_device *dev); 3440 #else 3441 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3442 { return 0; } 3443 static inline void intel_display_crc_init(struct drm_device *dev) {} 3444 #endif 3445 3446 /* i915_gpu_error.c */ 3447 __printf(2, 3) 3448 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3449 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3450 const struct i915_error_state_file_priv *error); 3451 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3452 struct drm_i915_private *i915, 3453 size_t count, loff_t pos); 3454 static inline void i915_error_state_buf_release( 3455 struct drm_i915_error_state_buf *eb) 3456 { 3457 kfree(eb->buf); 3458 } 3459 void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 3460 const char *error_msg); 3461 void i915_error_state_get(struct drm_device *dev, 3462 struct i915_error_state_file_priv *error_priv); 3463 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3464 void i915_destroy_error_state(struct drm_device *dev); 3465 3466 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3467 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3468 3469 /* i915_cmd_parser.c */ 3470 int i915_cmd_parser_get_version(void); 3471 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); 3472 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); 3473 bool i915_needs_cmd_parser(struct intel_engine_cs *engine); 3474 int i915_parse_cmds(struct intel_engine_cs *engine, 3475 struct drm_i915_gem_object *batch_obj, 3476 struct drm_i915_gem_object *shadow_batch_obj, 3477 u32 batch_start_offset, 3478 u32 batch_len, 3479 bool is_master); 3480 3481 /* i915_suspend.c */ 3482 extern int i915_save_state(struct drm_device *dev); 3483 extern int i915_restore_state(struct drm_device *dev); 3484 3485 /* i915_sysfs.c */ 3486 void i915_setup_sysfs(struct drm_device *dev_priv); 3487 void i915_teardown_sysfs(struct drm_device *dev_priv); 3488 3489 /* intel_i2c.c */ 3490 extern int intel_setup_gmbus(struct drm_device *dev); 3491 extern void intel_teardown_gmbus(struct drm_device *dev); 3492 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3493 unsigned int pin); 3494 3495 extern struct i2c_adapter * 3496 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3497 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3498 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3499 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3500 { 3501 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3502 } 3503 extern void intel_i2c_reset(struct drm_device *dev); 3504 3505 /* intel_bios.c */ 3506 int intel_bios_init(struct drm_i915_private *dev_priv); 3507 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3508 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3509 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3510 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3511 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3512 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3513 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3514 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3515 enum port port); 3516 3517 /* intel_opregion.c */ 3518 #ifdef CONFIG_ACPI 3519 extern int intel_opregion_setup(struct drm_device *dev); 3520 extern void intel_opregion_init(struct drm_device *dev); 3521 extern void intel_opregion_fini(struct drm_device *dev); 3522 extern void intel_opregion_asle_intr(struct drm_device *dev); 3523 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3524 bool enable); 3525 extern int intel_opregion_notify_adapter(struct drm_device *dev, 3526 pci_power_t state); 3527 extern int intel_opregion_get_panel_type(struct drm_device *dev); 3528 #else 3529 static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3530 static inline void intel_opregion_init(struct drm_device *dev) { return; } 3531 static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3532 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3533 static inline int 3534 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3535 { 3536 return 0; 3537 } 3538 static inline int 3539 intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3540 { 3541 return 0; 3542 } 3543 static inline int intel_opregion_get_panel_type(struct drm_device *dev) 3544 { 3545 return -ENODEV; 3546 } 3547 #endif 3548 3549 /* intel_acpi.c */ 3550 #ifdef CONFIG_ACPI 3551 extern void intel_register_dsm_handler(void); 3552 extern void intel_unregister_dsm_handler(void); 3553 #else 3554 static inline void intel_register_dsm_handler(void) { return; } 3555 static inline void intel_unregister_dsm_handler(void) { return; } 3556 #endif /* CONFIG_ACPI */ 3557 3558 /* modesetting */ 3559 extern void intel_modeset_init_hw(struct drm_device *dev); 3560 extern void intel_modeset_init(struct drm_device *dev); 3561 extern void intel_modeset_gem_init(struct drm_device *dev); 3562 extern void intel_modeset_cleanup(struct drm_device *dev); 3563 extern void intel_connector_unregister(struct intel_connector *); 3564 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3565 extern void intel_display_resume(struct drm_device *dev); 3566 extern void i915_redisable_vga(struct drm_device *dev); 3567 extern void i915_redisable_vga_power_on(struct drm_device *dev); 3568 extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3569 extern void intel_init_pch_refclk(struct drm_device *dev); 3570 extern void intel_set_rps(struct drm_device *dev, u8 val); 3571 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3572 bool enable); 3573 extern void intel_detect_pch(struct drm_device *dev); 3574 extern int intel_enable_rc6(const struct drm_device *dev); 3575 3576 extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3577 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3578 struct drm_file *file); 3579 int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, 3580 struct drm_file *file); 3581 3582 struct intel_device_info *i915_get_device_id(int device); 3583 3584 /* overlay */ 3585 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3586 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3587 struct intel_overlay_error_state *error); 3588 3589 extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3590 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3591 struct drm_device *dev, 3592 struct intel_display_error_state *error); 3593 3594 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3595 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3596 3597 /* intel_sideband.c */ 3598 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); 3599 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); 3600 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); 3601 u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); 3602 void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); 3603 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); 3604 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3605 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); 3606 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3607 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); 3608 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3609 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg); 3610 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val); 3611 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 3612 enum intel_sbi_destination destination); 3613 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 3614 enum intel_sbi_destination destination); 3615 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3616 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3617 3618 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3619 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3620 3621 #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 3622 #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) 3623 3624 #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) 3625 #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) 3626 #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) 3627 #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) 3628 3629 #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) 3630 #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) 3631 #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) 3632 #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) 3633 3634 /* Be very careful with read/write 64-bit values. On 32-bit machines, they 3635 * will be implemented using 2 32-bit writes in an arbitrary order with 3636 * an arbitrary delay between them. This can cause the hardware to 3637 * act upon the intermediate value, possibly leading to corruption and 3638 * machine death. You have been warned. 3639 */ 3640 #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 3641 #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3642 3643 #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3644 u32 upper, lower, old_upper, loop = 0; \ 3645 upper = I915_READ(upper_reg); \ 3646 do { \ 3647 old_upper = upper; \ 3648 lower = I915_READ(lower_reg); \ 3649 upper = I915_READ(upper_reg); \ 3650 } while (upper != old_upper && loop++ < 2); \ 3651 (u64)upper << 32 | lower; }) 3652 3653 #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3654 #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3655 3656 #define __raw_read(x, s) \ 3657 static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \ 3658 i915_reg_t reg) \ 3659 { \ 3660 return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3661 } 3662 3663 #define __raw_write(x, s) \ 3664 static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \ 3665 i915_reg_t reg, uint##x##_t val) \ 3666 { \ 3667 write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ 3668 } 3669 __raw_read(8, b) 3670 __raw_read(16, w) 3671 __raw_read(32, l) 3672 __raw_read(64, q) 3673 3674 __raw_write(8, b) 3675 __raw_write(16, w) 3676 __raw_write(32, l) 3677 __raw_write(64, q) 3678 3679 #undef __raw_read 3680 #undef __raw_write 3681 3682 /* These are untraced mmio-accessors that are only valid to be used inside 3683 * criticial sections inside IRQ handlers where forcewake is explicitly 3684 * controlled. 3685 * Think twice, and think again, before using these. 3686 * Note: Should only be used between intel_uncore_forcewake_irqlock() and 3687 * intel_uncore_forcewake_irqunlock(). 3688 */ 3689 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) 3690 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) 3691 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3692 3693 /* "Broadcast RGB" property */ 3694 #define INTEL_BROADCAST_RGB_AUTO 0 3695 #define INTEL_BROADCAST_RGB_FULL 1 3696 #define INTEL_BROADCAST_RGB_LIMITED 2 3697 3698 static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev) 3699 { 3700 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 3701 return VLV_VGACNTRL; 3702 else if (INTEL_INFO(dev)->gen >= 5) 3703 return CPU_VGACNTRL; 3704 else 3705 return VGACNTRL; 3706 } 3707 3708 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) 3709 { 3710 unsigned long j = msecs_to_jiffies(m); 3711 3712 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3713 } 3714 3715 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 3716 { 3717 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 3718 } 3719 3720 static inline unsigned long 3721 timespec_to_jiffies_timeout(const struct timespec *value) 3722 { 3723 unsigned long j = timespec_to_jiffies(value); 3724 3725 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3726 } 3727 3728 /* 3729 * If you need to wait X milliseconds between events A and B, but event B 3730 * doesn't happen exactly after event A, you record the timestamp (jiffies) of 3731 * when event A happened, then just before event B you call this function and 3732 * pass the timestamp as the first argument, and X as the second argument. 3733 */ 3734 static inline void 3735 wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) 3736 { 3737 unsigned long target_jiffies, tmp_jiffies, remaining_jiffies; 3738 3739 /* 3740 * Don't re-read the value of "jiffies" every time since it may change 3741 * behind our back and break the math. 3742 */ 3743 tmp_jiffies = jiffies; 3744 target_jiffies = timestamp_jiffies + 3745 msecs_to_jiffies_timeout(to_wait_ms); 3746 3747 if (time_after(target_jiffies, tmp_jiffies)) { 3748 remaining_jiffies = target_jiffies - tmp_jiffies; 3749 #if 0 3750 while (remaining_jiffies) 3751 remaining_jiffies = 3752 schedule_timeout_uninterruptible(remaining_jiffies); 3753 #else 3754 msleep(jiffies_to_msecs(remaining_jiffies)); 3755 #endif 3756 } 3757 } 3758 3759 static inline void i915_trace_irq_get(struct intel_engine_cs *engine, 3760 struct drm_i915_gem_request *req) 3761 { 3762 if (engine->trace_irq_req == NULL && engine->irq_get(engine)) 3763 i915_gem_request_assign(&engine->trace_irq_req, req); 3764 } 3765 3766 #endif 3767