/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <drm/drmP.h>
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/shmem_fs.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20160425"

#undef WARN_ON
/* Many versions of gcc do not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks, to check for unexpected conditions
 * which may not necessarily be a user-visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915
 * abrt spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
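/*
 * Typical use, mirroring the hw state assertions in the modeset code (an
 * illustrative sketch; the locals and register bit used here are
 * hypothetical):
 *
 *	cur_state = !!(I915_READ(reg) & PIPECONF_ENABLE);
 *	I915_STATE_WARN(cur_state != state,
 *			"pipe %c state mismatch (expected %s, current %s)\n",
 *			pipe_name(pipe), onoff(state), onoff(cur_state));
 */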
bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

enum i915_pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}
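/*
 * The name helpers above are meant for debug output, e.g. (illustrative):
 *
 *	DRM_DEBUG_KMS("enabling pipe %c, transcoder %s\n",
 *		      pipe_name(pipe), transcoder_name(cpu_transcoder));
 */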
/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP handler could block the workqueue while acquiring the
	 * mode config mutex, which userspace may already hold. However
	 * userspace is waiting on the DP workqueue to run, and that is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};
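/*
 * The stats[] bookkeeping above feeds HPD interrupt storm detection; a
 * simplified sketch of the idea (the period and threshold values here are
 * assumptions for illustration, not taken from this header):
 *
 *	if (time_after(jiffies, stats->last_jiffies + msecs_to_jiffies(1000))) {
 *		stats->last_jiffies = jiffies;
 *		stats->count = 0;
 *	} else if (++stats->count > 5) {
 *		stats->state = HPD_MARK_DISABLED;	// storm detected
 *	}
 */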
#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &dev->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
	list_for_each_entry(intel_plane,				\
			    &(dev)->mode_config.plane_list,		\
			    base.head)					\
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder)		\
	list_for_each_entry(intel_encoder,			\
			    &(dev)->mode_config.encoder_list,	\
			    base.head)

#define for_each_intel_connector(dev, intel_connector)		\
	list_for_each_entry(intel_connector,			\
			    &dev->mode_config.connector_list,	\
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		for_each_if ((1 << (domain)) & (mask))
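/*
 * e.g., taking a reference on every domain in a mask (an illustrative
 * sketch; intel_display_power_get() is part of the display power API):
 *
 *	enum intel_display_power_domain domain;
 *
 *	for_each_power_domain(domain, mask)
 *		intel_display_power_get(dev_priv, domain);
 */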
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		struct spinlock lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_ring;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
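/*
 * Illustrative call, roughly as the DP code computes its link M/N values
 * (the field and variable names here follow intel_dp.c but are not
 * verbatim):
 *
 *	intel_link_compute_m_n(bpp, lane_count,
 *			       adjusted_mode->crtc_clock,
 *			       pipe_config->port_clock,
 *			       &pipe_config->dp_m_n);
 */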
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	int iommu;
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error;	/* gen6+ */
	u32 err_int;	/* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;
		u32 semaphore_seqno[I915_NUM_ENGINES - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];

		struct drm_i915_error_object {
			int page_count;
			u64 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state.
	 */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	u64      (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    u64 val, bool trace);
};
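/*
 * Illustrative forcewake usage around raw mmio access (a sketch;
 * intel_uncore_forcewake_get/put and I915_READ_FW are declared further
 * down in this header):
 *
 *	enum forcewake_domains fw_domains =
 *		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
 *
 *	intel_uncore_forcewake_get(dev_priv, fw_domains);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_put(dev_priv, fw_domains);
 */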
struct intel_uncore {
	struct lock lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_cherryview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_broxton) sep \
	func(is_kabylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_snoop) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
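/*
 * With the two helpers above, DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG,
 * SEP_SEMICOLON) expands inside struct intel_device_info below to a run
 * of single-bit fields:
 *
 *	u8 is_mobile:1; u8 is_i85x:1; ... ; u8 has_fpga_dbg:1
 */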
struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) have 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain-specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};
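/*
 * Illustrative ban check built on the fields above (a simplified sketch
 * of the hangcheck-side logic, not the exact driver code):
 *
 *	unsigned long elapsed = get_seconds() - hs->guilty_ts;
 *
 *	if (hs->banned || elapsed <= hs->ban_period_seconds)
 *		return true;	// reject further submissions
 */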
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

#define CONTEXT_NO_ZEROMAP (1<<0)
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_private *i915;
	int flags;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
		struct i915_vma *lrc_vma;
		u64 lrc_desc;
		uint32_t *lrc_reg_state;
	} engine[I915_NUM_ENGINES];

	struct list_head link;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct lock lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			u64 ilk_ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
			unsigned int tiling_mode;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct {
			enum i915_pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			u64 ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID.
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct lock mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct lock lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
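/*
 * The quirk bits above are OR'ed into dev_priv->quirks at init time and
 * consulted where relevant, e.g. (illustrative):
 *
 *	if (dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
 *		level = panel->backlight.max - level;
 */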
struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	struct lock client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work delayed_resume_work;
	unsigned boosts;

	struct intel_rps_client semaphores, mmioflips;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct lock hw_lock;
};
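/*
 * The *_freq values above are cached in hw units; intel_gpu_freq()
 * (declared later in this header) converts them to MHz for display,
 * e.g. (illustrative):
 *
 *	DRM_DEBUG_DRIVER("current GPU freq: %d MHz\n",
 *			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
 */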
1167 */ 1168 struct lock hw_lock; 1169 }; 1170 1171 /* defined intel_pm.c */ 1172 extern struct lock mchdev_lock; 1173 1174 struct intel_ilk_power_mgmt { 1175 u8 cur_delay; 1176 u8 min_delay; 1177 u8 max_delay; 1178 u8 fmax; 1179 u8 fstart; 1180 1181 u64 last_count1; 1182 unsigned long last_time1; 1183 unsigned long chipset_power; 1184 u64 last_count2; 1185 u64 last_time2; 1186 unsigned long gfx_power; 1187 u8 corr; 1188 1189 int c_m; 1190 int r_t; 1191 }; 1192 1193 struct drm_i915_private; 1194 struct i915_power_well; 1195 1196 struct i915_power_well_ops { 1197 /* 1198 * Synchronize the well's hw state to match the current sw state, for 1199 * example enable/disable it based on the current refcount. Called 1200 * during driver init and resume time, possibly after first calling 1201 * the enable/disable handlers. 1202 */ 1203 void (*sync_hw)(struct drm_i915_private *dev_priv, 1204 struct i915_power_well *power_well); 1205 /* 1206 * Enable the well and resources that depend on it (for example 1207 * interrupts located on the well). Called after the 0->1 refcount 1208 * transition. 1209 */ 1210 void (*enable)(struct drm_i915_private *dev_priv, 1211 struct i915_power_well *power_well); 1212 /* 1213 * Disable the well and resources that depend on it. Called after 1214 * the 1->0 refcount transition. 1215 */ 1216 void (*disable)(struct drm_i915_private *dev_priv, 1217 struct i915_power_well *power_well); 1218 /* Returns the hw enabled state. */ 1219 bool (*is_enabled)(struct drm_i915_private *dev_priv, 1220 struct i915_power_well *power_well); 1221 }; 1222 1223 /* Power well structure for haswell */ 1224 struct i915_power_well { 1225 const char *name; 1226 bool always_on; 1227 /* power well enable/disable usage count */ 1228 int count; 1229 /* cached hw enabled state */ 1230 bool hw_enabled; 1231 unsigned long domains; 1232 unsigned long data; 1233 const struct i915_power_well_ops *ops; 1234 }; 1235 1236 struct i915_power_domains { 1237 /* 1238 * Power wells needed for initialization at driver init and suspend 1239 * time are on. They are kept on until after the first modeset. 1240 */ 1241 bool init_power_on; 1242 bool initializing; 1243 int power_well_count; 1244 1245 struct lock lock; 1246 int domain_use_count[POWER_DOMAIN_NUM]; 1247 struct i915_power_well *power_wells; 1248 }; 1249 1250 #define MAX_L3_SLICES 2 1251 struct intel_l3_parity { 1252 u32 *remap_info[MAX_L3_SLICES]; 1253 struct work_struct error_work; 1254 int which_slice; 1255 }; 1256 1257 struct i915_gem_mm { 1258 /** Memory allocator for GTT stolen memory */ 1259 struct drm_mm stolen; 1260 /** Protects the usage of the GTT stolen memory allocator. This is 1261 * always the inner lock when overlapping with struct_mutex. */ 1262 struct lock stolen_lock; 1263 1264 /** List of all objects in gtt_space. Used to restore gtt 1265 * mappings on resume */ 1266 struct list_head bound_list; 1267 /** 1268 * List of objects which are not bound to the GTT (thus 1269 * are idle and not used by the GPU) but still have 1270 * (presumably uncached) pages still attached. 1271 */ 1272 struct list_head unbound_list; 1273 1274 /** Usable portion of the GTT for GEM */ 1275 unsigned long stolen_base; /* limited to low memory (32-bit) */ 1276 1277 /** PPGTT used for aliasing the PPGTT with the GTT */ 1278 struct i915_hw_ppgtt *aliasing_ppgtt; 1279 1280 struct notifier_block oom_notifier; 1281 struct notifier_block vmap_notifier; 1282 struct shrinker shrinker; 1283 bool shrinker_no_lock_stealing; 1284 1285 /** LRU list of objects with fence regs on them. 
	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests? Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	unsigned int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	struct spinlock object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	struct lock lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that reset is in progress and even values mean that the
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)
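/*
 * Given the encoding described above, the common reset checks reduce to
 * simple bit tests (an illustrative sketch; the real inline helpers live
 * further down in this header):
 *
 *	u32 reset = atomic_read(&error->reset_counter);
 *	bool in_progress = reset & I915_RESET_IN_PROGRESS_FLAG;
 *	bool wedged = reset & I915_WEDGED;
 */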
1386 */ 1387 atomic_t reset_counter; 1388 1389 #define I915_RESET_IN_PROGRESS_FLAG 1 1390 #define I915_WEDGED (1 << 31) 1391 1392 /** 1393 * Waitqueue to signal when the reset has completed. Used by clients 1394 * that wait for dev_priv->mm.wedged to settle. 1395 */ 1396 wait_queue_head_t reset_queue; 1397 1398 /* Userspace knobs for gpu hang simulation; 1399 * combines both a ring mask, and extra flags 1400 */ 1401 u32 stop_rings; 1402 #define I915_STOP_RING_ALLOW_BAN (1 << 31) 1403 #define I915_STOP_RING_ALLOW_WARN (1 << 30) 1404 1405 /* For missed irq/seqno simulation. */ 1406 unsigned int test_irq_rings; 1407 }; 1408 1409 enum modeset_restore { 1410 MODESET_ON_LID_OPEN, 1411 MODESET_DONE, 1412 MODESET_SUSPENDED, 1413 }; 1414 1415 #define DP_AUX_A 0x40 1416 #define DP_AUX_B 0x10 1417 #define DP_AUX_C 0x20 1418 #define DP_AUX_D 0x30 1419 1420 #define DDC_PIN_B 0x05 1421 #define DDC_PIN_C 0x04 1422 #define DDC_PIN_D 0x06 1423 1424 struct ddi_vbt_port_info { 1425 /* 1426 * This is an index in the HDMI/DVI DDI buffer translation table. 1427 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1428 * populate this field. 1429 */ 1430 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1431 uint8_t hdmi_level_shift; 1432 1433 uint8_t supports_dvi:1; 1434 uint8_t supports_hdmi:1; 1435 uint8_t supports_dp:1; 1436 1437 uint8_t alternate_aux_channel; 1438 uint8_t alternate_ddc_pin; 1439 1440 uint8_t dp_boost_level; 1441 uint8_t hdmi_boost_level; 1442 }; 1443 1444 enum psr_lines_to_wait { 1445 PSR_0_LINES_TO_WAIT = 0, 1446 PSR_1_LINE_TO_WAIT, 1447 PSR_4_LINES_TO_WAIT, 1448 PSR_8_LINES_TO_WAIT 1449 }; 1450 1451 struct intel_vbt_data { 1452 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1453 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1454 1455 /* Feature bits */ 1456 unsigned int int_tv_support:1; 1457 unsigned int lvds_dither:1; 1458 unsigned int lvds_vbt:1; 1459 unsigned int int_crt_support:1; 1460 unsigned int lvds_use_ssc:1; 1461 unsigned int display_clock_mode:1; 1462 unsigned int fdi_rx_polarity_inverted:1; 1463 unsigned int panel_type:4; 1464 int lvds_ssc_freq; 1465 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1466 1467 enum drrs_support_type drrs_type; 1468 1469 struct { 1470 int rate; 1471 int lanes; 1472 int preemphasis; 1473 int vswing; 1474 bool low_vswing; 1475 bool initialized; 1476 bool support; 1477 int bpp; 1478 struct edp_power_seq pps; 1479 } edp; 1480 1481 struct { 1482 bool full_link; 1483 bool require_aux_wakeup; 1484 int idle_frames; 1485 enum psr_lines_to_wait lines_to_wait; 1486 int tp1_wakeup_time; 1487 int tp2_tp3_wakeup_time; 1488 } psr; 1489 1490 struct { 1491 u16 pwm_freq_hz; 1492 bool present; 1493 bool active_low_pwm; 1494 u8 min_brightness; /* min_brightness/255 of max */ 1495 } backlight; 1496 1497 /* MIPI DSI */ 1498 struct { 1499 u16 panel_id; 1500 struct mipi_config *config; 1501 struct mipi_pps_data *pps; 1502 u8 seq_version; 1503 u32 size; 1504 u8 *data; 1505 const u8 *sequence[MIPI_SEQ_MAX]; 1506 } dsi; 1507 1508 int crt_ddc_pin; 1509 1510 int child_dev_num; 1511 union child_device_config *child_dev; 1512 1513 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1514 struct sdvo_device_mapping sdvo_mappings[2]; 1515 }; 1516 1517 enum intel_ddb_partitioning { 1518 INTEL_DDB_PART_1_2, 1519 INTEL_DDB_PART_5_6, /* IVB+ */ 1520 }; 1521 1522 struct intel_wm_level { 1523 bool enable; 1524 uint32_t pri_val; 1525 uint32_t spr_val; 1526 uint32_t cur_val; 1527 uint32_t fbc_val; 1528 }; 1529 1530 struct ilk_wm_values { 
struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};

struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};

struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
	struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen and we'll print some error
 * messages in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	atomic_t atomic_seq;
	bool suspended;
	bool irqs_enabled;
};
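/*
 * Typical usage pattern for the refcounted interface described above
 * (illustrative; intel_runtime_pm_get/put are part of the driver's
 * runtime PM API, declared in intel_drv.h):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch the hardware ...
 *	intel_runtime_pm_put(dev_priv);
 */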
enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	struct spinlock lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct lock lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	i915_reg_t addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

/*
 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
 * allowing it for RCS as we don't foresee any requirement of having
 * a whitelist for other engines. When it is really required for
 * other engines then the limit needs to be increased.
 */
#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
	u32 hw_whitelist_count[I915_NUM_ENGINES];
};
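/*
 * One consumer of the table above is workaround verification; roughly
 * (a simplified sketch, not the exact driver code):
 *
 *	struct i915_wa_reg *wa = &dev_priv->workarounds.reg[i];
 *
 *	if ((I915_READ(wa->addr) & wa->mask) != (wa->value & wa->mask))
 *		DRM_ERROR("workaround 0x%x lost\n",
 *			  i915_mmio_reg_offset(wa->addr));
 */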
struct i915_virtual_gpu {
	bool active;
};

struct i915_execbuffer_params {
	struct drm_device *dev;
	struct drm_file *file;
	uint32_t dispatch_flags;
	uint32_t args_batch_start_offset;
	uint64_t batch_obj_vm_offset;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch_obj;
	struct intel_context *ctx;
	struct drm_i915_gem_request *request;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	struct intel_device_info info;

	int relative_constants_mode;

	char __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct lock gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	uint32_t psr_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs engine[I915_NUM_ENGINES];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource *mch_res;
	int mch_res_rid;

	/* protects the irq masks */
	struct lock irq_lock;

	/* protects the mmio flip data */
	struct spinlock mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct lock sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct lock backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct lock pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_boot_cdclk;
	unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int czclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct lock modeset_restore_lock;
	struct drm_atomic_state *modeset_restore_state;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct lock mm_lock;

	/* Kernel Modesetting */

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct lock dpll_lock;

	unsigned int active_crtcs;
	unsigned int min_pixclk[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	u32 edram_cap;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;
	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 */
	struct lock av_mutex;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool suspended_to_idle;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/* Committed wm config */
		struct intel_wm_config config;

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};

		uint8_t max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct lock wm_mutex;
	} wm;

	struct i915_runtime_pm pm;

	uint32_t bios_vgacntr;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*execbuf_submit)(struct i915_execbuffer_params *params,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas);
		int (*init_engines)(struct drm_device *dev);
		void (*cleanup_engine)(struct intel_engine_cs *engine);
		void (*stop_engine)(struct intel_engine_cs *engine);
	} gt;

	struct intel_context *kernel_context;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	struct intel_encoder *dig_port_map[I915_MAX_PORTS];

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
2009 */
2010 };
2011
2012 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2013 {
2014 return dev->dev_private;
2015 }
2016
2017 static inline struct drm_i915_private *dev_to_i915(struct device *dev)
2018 {
2019 return to_i915(device_get_softc(dev->bsddev));
2020 }
2021
2022 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
2023 {
2024 return container_of(guc, struct drm_i915_private, guc);
2025 }
2026
2027 /* Simple iterator over all initialised engines */
2028 #define for_each_engine(engine__, dev_priv__) \
2029 for ((engine__) = &(dev_priv__)->engine[0]; \
2030 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
2031 (engine__)++) \
2032 for_each_if (intel_engine_initialized(engine__))
2033
2034 /* Iterator with engine_id */
2035 #define for_each_engine_id(engine__, dev_priv__, id__) \
2036 for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
2037 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
2038 (engine__)++) \
2039 for_each_if (((id__) = (engine__)->id, \
2040 intel_engine_initialized(engine__)))
2041
2042 /* Iterator over subset of engines selected by mask */
2043 #define for_each_engine_masked(engine__, dev_priv__, mask__) \
2044 for ((engine__) = &(dev_priv__)->engine[0]; \
2045 (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
2046 (engine__)++) \
2047 for_each_if (((mask__) & intel_engine_flag(engine__)) && \
2048 intel_engine_initialized(engine__))
2049
2050 enum hdmi_force_audio {
2051 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
2052 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
2053 HDMI_AUDIO_AUTO, /* trust EDID */
2054 HDMI_AUDIO_ON, /* force turn on HDMI audio */
2055 };
2056
2057 #define I915_GTT_OFFSET_NONE ((u32)-1)
2058
2059 struct drm_i915_gem_object_ops {
2060 unsigned int flags;
2061 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
2062
2063 /* Interface between the GEM object and its backing storage.
2064 * get_pages() is called once prior to the use of the associated set
2065 * of pages, before binding them into the GTT, and put_pages() is
2066 * called after we no longer need them. As we expect there to be an
2067 * associated cost with migrating pages between the backing storage
2068 * and making them available for the GPU (e.g. clflush), we may hold
2069 * onto the pages after they are no longer referenced by the GPU
2070 * in case they may be used again shortly (for example migrating the
2071 * pages to a different memory domain within the GTT). put_pages()
2072 * will therefore most likely be called when the object itself is
2073 * being released or under memory pressure (where we attempt to
2074 * reap pages for the shrinker).
2075 */
2076 int (*get_pages)(struct drm_i915_gem_object *);
2077 void (*put_pages)(struct drm_i915_gem_object *);
2078
2079 int (*dmabuf_export)(struct drm_i915_gem_object *);
2080 void (*release)(struct drm_i915_gem_object *);
2081 };
2082
2083 /*
2084 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
2085 * considered to be the frontbuffer for the given plane interface-wise. This
2086 * doesn't mean that the hw necessarily already scans it out, but that any
2087 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
2088 *
2089 * We have one bit per pipe and per scanout plane type.
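 *
 * As a worked illustration (derived purely from the macros below): with
 * 8 bits per pipe, bit 0 of a pipe's byte is the primary plane, bit 1 the
 * cursor, bits 2-6 the sprite planes and bit 7 the overlay, so for pipe B:
 *
 *	INTEL_FRONTBUFFER_PRIMARY(PIPE_B)	== 1 << 8
 *	INTEL_FRONTBUFFER_CURSOR(PIPE_B)	== 1 << 9
 *	INTEL_FRONTBUFFER_SPRITE(PIPE_B, 0)	== 1 << 10
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_B)	== 1 << 15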
2090 */
2091 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
2092 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
2093 #define INTEL_FRONTBUFFER_BITS \
2094 (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
2095 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
2096 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2097 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
2098 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2099 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
2100 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2101 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
2102 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
2103 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
2104 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
2105
2106 struct drm_i915_gem_object {
2107 struct drm_gem_object base;
2108
2109 const struct drm_i915_gem_object_ops *ops;
2110
2111 /** List of VMAs backed by this object */
2112 struct list_head vma_list;
2113
2114 /** Stolen memory for this object, instead of being backed by shmem. */
2115 struct drm_mm_node *stolen;
2116 struct list_head global_list;
2117
2118 struct list_head engine_list[I915_NUM_ENGINES];
2119 /** Used in execbuf to temporarily hold a ref */
2120 struct list_head obj_exec_link;
2121
2122 struct list_head batch_pool_link;
2123
2124 /**
2125 * This is set if the object is on the active lists (has pending
2126 * rendering and so a non-zero seqno), and is not set if it is on the
2127 * inactive (ready to be unbound) list.
2128 */
2129 unsigned int active:I915_NUM_ENGINES;
2130
2131 /**
2132 * This is set if the object has been written to since last bound
2133 * to the GTT
2134 */
2135 unsigned int dirty:1;
2136
2137 /**
2138 * Fence register bits (if any) for this object. Will be set
2139 * as needed when mapped into the GTT.
2140 * Protected by dev->struct_mutex.
2141 */
2142 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
2143
2144 /**
2145 * Advice: are the backing pages purgeable?
2146 */
2147 unsigned int madv:2;
2148
2149 /**
2150 * Current tiling mode for the object.
2151 */
2152 unsigned int tiling_mode:2;
2153 /**
2154 * Whether the tiling parameters for the currently associated fence
2155 * register have changed. Note that for the purposes of tracking
2156 * tiling changes we also treat the unfenced register, the register
2157 * slot that the object occupies whilst it executes a fenced
2158 * command (such as BLT on gen2/3), as a "fence".
2159 */
2160 unsigned int fence_dirty:1;
2161
2162 /**
2163 * Is the object at the current location in the gtt mappable and
2164 * fenceable? Used to avoid costly recalculations.
2165 */
2166 unsigned int map_and_fenceable:1;
2167
2168 /**
2169 * Whether the current gtt mapping needs to be mappable (and isn't just
2170 * mappable by accident). Track pin and fault separately for a more
2171 * accurate mappable working set.
2172 */
2173 unsigned int fault_mappable:1;
2174
2175 /*
2176 * Is the object to be mapped as read-only to the GPU?
2177 * Only honoured if the hardware has the relevant pte bit.
2178 */
2179 unsigned long gt_ro:1;
2180 unsigned int cache_level:3;
2181 unsigned int cache_dirty:1;
2182
2183 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
2184
2185 unsigned int pin_display;
2186
2187 struct sg_table *pages;
2188 int pages_pin_count;
2189 struct get_page {
2190 struct scatterlist *sg;
2191 int last;
2192 } get_page;
2193 void *mapping;
2194
2195 /** Breadcrumb of last rendering to the buffer.
2196 * There can only be one writer, but we allow for multiple readers.
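 * (Hence a single last_write_req pointer below, but one last_read_req[]
 * slot per engine.)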
2197 * If there is a writer that necessarily implies that all other 2198 * read requests are complete - but we may only be lazily clearing 2199 * the read requests. A read request is naturally the most recent 2200 * request on a ring, so we may have two different write and read 2201 * requests on one ring where the write request is older than the 2202 * read request. This allows for the CPU to read from an active 2203 * buffer by only waiting for the write to complete. 2204 * */ 2205 struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; 2206 struct drm_i915_gem_request *last_write_req; 2207 /** Breadcrumb of last fenced GPU access to the buffer. */ 2208 struct drm_i915_gem_request *last_fenced_req; 2209 2210 /** Current tiling stride for the object, if it's tiled. */ 2211 uint32_t stride; 2212 2213 /** References from framebuffers, locks out tiling changes. */ 2214 unsigned long framebuffer_references; 2215 2216 /** Record of address bit 17 of each page at last unbind. */ 2217 unsigned long *bit_17; 2218 2219 union { 2220 /** for phy allocated objects */ 2221 struct drm_dma_handle *phys_handle; 2222 2223 struct i915_gem_userptr { 2224 uintptr_t ptr; 2225 unsigned read_only :1; 2226 unsigned workers :4; 2227 #define I915_GEM_USERPTR_MAX_WORKERS 15 2228 2229 struct i915_mm_struct *mm; 2230 struct i915_mmu_object *mmu_object; 2231 struct work_struct *work; 2232 } userptr; 2233 }; 2234 }; 2235 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2236 2237 void i915_gem_track_fb(struct drm_i915_gem_object *old, 2238 struct drm_i915_gem_object *new, 2239 unsigned frontbuffer_bits); 2240 2241 /** 2242 * Request queue structure. 2243 * 2244 * The request queue allows us to note sequence numbers that have been emitted 2245 * and may be associated with active buffers to be retired. 2246 * 2247 * By keeping this list, we can avoid having to do questionable sequence 2248 * number comparisons on buffer last_read|write_seqno. It also allows an 2249 * emission time to be associated with the request for tracking how far ahead 2250 * of the GPU the submission is. 2251 * 2252 * The requests are reference counted, so upon creation they should have an 2253 * initial reference taken using kref_init 2254 */ 2255 struct drm_i915_gem_request { 2256 struct kref ref; 2257 2258 /** On Which ring this request was generated */ 2259 struct drm_i915_private *i915; 2260 struct intel_engine_cs *engine; 2261 unsigned reset_counter; 2262 2263 /** GEM sequence number associated with the previous request, 2264 * when the HWS breadcrumb is equal to this the GPU is processing 2265 * this request. 2266 */ 2267 u32 previous_seqno; 2268 2269 /** GEM sequence number associated with this request, 2270 * when the HWS breadcrumb is equal or greater than this the GPU 2271 * has finished processing this request. 2272 */ 2273 u32 seqno; 2274 2275 /** Position in the ringbuffer of the start of the request */ 2276 u32 head; 2277 2278 /** 2279 * Position in the ringbuffer of the start of the postfix. 2280 * This is required to calculate the maximum available ringbuffer 2281 * space without overwriting the postfix. 2282 */ 2283 u32 postfix; 2284 2285 /** Position in the ringbuffer of the end of the whole request */ 2286 u32 tail; 2287 2288 /** 2289 * Context and ring buffer related to this request 2290 * Contexts are refcounted, so when this request is associated with a 2291 * context, we must increment the context's refcount, to guarantee that 2292 * it persists while any request is linked to it. 
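 * (That pairing is what i915_gem_context_reference() and
 * i915_gem_context_unreference(), declared further down in this file,
 * are used for.)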
Requests themselves 2293 * are also refcounted, so the request will only be freed when the last 2294 * reference to it is dismissed, and the code in 2295 * i915_gem_request_free() will then decrement the refcount on the 2296 * context. 2297 */ 2298 struct intel_context *ctx; 2299 struct intel_ringbuffer *ringbuf; 2300 2301 /** Batch buffer related to this request if any (used for 2302 error state dump only) */ 2303 struct drm_i915_gem_object *batch_obj; 2304 2305 /** Time at which this request was emitted, in jiffies. */ 2306 unsigned long emitted_jiffies; 2307 2308 /** global list entry for this request */ 2309 struct list_head list; 2310 2311 struct drm_i915_file_private *file_priv; 2312 /** file_priv list entry for this request */ 2313 struct list_head client_list; 2314 2315 /** process identifier submitting this request */ 2316 pid_t pid; 2317 2318 /** 2319 * The ELSP only accepts two elements at a time, so we queue 2320 * context/tail pairs on a given queue (ring->execlist_queue) until the 2321 * hardware is available. The queue serves a double purpose: we also use 2322 * it to keep track of the up to 2 contexts currently in the hardware 2323 * (usually one in execution and the other queued up by the GPU): We 2324 * only remove elements from the head of the queue when the hardware 2325 * informs us that an element has been completed. 2326 * 2327 * All accesses to the queue are mediated by a spinlock 2328 * (ring->execlist_lock). 2329 */ 2330 2331 /** Execlist link in the submission queue.*/ 2332 struct list_head execlist_link; 2333 2334 /** Execlists no. of times this request has been sent to the ELSP */ 2335 int elsp_submitted; 2336 2337 }; 2338 2339 struct drm_i915_gem_request * __must_check 2340 i915_gem_request_alloc(struct intel_engine_cs *engine, 2341 struct intel_context *ctx); 2342 void i915_gem_request_free(struct kref *req_ref); 2343 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2344 struct drm_file *file); 2345 2346 static inline uint32_t 2347 i915_gem_request_get_seqno(struct drm_i915_gem_request *req) 2348 { 2349 return req ? req->seqno : 0; 2350 } 2351 2352 static inline struct intel_engine_cs * 2353 i915_gem_request_get_engine(struct drm_i915_gem_request *req) 2354 { 2355 return req ? req->engine : NULL; 2356 } 2357 2358 static inline struct drm_i915_gem_request * 2359 i915_gem_request_reference(struct drm_i915_gem_request *req) 2360 { 2361 if (req) 2362 kref_get(&req->ref); 2363 return req; 2364 } 2365 2366 static inline void 2367 i915_gem_request_unreference(struct drm_i915_gem_request *req) 2368 { 2369 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); 2370 kref_put(&req->ref, i915_gem_request_free); 2371 } 2372 2373 static inline void 2374 i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) 2375 { 2376 struct drm_device *dev; 2377 2378 if (!req) 2379 return; 2380 2381 dev = req->engine->dev; 2382 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) 2383 mutex_unlock(&dev->struct_mutex); 2384 } 2385 2386 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2387 struct drm_i915_gem_request *src) 2388 { 2389 if (src) 2390 i915_gem_request_reference(src); 2391 2392 if (*pdst) 2393 i915_gem_request_unreference(*pdst); 2394 2395 *pdst = src; 2396 } 2397 2398 /* 2399 * XXX: i915_gem_request_completed should be here but currently needs the 2400 * definition of i915_seqno_passed() which is below. 
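 * (i915_seqno_passed() compares via a signed 32-bit difference, so it
 * survives seqno wraparound: seq1 = 1 "passes" seq2 = 0xffffffff because
 * (int32_t)(seq1 - seq2) == 2 >= 0.)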
It will be moved in 2401 * a later patch when the call to i915_seqno_passed() is obsoleted... 2402 */ 2403 2404 /* 2405 * A command that requires special handling by the command parser. 2406 */ 2407 struct drm_i915_cmd_descriptor { 2408 /* 2409 * Flags describing how the command parser processes the command. 2410 * 2411 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2412 * a length mask if not set 2413 * CMD_DESC_SKIP: The command is allowed but does not follow the 2414 * standard length encoding for the opcode range in 2415 * which it falls 2416 * CMD_DESC_REJECT: The command is never allowed 2417 * CMD_DESC_REGISTER: The command should be checked against the 2418 * register whitelist for the appropriate ring 2419 * CMD_DESC_MASTER: The command is allowed if the submitting process 2420 * is the DRM master 2421 */ 2422 u32 flags; 2423 #define CMD_DESC_FIXED (1<<0) 2424 #define CMD_DESC_SKIP (1<<1) 2425 #define CMD_DESC_REJECT (1<<2) 2426 #define CMD_DESC_REGISTER (1<<3) 2427 #define CMD_DESC_BITMASK (1<<4) 2428 #define CMD_DESC_MASTER (1<<5) 2429 2430 /* 2431 * The command's unique identification bits and the bitmask to get them. 2432 * This isn't strictly the opcode field as defined in the spec and may 2433 * also include type, subtype, and/or subop fields. 2434 */ 2435 struct { 2436 u32 value; 2437 u32 mask; 2438 } cmd; 2439 2440 /* 2441 * The command's length. The command is either fixed length (i.e. does 2442 * not include a length field) or has a length field mask. The flag 2443 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2444 * a length mask. All command entries in a command table must include 2445 * length information. 2446 */ 2447 union { 2448 u32 fixed; 2449 u32 mask; 2450 } length; 2451 2452 /* 2453 * Describes where to find a register address in the command to check 2454 * against the ring's register whitelist. Only valid if flags has the 2455 * CMD_DESC_REGISTER bit set. 2456 * 2457 * A non-zero step value implies that the command may access multiple 2458 * registers in sequence (e.g. LRI), in that case step gives the 2459 * distance in dwords between individual offset fields. 2460 */ 2461 struct { 2462 u32 offset; 2463 u32 mask; 2464 u32 step; 2465 } reg; 2466 2467 #define MAX_CMD_DESC_BITMASKS 3 2468 /* 2469 * Describes command checks where a particular dword is masked and 2470 * compared against an expected value. If the command does not match 2471 * the expected value, the parser rejects it. Only valid if flags has 2472 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2473 * are valid. 2474 * 2475 * If the check specifies a non-zero condition_mask then the parser 2476 * only performs the check when the bits specified by condition_mask 2477 * are non-zero. 2478 */ 2479 struct { 2480 u32 offset; 2481 u32 mask; 2482 u32 expected; 2483 u32 condition_offset; 2484 u32 condition_mask; 2485 } bits[MAX_CMD_DESC_BITMASKS]; 2486 }; 2487 2488 /* 2489 * A table of commands requiring special handling by the command parser. 2490 * 2491 * Each ring has an array of tables. Each table consists of an array of command 2492 * descriptors, which must be sorted with command opcodes in ascending order. 2493 */ 2494 struct drm_i915_cmd_table { 2495 const struct drm_i915_cmd_descriptor *table; 2496 int count; 2497 }; 2498 2499 /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. 
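 * Both pointer flavours work, as the type dispatch below shows:
 * INTEL_INFO(dev) on a struct drm_device * and INTEL_INFO(dev_priv) on a
 * struct drm_i915_private * end up at the same device info.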
*/ 2500 #define __I915__(p) ({ \ 2501 const struct drm_i915_private *__p; \ 2502 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ 2503 __p = (const struct drm_i915_private *)p; \ 2504 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ 2505 __p = to_i915((const struct drm_device *)p); \ 2506 __p; \ 2507 }) 2508 #define INTEL_INFO(p) (&__I915__(p)->info) 2509 #define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2510 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2511 #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2512 2513 #define REVID_FOREVER 0xff 2514 /* 2515 * Return true if revision is in range [since,until] inclusive. 2516 * 2517 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2518 */ 2519 #define IS_REVID(p, since, until) \ 2520 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2521 2522 #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) 2523 #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) 2524 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2525 #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) 2526 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2527 #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) 2528 #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) 2529 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2530 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2531 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2532 #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) 2533 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2534 #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) 2535 #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) 2536 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2537 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2538 #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) 2539 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2540 #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ 2541 INTEL_DEVID(dev) == 0x0152 || \ 2542 INTEL_DEVID(dev) == 0x015a) 2543 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2544 #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2545 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2546 #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2547 #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2548 #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2549 #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2550 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2551 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2552 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2553 #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2554 ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2555 (INTEL_DEVID(dev) & 0xf) == 0xb || \ 2556 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2557 /* ULX machines are also considered ULT. */ 2558 #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ 2559 (INTEL_DEVID(dev) & 0xf) == 0xe) 2560 #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ 2561 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2562 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2563 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2564 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2565 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2566 /* ULX machines are also considered ULT. 
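 * (Consistent with the IS_HSW_ULT() test above: both ULX ids, 0x0A0E and
 * 0x0A1E, satisfy (INTEL_DEVID(dev) & 0xFF00) == 0x0A00.)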
*/ 2567 #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ 2568 INTEL_DEVID(dev) == 0x0A1E) 2569 #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ 2570 INTEL_DEVID(dev) == 0x1913 || \ 2571 INTEL_DEVID(dev) == 0x1916 || \ 2572 INTEL_DEVID(dev) == 0x1921 || \ 2573 INTEL_DEVID(dev) == 0x1926) 2574 #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ 2575 INTEL_DEVID(dev) == 0x1915 || \ 2576 INTEL_DEVID(dev) == 0x191E) 2577 #define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ 2578 INTEL_DEVID(dev) == 0x5913 || \ 2579 INTEL_DEVID(dev) == 0x5916 || \ 2580 INTEL_DEVID(dev) == 0x5921 || \ 2581 INTEL_DEVID(dev) == 0x5926) 2582 #define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ 2583 INTEL_DEVID(dev) == 0x5915 || \ 2584 INTEL_DEVID(dev) == 0x591E) 2585 #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ 2586 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2587 #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ 2588 (INTEL_DEVID(dev) & 0x00F0) == 0x0030) 2589 2590 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2591 2592 #define SKL_REVID_A0 0x0 2593 #define SKL_REVID_B0 0x1 2594 #define SKL_REVID_C0 0x2 2595 #define SKL_REVID_D0 0x3 2596 #define SKL_REVID_E0 0x4 2597 #define SKL_REVID_F0 0x5 2598 #define SKL_REVID_G0 0x6 2599 #define SKL_REVID_H0 0x7 2600 2601 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2602 2603 #define BXT_REVID_A0 0x0 2604 #define BXT_REVID_A1 0x1 2605 #define BXT_REVID_B0 0x3 2606 #define BXT_REVID_C0 0x9 2607 2608 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) 2609 2610 #define KBL_REVID_A0 0x0 2611 #define KBL_REVID_B0 0x1 2612 #define KBL_REVID_C0 0x2 2613 #define KBL_REVID_D0 0x3 2614 #define KBL_REVID_E0 0x4 2615 2616 #define IS_KBL_REVID(p, since, until) \ 2617 (IS_KABYLAKE(p) && IS_REVID(p, since, until)) 2618 2619 /* 2620 * The genX designation typically refers to the render engine, so render 2621 * capability related checks should use IS_GEN, while display and other checks 2622 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2623 * chips, etc.). 
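 *
 * A small usage sketch (an illustration assembled only from macros defined
 * in this file):
 *
 *	if (IS_GEN6(dev))
 *		... gen6 render-engine paths ...
 *	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
 *		... workaround for SKL revisions up to F0, 0 being the
 *		open-ended 'since' accepted by IS_REVID() ...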
2624 */ 2625 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2626 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2627 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2628 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2629 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2630 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2631 #define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2632 #define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2633 2634 #define RENDER_RING (1<<RCS) 2635 #define BSD_RING (1<<VCS) 2636 #define BLT_RING (1<<BCS) 2637 #define VEBOX_RING (1<<VECS) 2638 #define BSD2_RING (1<<VCS2) 2639 #define ALL_ENGINES (~0) 2640 2641 #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2642 #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2643 #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2644 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2645 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2646 #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2647 #define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) 2648 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2649 HAS_EDRAM(dev)) 2650 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2651 2652 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2653 #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2654 #define USES_PPGTT(dev) (i915.enable_ppgtt) 2655 #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) 2656 #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) 2657 2658 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2659 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2660 2661 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2662 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2663 2664 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2665 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2666 IS_SKL_GT3(dev) || \ 2667 IS_SKL_GT4(dev)) 2668 2669 /* 2670 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2671 * even when in MSI mode. This results in spurious interrupt warnings if the 2672 * legacy irq no. is shared with another device. The kernel then disables that 2673 * interrupt source and so prevents the other device from working properly. 2674 */ 2675 #define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2676 #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) 2677 2678 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2679 * rows, which changed the alignment requirements and fence programming. 
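 * (That is 32 x 128 = 4096 bytes per Y tile - simple arithmetic on the
 * figures above.)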
2680 */ 2681 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 2682 IS_I915GM(dev))) 2683 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 2684 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 2685 2686 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 2687 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2688 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2689 2690 #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2691 2692 #define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2693 INTEL_INFO(dev)->gen >= 9) 2694 2695 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2696 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2697 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ 2698 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ 2699 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 2700 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2701 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ 2702 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2703 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2704 #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2705 #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2706 2707 #define HAS_CSR(dev) (IS_GEN9(dev)) 2708 2709 #define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2710 #define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2711 2712 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2713 INTEL_INFO(dev)->gen >= 8) 2714 2715 #define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ 2716 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ 2717 !IS_BROXTON(dev)) 2718 2719 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2720 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2721 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2722 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2723 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2724 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2725 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2726 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2727 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2728 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2729 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2730 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2731 2732 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2733 #define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP) 2734 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2735 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2736 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2737 #define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2738 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2739 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2740 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 2741 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 2742 2743 #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \ 2744 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 2745 2746 /* DPF == dynamic parity feature */ 2747 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 2748 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) 2749 2750 #define GT_FREQUENCY_MULTIPLIER 50 2751 #define GEN9_FREQ_SCALER 3 2752 2753 #include "i915_trace.h" 2754 2755 extern const struct drm_ioctl_desc i915_ioctls[]; 2756 extern int i915_max_ioctl; 2757 2758 extern int i915_suspend_switcheroo(device_t kdev); 2759 extern int i915_resume_switcheroo(struct drm_device *dev); 2760 2761 /* i915_dma.c */ 2762 void __printf(3, 4) 2763 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2764 const char *fmt, ...); 2765 2766 #define i915_report_error(dev_priv, fmt, ...) \ 2767 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2768 2769 extern int i915_driver_load(struct drm_device *, unsigned long flags); 2770 extern int i915_driver_unload(struct drm_device *); 2771 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2772 extern void i915_driver_lastclose(struct drm_device * dev); 2773 extern void i915_driver_preclose(struct drm_device *dev, 2774 struct drm_file *file); 2775 extern void i915_driver_postclose(struct drm_device *dev, 2776 struct drm_file *file); 2777 #ifdef CONFIG_COMPAT 2778 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2779 unsigned long arg); 2780 #endif 2781 extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2782 extern bool intel_has_gpu_reset(struct drm_device *dev); 2783 extern int i915_reset(struct drm_device *dev); 2784 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2785 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2786 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2787 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2788 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2789 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2790 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2791 2792 /* intel_hotplug.c */ 2793 void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2794 void intel_hpd_init(struct drm_i915_private *dev_priv); 2795 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2796 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2797 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2798 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2799 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2800 2801 /* i915_irq.c */ 2802 void i915_queue_hangcheck(struct drm_device *dev); 2803 __printf(3, 4) 2804 void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2805 const char *fmt, ...); 2806 2807 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2808 int intel_irq_install(struct drm_i915_private *dev_priv); 2809 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2810 2811 extern void intel_uncore_sanitize(struct drm_device *dev); 2812 extern void intel_uncore_early_sanitize(struct drm_device *dev, 2813 bool restore_forcewake); 2814 extern void intel_uncore_init(struct drm_device *dev); 2815 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2816 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2817 extern void intel_uncore_fini(struct drm_device *dev); 2818 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 2819 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2820 void 
intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2821 enum forcewake_domains domains); 2822 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2823 enum forcewake_domains domains); 2824 /* Like above but the caller must manage the uncore.lock itself. 2825 * Must be used with I915_READ_FW and friends. 2826 */ 2827 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2828 enum forcewake_domains domains); 2829 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2830 enum forcewake_domains domains); 2831 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2832 2833 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2834 static inline bool intel_vgpu_active(struct drm_device *dev) 2835 { 2836 return to_i915(dev)->vgpu.active; 2837 } 2838 2839 void 2840 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2841 u32 status_mask); 2842 2843 void 2844 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 2845 u32 status_mask); 2846 2847 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2848 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2849 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2850 uint32_t mask, 2851 uint32_t bits); 2852 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2853 uint32_t interrupt_mask, 2854 uint32_t enabled_irq_mask); 2855 static inline void 2856 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2857 { 2858 ilk_update_display_irq(dev_priv, bits, bits); 2859 } 2860 static inline void 2861 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2862 { 2863 ilk_update_display_irq(dev_priv, bits, 0); 2864 } 2865 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2866 enum i915_pipe pipe, 2867 uint32_t interrupt_mask, 2868 uint32_t enabled_irq_mask); 2869 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2870 enum i915_pipe pipe, uint32_t bits) 2871 { 2872 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2873 } 2874 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2875 enum i915_pipe pipe, uint32_t bits) 2876 { 2877 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2878 } 2879 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2880 uint32_t interrupt_mask, 2881 uint32_t enabled_irq_mask); 2882 static inline void 2883 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2884 { 2885 ibx_display_interrupt_update(dev_priv, bits, bits); 2886 } 2887 static inline void 2888 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2889 { 2890 ibx_display_interrupt_update(dev_priv, bits, 0); 2891 } 2892 2893 2894 /* i915_gem.c */ 2895 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2896 struct drm_file *file_priv); 2897 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2898 struct drm_file *file_priv); 2899 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2900 struct drm_file *file_priv); 2901 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2902 struct drm_file *file_priv); 2903 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2904 struct drm_file *file_priv); 2905 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2906 struct drm_file *file_priv); 2907 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 
2908 struct drm_file *file_priv); 2909 void i915_gem_execbuffer_move_to_active(struct list_head *vmas, 2910 struct drm_i915_gem_request *req); 2911 int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, 2912 struct drm_i915_gem_execbuffer2 *args, 2913 struct list_head *vmas); 2914 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2915 struct drm_file *file_priv); 2916 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2917 struct drm_file *file_priv); 2918 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2919 struct drm_file *file_priv); 2920 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2921 struct drm_file *file); 2922 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2923 struct drm_file *file); 2924 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2925 struct drm_file *file_priv); 2926 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2927 struct drm_file *file_priv); 2928 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2929 struct drm_file *file_priv); 2930 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2931 struct drm_file *file_priv); 2932 int i915_gem_init_userptr(struct drm_device *dev); 2933 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2934 struct drm_file *file); 2935 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2936 struct drm_file *file_priv); 2937 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2938 struct drm_file *file_priv); 2939 void i915_gem_load_init(struct drm_device *dev); 2940 void i915_gem_load_cleanup(struct drm_device *dev); 2941 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2942 void *i915_gem_object_alloc(struct drm_device *dev); 2943 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2944 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2945 const struct drm_i915_gem_object_ops *ops); 2946 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 2947 size_t size); 2948 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2949 struct drm_device *dev, const void *data, size_t size); 2950 void i915_gem_free_object(struct drm_gem_object *obj); 2951 void i915_gem_vma_destroy(struct i915_vma *vma); 2952 2953 /* Flags used by pin/bind&friends. */ 2954 #define PIN_MAPPABLE (1<<0) 2955 #define PIN_NONBLOCK (1<<1) 2956 #define PIN_GLOBAL (1<<2) 2957 #define PIN_OFFSET_BIAS (1<<3) 2958 #define PIN_USER (1<<4) 2959 #define PIN_UPDATE (1<<5) 2960 #define PIN_ZONE_4G (1<<6) 2961 #define PIN_HIGH (1<<7) 2962 #define PIN_OFFSET_FIXED (1<<8) 2963 #define PIN_OFFSET_MASK (~4095) 2964 int __must_check 2965 i915_gem_object_pin(struct drm_i915_gem_object *obj, 2966 struct i915_address_space *vm, 2967 uint32_t alignment, 2968 uint64_t flags); 2969 int __must_check 2970 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2971 const struct i915_ggtt_view *view, 2972 uint32_t alignment, 2973 uint64_t flags); 2974 2975 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2976 u32 flags); 2977 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); 2978 int __must_check i915_vma_unbind(struct i915_vma *vma); 2979 /* 2980 * BEWARE: Do not use the function below unless you can _absolutely_ 2981 * _guarantee_ VMA in question is _not in use_ anywhere. 
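 * (i915_vma_unbind() above is the safe default; going by the naming, this
 * _no_wait variant skips waiting for outstanding GPU access to the VMA,
 * hence the warning.)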
2982 */ 2983 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); 2984 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2985 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2986 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2987 2988 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2989 int *needs_clflush); 2990 2991 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2992 2993 static inline int __sg_page_count(struct scatterlist *sg) 2994 { 2995 return sg->length >> PAGE_SHIFT; 2996 } 2997 2998 struct page * 2999 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 3000 3001 static inline struct page * 3002 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 3003 { 3004 if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) 3005 return NULL; 3006 3007 if (n < obj->get_page.last) { 3008 obj->get_page.sg = obj->pages->sgl; 3009 obj->get_page.last = 0; 3010 } 3011 3012 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { 3013 obj->get_page.last += __sg_page_count(obj->get_page.sg++); 3014 #if 0 3015 if (unlikely(sg_is_chain(obj->get_page.sg))) 3016 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); 3017 #endif 3018 } 3019 3020 return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); 3021 } 3022 3023 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 3024 { 3025 BUG_ON(obj->pages == NULL); 3026 obj->pages_pin_count++; 3027 } 3028 3029 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 3030 { 3031 BUG_ON(obj->pages_pin_count == 0); 3032 obj->pages_pin_count--; 3033 } 3034 3035 /** 3036 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3037 * @obj - the object to map into kernel address space 3038 * 3039 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3040 * pages and then returns a contiguous mapping of the backing storage into 3041 * the kernel address space. 3042 * 3043 * The caller must hold the struct_mutex, and is responsible for calling 3044 * i915_gem_object_unpin_map() when the mapping is no longer required. 3045 * 3046 * Returns the pointer through which to access the mapped object, or an 3047 * ERR_PTR() on error. 3048 */ 3049 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); 3050 3051 /** 3052 * i915_gem_object_unpin_map - releases an earlier mapping 3053 * @obj - the object to unmap 3054 * 3055 * After pinning the object and mapping its pages, once you are finished 3056 * with your access, call i915_gem_object_unpin_map() to release the pin 3057 * upon the mapping. Once the pin count reaches zero, that mapping may be 3058 * removed. 3059 * 3060 * The caller must hold the struct_mutex. 
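 *
 * A minimal usage sketch (illustrative only - obj, src and len stand in
 * for the caller's context):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	i915_gem_object_unpin_map(obj);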
3061 */ 3062 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3063 { 3064 lockdep_assert_held(&obj->base.dev->struct_mutex); 3065 i915_gem_object_unpin_pages(obj); 3066 } 3067 3068 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3069 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 3070 struct intel_engine_cs *to, 3071 struct drm_i915_gem_request **to_req); 3072 void i915_vma_move_to_active(struct i915_vma *vma, 3073 struct drm_i915_gem_request *req); 3074 int i915_gem_dumb_create(struct drm_file *file_priv, 3075 struct drm_device *dev, 3076 struct drm_mode_create_dumb *args); 3077 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3078 uint32_t handle, uint64_t *offset); 3079 /** 3080 * Returns true if seq1 is later than seq2. 3081 */ 3082 static inline bool 3083 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 3084 { 3085 return (int32_t)(seq1 - seq2) >= 0; 3086 } 3087 3088 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 3089 bool lazy_coherency) 3090 { 3091 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3092 req->engine->irq_seqno_barrier(req->engine); 3093 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3094 req->previous_seqno); 3095 } 3096 3097 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 3098 bool lazy_coherency) 3099 { 3100 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3101 req->engine->irq_seqno_barrier(req->engine); 3102 return i915_seqno_passed(req->engine->get_seqno(req->engine), 3103 req->seqno); 3104 } 3105 3106 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 3107 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3108 3109 struct drm_i915_gem_request * 3110 i915_gem_find_active_request(struct intel_engine_cs *engine); 3111 3112 bool i915_gem_retire_requests(struct drm_device *dev); 3113 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3114 3115 static inline u32 i915_reset_counter(struct i915_gpu_error *error) 3116 { 3117 return atomic_read(&error->reset_counter); 3118 } 3119 3120 static inline bool __i915_reset_in_progress(u32 reset) 3121 { 3122 return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG); 3123 } 3124 3125 static inline bool __i915_reset_in_progress_or_wedged(u32 reset) 3126 { 3127 return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED)); 3128 } 3129 3130 static inline bool __i915_terminally_wedged(u32 reset) 3131 { 3132 return unlikely(reset & I915_WEDGED); 3133 } 3134 3135 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3136 { 3137 return __i915_reset_in_progress(i915_reset_counter(error)); 3138 } 3139 3140 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3141 { 3142 return __i915_reset_in_progress_or_wedged(i915_reset_counter(error)); 3143 } 3144 3145 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3146 { 3147 return __i915_terminally_wedged(i915_reset_counter(error)); 3148 } 3149 3150 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3151 { 3152 return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; 3153 } 3154 3155 static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv) 3156 { 3157 return dev_priv->gpu_error.stop_rings == 0 || 3158 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN; 3159 } 3160 3161 static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) 
3162 { 3163 return dev_priv->gpu_error.stop_rings == 0 || 3164 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN; 3165 } 3166 3167 void i915_gem_reset(struct drm_device *dev); 3168 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3169 int __must_check i915_gem_init(struct drm_device *dev); 3170 int i915_gem_init_engines(struct drm_device *dev); 3171 int __must_check i915_gem_init_hw(struct drm_device *dev); 3172 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); 3173 void i915_gem_init_swizzling(struct drm_device *dev); 3174 void i915_gem_cleanup_engines(struct drm_device *dev); 3175 int __must_check i915_gpu_idle(struct drm_device *dev); 3176 int __must_check i915_gem_suspend(struct drm_device *dev); 3177 void __i915_add_request(struct drm_i915_gem_request *req, 3178 struct drm_i915_gem_object *batch_obj, 3179 bool flush_caches); 3180 #define i915_add_request(req) \ 3181 __i915_add_request(req, NULL, true) 3182 #define i915_add_request_no_flush(req) \ 3183 __i915_add_request(req, NULL, false) 3184 int __i915_wait_request(struct drm_i915_gem_request *req, 3185 bool interruptible, 3186 s64 *timeout, 3187 struct intel_rps_client *rps); 3188 int __must_check i915_wait_request(struct drm_i915_gem_request *req); 3189 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres); 3190 int __must_check 3191 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 3192 bool readonly); 3193 int __must_check 3194 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3195 bool write); 3196 int __must_check 3197 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3198 int __must_check 3199 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3200 u32 alignment, 3201 const struct i915_ggtt_view *view); 3202 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, 3203 const struct i915_ggtt_view *view); 3204 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3205 int align); 3206 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3207 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3208 3209 uint32_t 3210 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); 3211 uint32_t 3212 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, 3213 int tiling_mode, bool fenced); 3214 3215 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3216 enum i915_cache_level cache_level); 3217 3218 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3219 struct dma_buf *dma_buf); 3220 3221 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3222 struct drm_gem_object *gem_obj, int flags); 3223 3224 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 3225 const struct i915_ggtt_view *view); 3226 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 3227 struct i915_address_space *vm); 3228 static inline u64 3229 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) 3230 { 3231 return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); 3232 } 3233 3234 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); 3235 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 3236 const struct i915_ggtt_view *view); 3237 bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3238 struct i915_address_space *vm); 3239 3240 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 3241 struct 
i915_address_space *vm); 3242 struct i915_vma * 3243 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3244 struct i915_address_space *vm); 3245 struct i915_vma * 3246 i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 3247 const struct i915_ggtt_view *view); 3248 3249 struct i915_vma * 3250 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3251 struct i915_address_space *vm); 3252 struct i915_vma * 3253 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3254 const struct i915_ggtt_view *view); 3255 3256 static inline struct i915_vma * 3257 i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) 3258 { 3259 return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); 3260 } 3261 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); 3262 3263 /* Some GGTT VM helpers */ 3264 static inline struct i915_hw_ppgtt * 3265 i915_vm_to_ppgtt(struct i915_address_space *vm) 3266 { 3267 return container_of(vm, struct i915_hw_ppgtt, base); 3268 } 3269 3270 3271 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 3272 { 3273 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3274 } 3275 3276 static inline unsigned long 3277 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3278 { 3279 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3280 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3281 3282 return i915_gem_obj_size(obj, &ggtt->base); 3283 } 3284 3285 static inline int __must_check 3286 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3287 uint32_t alignment, 3288 unsigned flags) 3289 { 3290 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3291 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3292 3293 return i915_gem_object_pin(obj, &ggtt->base, 3294 alignment, flags | PIN_GLOBAL); 3295 } 3296 3297 static inline int 3298 i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) 3299 { 3300 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); 3301 } 3302 3303 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3304 const struct i915_ggtt_view *view); 3305 static inline void 3306 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) 3307 { 3308 i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); 3309 } 3310 3311 /* i915_gem_fence.c */ 3312 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 3313 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 3314 3315 bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); 3316 void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); 3317 3318 void i915_gem_restore_fences(struct drm_device *dev); 3319 3320 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 3321 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 3322 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 3323 3324 /* i915_gem_context.c */ 3325 int __must_check i915_gem_context_init(struct drm_device *dev); 3326 void i915_gem_context_fini(struct drm_device *dev); 3327 void i915_gem_context_reset(struct drm_device *dev); 3328 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3329 int i915_gem_context_enable(struct drm_i915_gem_request *req); 3330 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3331 int i915_switch_context(struct drm_i915_gem_request *req); 3332 struct intel_context * 3333 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 3334 void 
i915_gem_context_free(struct kref *ctx_ref); 3335 struct drm_i915_gem_object * 3336 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3337 static inline void i915_gem_context_reference(struct intel_context *ctx) 3338 { 3339 kref_get(&ctx->ref); 3340 } 3341 3342 static inline void i915_gem_context_unreference(struct intel_context *ctx) 3343 { 3344 kref_put(&ctx->ref, i915_gem_context_free); 3345 } 3346 3347 static inline bool i915_gem_context_is_default(const struct intel_context *c) 3348 { 3349 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3350 } 3351 3352 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3353 struct drm_file *file); 3354 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3355 struct drm_file *file); 3356 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3357 struct drm_file *file_priv); 3358 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3359 struct drm_file *file_priv); 3360 3361 /* i915_gem_evict.c */ 3362 int __must_check i915_gem_evict_something(struct drm_device *dev, 3363 struct i915_address_space *vm, 3364 int min_size, 3365 unsigned alignment, 3366 unsigned cache_level, 3367 unsigned long start, 3368 unsigned long end, 3369 unsigned flags); 3370 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3371 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3372 3373 /* belongs in i915_gem_gtt.h */ 3374 static inline void i915_gem_chipset_flush(struct drm_device *dev) 3375 { 3376 if (INTEL_INFO(dev)->gen < 6) 3377 intel_gtt_chipset_flush(); 3378 } 3379 3380 /* i915_gem_stolen.c */ 3381 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3382 struct drm_mm_node *node, u64 size, 3383 unsigned alignment); 3384 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3385 struct drm_mm_node *node, u64 size, 3386 unsigned alignment, u64 start, 3387 u64 end); 3388 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3389 struct drm_mm_node *node); 3390 int i915_gem_init_stolen(struct drm_device *dev); 3391 void i915_gem_cleanup_stolen(struct drm_device *dev); 3392 struct drm_i915_gem_object * 3393 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3394 struct drm_i915_gem_object * 3395 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 3396 u32 stolen_offset, 3397 u32 gtt_offset, 3398 u32 size); 3399 3400 /* i915_gem_shrinker.c */ 3401 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3402 unsigned long target, 3403 unsigned flags); 3404 #define I915_SHRINK_PURGEABLE 0x1 3405 #define I915_SHRINK_UNBOUND 0x2 3406 #define I915_SHRINK_BOUND 0x4 3407 #define I915_SHRINK_ACTIVE 0x8 3408 #define I915_SHRINK_VMAPS 0x10 3409 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3410 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3411 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3412 3413 3414 /* i915_gem_tiling.c */ 3415 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3416 { 3417 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3418 3419 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3420 obj->tiling_mode != I915_TILING_NONE; 3421 } 3422 3423 /* i915_gem_debug.c */ 3424 #if WATCH_LISTS 3425 int i915_verify_lists(struct drm_device *dev); 3426 #else 3427 #define i915_verify_lists(dev) 0 3428 #endif 3429 3430 /* i915_debugfs.c 
*/ 3431 int i915_debugfs_init(struct drm_minor *minor); 3432 void i915_debugfs_cleanup(struct drm_minor *minor); 3433 #ifdef CONFIG_DEBUG_FS 3434 int i915_debugfs_connector_add(struct drm_connector *connector); 3435 void intel_display_crc_init(struct drm_device *dev); 3436 #else 3437 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3438 { return 0; } 3439 static inline void intel_display_crc_init(struct drm_device *dev) {} 3440 #endif 3441 3442 /* i915_gpu_error.c */ 3443 __printf(2, 3) 3444 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3445 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3446 const struct i915_error_state_file_priv *error); 3447 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3448 struct drm_i915_private *i915, 3449 size_t count, loff_t pos); 3450 static inline void i915_error_state_buf_release( 3451 struct drm_i915_error_state_buf *eb) 3452 { 3453 kfree(eb->buf); 3454 } 3455 void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 3456 const char *error_msg); 3457 void i915_error_state_get(struct drm_device *dev, 3458 struct i915_error_state_file_priv *error_priv); 3459 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3460 void i915_destroy_error_state(struct drm_device *dev); 3461 3462 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3463 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3464 3465 /* i915_cmd_parser.c */ 3466 int i915_cmd_parser_get_version(void); 3467 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); 3468 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); 3469 bool i915_needs_cmd_parser(struct intel_engine_cs *engine); 3470 int i915_parse_cmds(struct intel_engine_cs *engine, 3471 struct drm_i915_gem_object *batch_obj, 3472 struct drm_i915_gem_object *shadow_batch_obj, 3473 u32 batch_start_offset, 3474 u32 batch_len, 3475 bool is_master); 3476 3477 /* i915_suspend.c */ 3478 extern int i915_save_state(struct drm_device *dev); 3479 extern int i915_restore_state(struct drm_device *dev); 3480 3481 /* i915_sysfs.c */ 3482 void i915_setup_sysfs(struct drm_device *dev_priv); 3483 void i915_teardown_sysfs(struct drm_device *dev_priv); 3484 3485 /* intel_i2c.c */ 3486 extern int intel_setup_gmbus(struct drm_device *dev); 3487 extern void intel_teardown_gmbus(struct drm_device *dev); 3488 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3489 unsigned int pin); 3490 3491 extern struct i2c_adapter * 3492 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3493 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3494 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3495 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3496 { 3497 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3498 } 3499 extern void intel_i2c_reset(struct drm_device *dev); 3500 3501 /* intel_bios.c */ 3502 int intel_bios_init(struct drm_i915_private *dev_priv); 3503 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3504 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3505 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3506 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3507 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum 

/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_device *dev);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_device *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

struct intel_device_info *i915_get_device_id(int device);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })
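
/*
 * Illustrative sketch (not part of the header proper): I915_READ64_2x32()
 * re-reads the upper half until two consecutive reads agree, so a 64-bit
 * counter split across two 32-bit registers can be sampled without tearing
 * even on 32-bit builds. Assuming hypothetical TIMESTAMP_LO/TIMESTAMP_HI
 * register definitions, a caller would write:
 *
 *	u64 ts = I915_READ64_2x32(TIMESTAMP_LO, TIMESTAMP_HI);
 */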

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
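
/*
 * Illustrative sketch (not driver code): the _FW accessors bypass the
 * forcewake bookkeeping and tracing done by I915_READ()/I915_WRITE(), so
 * per the note above the caller must bracket them with the irqlock
 * helpers (exact signatures are not shown in this header):
 *
 *	intel_uncore_forcewake_irqlock(...);
 *	I915_WRITE_FW(reg, val);
 *	POSTING_READ_FW(reg);
 *	intel_uncore_forcewake_irqunlock(...);
 */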

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

/*
 * The "+ 1" in the helpers below pads the converted value by one jiffy:
 * the current tick is already partially elapsed when the conversion
 * happens, so a raw conversion could expire up to one jiffy early. The
 * extra jiffy guarantees the caller waits at least the requested time.
 */
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
#if 0
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
#else
		msleep(jiffies_to_msecs(remaining_jiffies));
#endif
	}
}
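
/*
 * Illustrative sketch (hypothetical caller, not part of this header): a
 * panel power sequence that must keep power off for at least a given
 * number of milliseconds between disable and re-enable can record the
 * disable time and later sleep only for whatever remains. Assuming
 * hypothetical last_power_cycle and power_cycle_delay_ms fields:
 *
 *	panel->last_power_cycle = jiffies;	(event A: power off)
 *	...
 *	wait_remaining_ms_from_jiffies(panel->last_power_cycle,
 *				       panel->power_cycle_delay_ms);
 *	(event B: safe to power the panel back up)
 */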

static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
				      struct drm_i915_gem_request *req)
{
	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
		i915_gem_request_assign(&engine->trace_irq_req, req);
}

#endif