/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include "dev/drm/drm_mm.h"
#include "i915_reg.h"

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum i915_pipe {
	PIPE_A = 0,
	PIPE_B,
};

#define I915_NUM_PIPE	2

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_BUF	0
#define WATCH_EXEC	0
#define WATCH_LRU	0
#define WATCH_RELOC	0
#define WATCH_INACTIVE	0
#define WATCH_PWRITE	0

typedef struct _drm_i915_ring_buffer {
	int tail_mask;
	unsigned long Size;
	u8 *virtual_start;
	int head;
	int tail;
	int space;
	drm_local_map_t map;
	struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;

struct mem_block {
	struct mem_block *next;
	struct mem_block *prev;
	int start;
	int size;
	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	struct opregion_asle *asle;
	int enabled;
};

typedef struct drm_i915_private {
	struct drm_device *dev;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;

	drm_i915_sarea_t *sarea_priv;
	drm_i915_ring_buffer_t ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	wait_queue_head_t irq_queue;
	/** Protects user_irq_refcount and irq_mask_reg */
	DRM_SPINTYPE user_irq_lock;
	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
	int user_irq_refcount;
	/** Cached value of IER to avoid reads in updating the bitfield */
	u32 irq_mask_reg;
	u32 pipestat[2];

	int tex_lru_log_granularity;
	int allow_batchbuffer;
	struct mem_block *agp_heap;
	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
	int vblank_pipe;

	struct intel_opregion opregion;

	/* Register state */
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 saveRENDERSTANDBY;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveCACHE_MODE_0;
	u32 saveD_STATE;
	u32 saveCG_2D_DIS;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];

	struct {
#ifdef __linux__
		struct drm_mm gtt_space;
#endif
		/**
		 * List of objects currently involved in rendering from the
		 * ringbuffer.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head active_list;

		/**
		 * List of objects which are not in the ringbuffer but which
		 * still have a write_domain which needs to be flushed before
		 * unbinding.
		 *
		 * A reference is held on the buffer while on this list.
		 */
		struct list_head flushing_list;

		/**
		 * LRU list of objects which are not in the ringbuffer and
		 * are ready to unbind, but are still in the GTT.
		 *
		 * A reference is not held on the buffer while on this list,
		 * as merely being GTT-bound shouldn't prevent its being
		 * freed, and we'll pull it off the list in the free path.
		 */
		struct list_head inactive_list;

		/**
		 * List of breadcrumbs associated with GPU requests currently
		 * outstanding.
		 */
		struct list_head request_list;
#ifdef __linux__
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
#endif
		uint32_t next_gem_seqno;

		/**
		 * Waiting sequence number, if any
		 */
		uint32_t waiting_gem_seqno;

		/**
		 * Last seq seen at irq time
		 */
		uint32_t irq_gem_seqno;

		/**
		 * Flag if the X Server, and thus DRM, is not currently in
		 * control of the device.
		 *
		 * This is set between LeaveVT and EnterVT. It needs to be
		 * replaced with a semaphore. It also needs to be
		 * transitioned away from for kernel modesetting.
		 */
		int suspended;

		/**
		 * Flag if the hardware appears to be wedged.
		 *
		 * This is set when attempts to idle the device time out.
		 * It prevents command submission from occurring and makes
		 * every pending request fail.
		 */
		int wedged;

		/** Bit 6 swizzling required for X tiling */
		uint32_t bit_6_swizzle_x;
		/** Bit 6 swizzling required for Y tiling */
		uint32_t bit_6_swizzle_y;
	} mm;
} drm_i915_private_t;

enum intel_chip_family {
	CHIP_I8XX = 0x01,
	CHIP_I9XX = 0x02,
	CHIP_I915 = 0x04,
	CHIP_I965 = 0x08,
};

/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
	struct drm_gem_object *obj;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;

	/** This object's place on the active/flushing/inactive lists */
	struct list_head list;

	/**
	 * This is set if the object is on the active or flushing lists
	 * (has pending rendering), and is not set if it's on inactive (ready
	 * to be unbound).
	 */
	int active;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	int dirty;

	/** AGP memory structure for our GTT binding. */
	DRM_AGP_MEM *agp_mem;

	struct page **page_list;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	/** Boolean whether this object has a valid gtt offset. */
	int gtt_bound;

	/** How many users have pinned this object in GTT space */
	int pin_count;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_rendering_seqno;

	/** Current tiling mode for the object. */
	uint32_t tiling_mode;

	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
	uint32_t agp_type;

	/**
	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
	 * GEM_DOMAIN_CPU is not in the object's read domain.
	 */
	uint8_t *page_cpu_valid;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** Cache domains that were flushed at the start of the request. */
	uint32_t flush_domains;

	struct list_head list;
};

struct drm_i915_file_private {
	struct {
		uint32_t last_gem_seqno;
		uint32_t last_gem_throttle_seqno;
	} mm;
};

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
		struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
		struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
		struct drm_clip_rect __user *boxes,
		int i, int DR1, int DR4);

/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);

extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
extern void i915_driver_irq_uninstall(struct drm_device * dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
extern u32 g45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
		struct drm_file *file_priv);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
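
/*
 * Example (an illustrative sketch, not part of the original interface): a
 * caller such as i915_enable_vblank() typically turns a pipe's vblank
 * interrupt on through i915_enable_pipestat().  PIPE_VBLANK_INTERRUPT_ENABLE
 * and PIPE_START_VBLANK_INTERRUPT_ENABLE are PIPESTAT enable bits from
 * i915_reg.h; the caller is expected to hold user_irq_lock (locking elided
 * here).
 *
 *	drm_i915_private_t *dev_priv = dev->dev_private;
 *
 *	if (IS_I965G(dev))
 *		i915_enable_pipestat(dev_priv, pipe,
 *				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 *	else
 *		i915_enable_pipestat(dev_priv, pipe,
 *				     PIPE_VBLANK_INTERRUPT_ENABLE);
 */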

/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_mem_free(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_mem_init_heap(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
		struct drm_file *file_priv, struct mem_block *heap);
#ifdef I915_HAVE_GEM
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
		struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_proc_init(struct drm_minor *minor);
void i915_gem_proc_cleanup(struct drm_minor *minor);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
uint32_t i915_get_gem_seqno(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);

/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
		const char *where, uint32_t mark);
#if WATCH_INACTIVE
void i915_verify_inactive(struct drm_device *dev, char *file, int line);
#else
#define i915_verify_inactive(dev, file, line)
#endif
void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
void i915_dump_lru(struct drm_device *dev, const char *where);
#endif /* I915_HAVE_GEM */

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL)	\
		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
} while (0)

#if defined(__FreeBSD__)
typedef boolean_t bool;
#endif

#define I915_READ(reg)		DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_READ8(reg)		DRM_READ8(dev_priv->mmio_map, (reg))
#define I915_WRITE8(reg,val)	DRM_WRITE8(dev_priv->mmio_map, (reg), (val))

#define I915_VERBOSE 0

#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
			volatile char *virt;

#define BEGIN_LP_RING(n) do {				\
	if (I915_VERBOSE)				\
		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
	if (dev_priv->ring.space < (n)*4)		\
		i915_wait_ring(dev, (n)*4, __func__);	\
	outcount = 0;					\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

#define OUT_RING(n) do {					\
	if (I915_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = (n);	\
	outcount++;						\
	outring += 4;						\
	outring &= ringmask;					\
} while (0)

#define ADVANCE_LP_RING() do {					\
	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
	dev_priv->ring.tail = outring;				\
	dev_priv->ring.space -= outcount * 4;			\
	I915_WRITE(PRB0_TAIL, outring);				\
} while (0)

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg)	(((volatile u32*)(dev_priv->hw_status_page))[reg])
#define READ_BREADCRUMB(dev_priv)	READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21
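
/*
 * Example (an illustrative sketch, not part of the original header): how a
 * breadcrumb value normally reaches dword 0x21 of the status page, and how it
 * is read back.  MI_STORE_DWORD_INDEX, MI_STORE_DWORD_INDEX_SHIFT and
 * MI_USER_INTERRUPT are the MI command encodings from i915_reg.h; error
 * handling, locking and counter wrap-around are omitted.
 *
 *	drm_i915_private_t *dev_priv = dev->dev_private;
 *	RING_LOCALS;
 *
 *	dev_priv->counter++;
 *	BEGIN_LP_RING(4);
 *	OUT_RING(MI_STORE_DWORD_INDEX);
 *	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 *	OUT_RING(dev_priv->counter);
 *	OUT_RING(MI_USER_INTERRUPT);
 *	ADVANCE_LP_RING();
 *
 * Once the GPU has executed the store, READ_BREADCRUMB(dev_priv) returns a
 * value >= dev_priv->counter, which is how the driver tells that the hardware
 * has passed this point in the command stream.
 */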

extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I855(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)

#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 || \
			(dev)->pci_device == 0x27AE)
#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
		       (dev)->pci_device == 0x2982 || \
		       (dev)->pci_device == 0x2992 || \
		       (dev)->pci_device == 0x29A2 || \
		       (dev)->pci_device == 0x2A02 || \
		       (dev)->pci_device == 0x2A12 || \
		       (dev)->pci_device == 0x2A42 || \
		       (dev)->pci_device == 0x2E02 || \
		       (dev)->pci_device == 0x2E12 || \
		       (dev)->pci_device == 0x2E22 || \
		       (dev)->pci_device == 0x2E32)

#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)

#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)

#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
		     (dev)->pci_device == 0x2E12 || \
		     (dev)->pci_device == 0x2E22 || \
		     (dev)->pci_device == 0x2E32 || \
		     IS_GM45(dev))

#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))

#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
		     (dev)->pci_device == 0x29B2 || \
		     (dev)->pci_device == 0x29D2 || \
		     IS_IGD(dev))

#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))

#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
			IS_IGD(dev))

#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))

#define PRIMARY_RINGBUFFER_SIZE		(128*1024)

#endif