/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
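/*
 * Worked example (illustrative): a page at physical address
 * 0x3_0000_0000 has bits 39:32 == 0x03; the encode folds them into PTE
 * bits 11:4:
 *
 *	GEN6_GTT_ADDR_ENCODE(0x300000000ULL)
 *		== 0x300000000 | ((0x300000000 >> 28) & 0xff0)
 *		== 0x300000030
 *
 * The untouched high bits truncate away when the result is stored in a
 * 32b gen6_pte_t, leaving only the folded copy in bits 11:4.
 */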
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

#define GEN12_PPGTT_PTE_LM		BIT_ULL(11)

#define GEN12_GGTT_PTE_LM		BIT_ULL(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
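/*
 * Worked example (illustrative): HSW_WB_ELLC_LLC_AGE3 selects
 * cacheability value 0x8, whose low three bits (0b000) land in PTE
 * bits 3:1 and whose fourth bit lands in PTE bit 11:
 *
 *	HSW_CACHEABILITY_CONTROL(0x8)
 *		== ((0x8 & 0x7) << 1) | ((0x8 & 0x8) << (11 - 3))
 *		== 0x800
 */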

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  | PDE   | PTE   | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  | PDE   | PTE   | offset
 */
#define GEN8_3LVL_PDPES 4
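/*
 * Worked example (illustrative): the 48b address 0x8080604005
 * decomposes as
 *
 *	PML4E  = (addr >> 39) & 0x1ff == 1
 *	PDPE   = (addr >> 30) & 0x1ff == 2
 *	PDE    = (addr >> 21) & 0x1ff == 3
 *	PTE    = (addr >> 12) & 0x1ff == 4
 *	offset =  addr        & 0xfff == 5
 */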

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
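/*
 * Illustrative sketch (not the driver's actual PPAT table): entries
 * are packed 8 bits apart, so programming entry 2 as writeback in
 * LLC/eLLC with maximum age could be expressed as
 *
 *	u64 pat = GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLCeLLC |
 *			       GEN8_PPAT_AGE(3));
 */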

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_fence_reg;
struct i915_vma;
struct intel_gt;

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
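/*
 * Usage sketch (illustrative, not taken from the driver): visit each
 * 4K dma address backing a vma, assuming struct sgt_iter from
 * "i915_scatterlist.h":
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	for_each_sgt_daddr(daddr, iter, vma->pages)
 *		insert_one_pte(vm, daddr);	// hypothetical helper
 */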

struct i915_page_table {
	struct drm_i915_gem_object *base;
	union {
		atomic_t used;
		struct i915_page_table *stash;
	};
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void **entry;
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct drm_i915_gem_object *, __x, \
	__px_choose_expr(px, struct i915_page_table *, __x->base, \
	__px_choose_expr(px, struct i915_page_directory *, __x->pt.base, \
	(void)0)))

struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
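/*
 * Illustrative expansions (for reading the macros above, not new API):
 * given struct i915_page_directory *pd, the type-based dispatch yields
 *
 *	px_base(pd) -> pd->pt.base
 *	px_dma(pd)  -> __px_dma(pd->pt.base)
 *	px_pt(pd)   -> &pd->pt
 *	px_used(pd) -> &pd->pt.used
 *
 * A pointer type not listed in the __px_choose_expr() chain fails to
 * compile, since (void)0 is not a valid operand.
 */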

struct i915_vm_pt_stash {
	/* preallocated chains of page tables/directories */
	struct i915_page_table *pt[2];
};

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	void (*bind_vma)(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_address_space *vm,
			   struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
	struct device *dma;
	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
	struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct drm_i915_gem_object *scratch[4];
	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u8 top;
	u8 pd_shift;
	u8 scratch_order;

	struct drm_i915_gem_object *
		(*alloc_pt_dma)(struct i915_address_space *vm, int sz);

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)
#define PTE_LM		BIT(1)

	void (*allocate_va_range)(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg *fence_regs;
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

int __must_check
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}
static inline struct i915_address_space *
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return NULL;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	__i915_vm_close(vm);

	i915_vm_put(vm);
}

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
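/*
 * Worked example (illustrative): with the gen6 GEN6_PDE_SHIFT of 22,
 * each page table covers NUM_PTE(22) == 1024 entries, and the address
 * 0x00401000 maps to PTE index (0x401 & 1023) == 1 within its table.
 */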

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
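/*
 * Worked example (illustrative): addr == 0x3ff000, length == 0x3000,
 * pde_shift == 22. The range ends at 0x402000 and so crosses the 4M
 * page-table boundary at 0x400000; only the single PTE remaining in
 * the first table is counted:
 *
 *	NUM_PTE(22) - i915_pte_index(0x3ff000, 22) == 1024 - 1023 == 1
 */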

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_table *pt = ppgtt->pd->entry[n];

	return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);

#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))

void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
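/*
 * Illustrative: fill32_px() replicates a 32b PTE value into both
 * halves of a u64 so that a page of gen6 PTEs can be written with 64b
 * stores, e.g. v == 0xdeadbeef fills each qword with
 * 0xdeadbeefdeadbeef.
 */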

int setup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);

int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);

void free_px(struct i915_address_space *vm,
	     struct i915_page_table *pt, int lvl);
#define free_pt(vm, px) free_px(vm, px, 0)
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table *pt,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_pt(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags);
void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size);
int i915_vm_pin_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash);

struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);

struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
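/*
 * Usage sketch (illustrative, condensed from how the PTE writers walk
 * the scatterlist; insert_one_pte() is hypothetical):
 *
 *	struct sgt_dma it = sgt_dma(vma);
 *
 *	do {
 *		insert_one_pte(vm, it.dma);
 *		it.dma += I915_GTT_PAGE_SIZE;
 *		if (it.dma >= it.max) {
 *			it.sg = sg_next(it.sg);
 *			if (!it.sg || !sg_dma_len(it.sg))
 *				break;
 *			it.dma = sg_dma_address(it.sg);
 *			it.max = it.dma + sg_dma_len(it.sg);
 *		}
 *	} while (1);
 */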

#endif