/*	$NetBSD: intel_gtt.h,v 1.13 2022/08/20 23:19:09 riastradh Exp $	*/

/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2020 Intel Corporation
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __INTEL_GTT_H__
#define __INTEL_GTT_H__

#include <linux/io-mapping.h>
#include <linux/ioport.h>
#include <linux/highmem.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include <drm/drm_mm.h>

#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_selftest.h"
#include "i915_vma_types.h"

#ifdef __NetBSD__
#include <drm/bus_dma_hacks.h>
#include <x86/machdep.h>
#include <machine/pte.h>
#define	_PAGE_PRESENT	PTE_P	/* 0x01 PTE is present / valid */
#define	_PAGE_RW	PTE_W	/* 0x02 read/write */
#define	_PAGE_PWT	PTE_PWT	/* 0x08 write-through */
#define	_PAGE_PCD	PTE_PCD	/* 0x10 page cache disabled / non-cacheable */
#define	_PAGE_PAT	PTE_PAT	/* 0x80 page attribute table on PTE */
#endif

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
#define DBG(...) trace_printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#define NALLOC 3 /* 1 normal, 1 for concurrent threads, 1 for preallocation */

#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
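/*
 * Note, for illustration: negating the unsigned 64-bit page size yields
 * the mask in two's complement, e.g. -BIT_ULL(12) == 0xfffffffffffff000,
 * which keeps the page-aligned bits and clears the page-offset bits.
 */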

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)

#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)

/* gen6-hsw uses PTE bits 11:4 to store physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			REG_BIT(0)
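/*
 * For illustration (not driver API): a physical address with bit 32
 * set, addr = 0x100000000, encodes as
 *
 *	GEN6_GTT_ADDR_ENCODE(addr) == addr | ((addr >> 28) & 0xff0)
 *				   == 0x100000010
 *
 * i.e. physical address bit 32 lands in PTE bit 4; the low 32 bits,
 * together with the cache and valid bits above, form the gen6_pte_t.
 */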

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE		        (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			REG_BIT(0)
#define NUM_PTE(pde_shift)     (1 << (pde_shift - PAGE_SHIFT))
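/*
 * Worked example, assuming the usual x86 PAGE_SIZE of 4096 (PAGE_SHIFT
 * of 12): GEN6_PTES = 4096 / sizeof(gen6_pte_t) = 1024 PTEs per page
 * table, and NUM_PTE(GEN6_PDE_SHIFT) = 1 << (22 - 12) = 1024, so each
 * PDE spans 1024 * 4 KiB = 4 MiB of GPU virtual address space.
 */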

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
#define BYT_PTE_WRITEABLE		REG_BIT(1)

/*
 * Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
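/*
 * For illustration, expanding two of the values above:
 *
 *	HSW_WB_LLC_AGE3      == (0x2 << 1)        == 0x004
 *	HSW_WB_ELLC_LLC_AGE3 == (0x8 << (11 - 3)) == 0x800
 *
 * showing how the fourth cacheability bit moves from bit 3 of the
 * 4-bit value to bit 11 of the PTE.
 */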

/*
 * GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference compared to a normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_3LVL_PDPES			4
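/*
 * For illustration (not driver API): extracting the four 48b-style
 * level indices from a GPU virtual address, each level being 9 bits
 * wide (512 entries, matching I915_PDES):
 *
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 */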

#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			REG_BIT(6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

#define for_each_sgt_daddr(__dp, __iter, __sgt) \
	__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
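/*
 * Typical usage sketch (assuming struct sgt_iter and
 * __for_each_sgt_daddr from "i915_scatterlist.h"):
 *
 *	struct sgt_iter iter;
 *	dma_addr_t daddr;
 *
 *	for_each_sgt_daddr(daddr, iter, vma->pages)
 *		insert_pte(vm, daddr);
 *
 * where insert_pte() stands for a hypothetical per-page callback and
 * the walk advances in I915_GTT_PAGE_SIZE steps.
 */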

struct i915_page_dma {
	struct page *page;
#ifdef __NetBSD__
	union {
		bus_dma_segment_t seg;
		uint32_t ggtt_offset;
	};
	bus_dmamap_t map;
#else
	union {
		dma_addr_t daddr;

		/*
		 * For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
#endif
};

struct i915_page_scratch {
	struct i915_page_dma base;
	u64 encode;
};

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_table pt;
	spinlock_t lock;
	void *entry[512];
};

#define __px_choose_expr(x, type, expr, other) \
	__builtin_choose_expr( \
	__builtin_types_compatible_p(typeof(x), type) || \
	__builtin_types_compatible_p(typeof(x), const type), \
	({ type __x = (type)__UNCONST(x); expr; }), \
	other)

#define px_base(px) \
	__px_choose_expr(px, struct i915_page_dma *, __x, \
	__px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
	__px_choose_expr(px, struct i915_page_table *, &__x->base, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
	(void)0))))
#ifdef __NetBSD__
#define px_dma(px) (px_base(px)->map->dm_segs[0].ds_addr)
#else
#define px_dma(px) (px_base(px)->daddr)
#endif

#define px_pt(px) \
	__px_choose_expr(px, struct i915_page_table *, __x, \
	__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
	(void)0))
#define px_used(px) (&px_pt(px)->used)
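/*
 * For illustration: the px_* accessors resolve at compile time based on
 * the argument's type, so given a struct i915_page_directory *pd,
 * px_base(pd) yields &pd->pt.base, px_dma(pd) the DMA address of its
 * backing page, and px_used(pd) the used counter of the embedded page
 * table.
 */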

enum i915_cache_level;

struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
struct intel_gt;

struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
#ifndef __NetBSD__
	spinlock_t lock;
	struct pagevec pvec;
#endif
};

void stash_init(struct pagestash *stash);

struct i915_address_space {
	struct kref ref;
	struct rcu_work rcu;

	struct drm_mm mm;
	struct intel_gt *gt;
	struct drm_i915_private *i915;
#ifdef __NetBSD__
	bus_dma_tag_t dmat;
#else
	struct device *dma;
#endif

	/*
	 * Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	unsigned int bind_async_flags;

	/*
	 * Each active user context has its own address space (in full-ppgtt).
	 * Since the vm may be shared between multiple contexts, we count how
	 * many contexts keep us "open". Once open hits zero, we are closed
	 * and do not allow any new attachments, and proceed to shutdown our
	 * vma and page directories.
	 */
	atomic_t open;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	struct i915_page_scratch scratch[4];
	unsigned int scratch_order;
	unsigned int top;

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

#ifndef __NetBSD__
	struct pagestash free_pages;
#endif

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	BIT(0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};

/*
 * The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;          /* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
#ifdef __NetBSD__
	/*
	 * This is not actually the `Graphics Stolen Memory'; it is the
	 * graphics translation table, which we write to through the
	 * GTTADR/GTTMMADR PCI BAR, and which is backed by `Graphics
	 * GTT Stolen Memory'.  That isn't the `Graphics Stolen Memory'
	 * either, although it is stolen from main memory.
	 */
	bus_space_tag_t		gsmt;
	bus_space_handle_t	gsmh;
	bus_size_t		gsmsz;

	/* Maximum physical address that can be wired into a GTT entry.  */
	uint64_t		max_paddr;

	/* Page freelist for pages limited to the above maximum address.  */
	int			pgfl;
#else
	void __iomem *gsm;
#endif
	void (*invalidate)(struct i915_ggtt *ggtt);

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_ppgtt *alias;

	bool do_idle_maps;

	int mtrr;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	u32 pin_bias;

	unsigned int num_fences;
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/**
	 * List of all objects in gtt_space, currently mmapped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct mutex error_mutex;
	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	struct i915_page_directory *pd;
};

#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}

static inline bool
i915_vm_has_cache_coloring(struct i915_address_space *vm)
{
	return i915_is_ggtt(vm) && vm->mm.color_adjust;
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}

static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}

static inline struct i915_address_space *
i915_vm_open(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	atomic_inc(&vm->open);
	return i915_vm_get(vm);
}

static inline bool
i915_vm_tryopen(struct i915_address_space *vm)
{
	if (atomic_add_unless(&vm->open, 1, 0))
		return i915_vm_get(vm);

	return false;
}

void __i915_vm_close(struct i915_address_space *vm);

static inline void
i915_vm_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(!atomic_read(&vm->open));
	if (atomic_dec_and_test(&vm->open))
		__i915_vm_close(vm);

	i915_vm_put(vm);
}
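/*
 * Usage sketch (illustrative): the open count gates new attachments
 * while the kref keeps the structure alive, so holders typically pair
 * the calls:
 *
 *	vm = i915_vm_open(vm);		(+1 open, +1 ref)
 *	... bind vmas, use the address space ...
 *	i915_vm_close(vm);		(-1 open, -1 ref)
 *
 * The final close tears down the vma and page directories, while the
 * final put frees the structure itself.
 */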

void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);

static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/*
 * Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
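/*
 * Worked example (gen6, pde_shift = 22, 4 KiB pages): addr = 0x3ff000,
 * length = 0x2000 would span PTE index 1023 and index 0 of the next
 * page table, so the clamped count is NUM_PTE(22) - 1023 = 1; the
 * caller is expected to loop, advancing addr by the returned count of
 * pages.
 */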

static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_dma *pt = ppgtt->pd->entry[n];

	return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}

void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);

int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);

static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end > 0;
}

int i915_ppgtt_init_hw(struct intel_gt *gt);

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);

u64 gen8_pte_encode(dma_addr_t addr,
		    enum i915_cache_level level,
		    u32 flags);

int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

void
fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count);

#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
#define fill32_px(px, v) do {						\
	u64 v__ = lower_32_bits(v);					\
	fill_px((px), v__ << 32 | v__);					\
} while (0)
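/*
 * For illustration: fill32_px replicates a 32-bit value into both
 * halves of each 64-bit slot, so e.g. fill32_px(pt, 0xdeadbeef) fills
 * the page with 0xdeadbeefdeadbeef - each write covers two gen6
 * (32-bit) PTEs while fill_page_dma still operates on u64s.
 */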

int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp);
void cleanup_scratch_page(struct i915_address_space *vm);
void free_scratch(struct i915_address_space *vm);

struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(size_t sz);

void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd);

#define free_px(vm, px) free_pd(vm, px_base(px))

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_dma * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level));

#define set_pd_entry(pd, idx, to) \
	__set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct i915_page_scratch * const scratch);

bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct i915_page_scratch * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);

int ggtt_set_pages(struct i915_vma *vma);
int ppgtt_set_pages(struct i915_vma *vma);
void clear_pages(struct i915_vma *vma);

void gtt_write_workarounds(struct intel_gt *gt);

void setup_private_pat(struct intel_uncore *uncore);

#ifdef __NetBSD__
struct sgt_dma {
	bus_dmamap_t map;
	unsigned seg;
	bus_size_t off;
};
static inline struct sgt_dma
sgt_dma(struct i915_vma *vma)
{
	return (struct sgt_dma) { vma->pages->sgl->sg_dmamap, 0, 0 };
}
#else
static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);

	return (struct sgt_dma){ sg, addr, addr + sg->length };
}
#endif

#endif