// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ARM64_KVM_PGTABLE_H__
#define __ARM64_KVM_PGTABLE_H__

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/types.h>

#define KVM_PGTABLE_FIRST_LEVEL		-1
#define KVM_PGTABLE_LAST_LEVEL		3

/*
 * The largest supported block sizes for KVM (no 52-bit PA support):
 *  - 4K (level 1):	1GB
 *  - 16K (level 2):	32MB
 *  - 64K (level 2):	512MB
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	1
#else
#define KVM_PGTABLE_MIN_BLOCK_LEVEL	2
#endif

#define kvm_lpa2_is_enabled()		system_supports_lpa2()

static inline u64 kvm_get_parange_max(void)
{
	if (kvm_lpa2_is_enabled() ||
	   (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && PAGE_SHIFT == 16))
		return ID_AA64MMFR0_EL1_PARANGE_52;
	else
		return ID_AA64MMFR0_EL1_PARANGE_48;
}

static inline u64 kvm_get_parange(u64 mmfr0)
{
	u64 parange_max = kvm_get_parange_max();
	u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	if (parange > parange_max)
		parange = parange_max;

	return parange;
}

typedef u64 kvm_pte_t;

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)
#define KVM_PTE_ADDR_MASK_LPA2		GENMASK(49, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_50_LPA2		GENMASK(9, 8)

#define KVM_PHYS_INVALID		(-1ULL)

static inline bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa;

	if (kvm_lpa2_is_enabled()) {
		pa = pte & KVM_PTE_ADDR_MASK_LPA2;
		pa |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, pte) << 50;
	} else {
		pa = pte & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16)
			pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;
	}

	return pa;
}

static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte;

	if (kvm_lpa2_is_enabled()) {
		pte = pa & KVM_PTE_ADDR_MASK_LPA2;
		pa &= GENMASK(51, 50);
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_50_LPA2, pa >> 50);
	} else {
		pte = pa & KVM_PTE_ADDR_MASK;
		if (PAGE_SHIFT == 16) {
			pa &= GENMASK(51, 48);
			pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
		}
	}

	return pte;
}

static inline kvm_pfn_t kvm_pte_to_pfn(kvm_pte_t pte)
{
	return __phys_to_pfn(kvm_pte_to_phys(pte));
}

static inline u64 kvm_granule_shift(s8 level)
{
	/* Assumes KVM_PGTABLE_LAST_LEVEL is 3 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static inline u64 kvm_granule_size(s8 level)
{
	return BIT(kvm_granule_shift(level));
}

static inline bool kvm_level_supports_block_mapping(s8 level)
{
	return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
}

static inline u32 kvm_supported_block_sizes(void)
{
	s8 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
	u32 r = 0;

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++)
		r |= BIT(kvm_granule_shift(level));

	return r;
}
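
/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12), kvm_granule_shift() is
 * ((12 - 3) * (4 - level)) + 3, so the mask returned by
 * kvm_supported_block_sizes() has bit 30 (1GB, level 1), bit 21 (2MB,
 * level 2) and bit 12 (4KB, level 3) set.
 */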

static inline bool kvm_is_block_size_supported(u64 size)
{
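	/* IS_ALIGNED(size, size) is (size & (size - 1)) == 0, i.e. a power-of-two check */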
	bool is_power_of_two = IS_ALIGNED(size, size);

	return is_power_of_two && (size & kvm_supported_block_sizes());
}

/**
 * struct kvm_pgtable_mm_ops - Memory management callbacks.
 * @zalloc_page:		Allocate a single zeroed memory page.
 *				The @arg parameter can be used by the walker
 *				to pass a memcache. The initial refcount of
 *				the page is 1.
 * @zalloc_pages_exact:		Allocate an exact number of zeroed memory pages.
 *				The @size parameter is in bytes, and is rounded
 *				up to the next page boundary. The resulting
 *				allocation is physically contiguous.
 * @free_pages_exact:		Free an exact number of memory pages previously
 *				allocated by zalloc_pages_exact.
 * @free_unlinked_table:	Free an unlinked paging structure by unlinking and
 *				dropping references.
 * @get_page:			Increment the refcount on a page.
 * @put_page:			Decrement the refcount on a page. When the
 *				refcount reaches 0 the page is automatically
 *				freed.
 * @page_count:			Return the refcount of a page.
 * @phys_to_virt:		Convert a physical address into a virtual
 *				address mapped in the current context.
 * @virt_to_phys:		Convert a virtual address mapped in the current
 *				context into a physical address.
 * @dcache_clean_inval_poc:	Clean and invalidate the data cache to the PoC
 *				for the specified memory address range.
 * @icache_inval_pou:		Invalidate the instruction cache to the PoU
 *				for the specified memory address range.
 */
struct kvm_pgtable_mm_ops {
	void*		(*zalloc_page)(void *arg);
	void*		(*zalloc_pages_exact)(size_t size);
	void		(*free_pages_exact)(void *addr, size_t size);
	void		(*free_unlinked_table)(void *addr, s8 level);
	void		(*get_page)(void *addr);
	void		(*put_page)(void *addr);
	int		(*page_count)(void *addr);
	void*		(*phys_to_virt)(phys_addr_t phys);
	phys_addr_t	(*virt_to_phys)(void *addr);
	void		(*dcache_clean_inval_poc)(void *addr, size_t size);
	void		(*icache_inval_pou)(void *addr, size_t size);
};
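
/*
 * A minimal sketch of a host-side implementation of a few of these
 * callbacks, for illustration only. The "example_" names are hypothetical;
 * a real user (e.g. arch/arm64/kvm/mmu.c) wires up its own helpers. This
 * assumes a kernel context where the linear map and page allocator are
 * available.
 *
 *	static void *example_zalloc_page(void *arg)
 *	{
 *		// The memcache argument is ignored in this sketch; a page
 *		// from get_zeroed_page() already has a refcount of 1.
 *		return (void *)get_zeroed_page(GFP_KERNEL);
 *	}
 *
 *	static void *example_phys_to_virt(phys_addr_t phys)
 *	{
 *		return __va(phys);
 *	}
 *
 *	static phys_addr_t example_virt_to_phys(void *addr)
 *	{
 *		return __pa(addr);
 *	}
 *
 *	static struct kvm_pgtable_mm_ops example_mm_ops = {
 *		.zalloc_page	= example_zalloc_page,
 *		.phys_to_virt	= example_phys_to_virt,
 *		.virt_to_phys	= example_virt_to_phys,
 *	};
 */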

/**
 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
 * @KVM_PGTABLE_S2_NOFWB:	Don't enforce Normal-WB even if the CPUs have
 *				ARM64_HAS_STAGE2_FWB.
 * @KVM_PGTABLE_S2_IDMAP:	Only use identity mappings.
 */
enum kvm_pgtable_stage2_flags {
	KVM_PGTABLE_S2_NOFWB			= BIT(0),
	KVM_PGTABLE_S2_IDMAP			= BIT(1),
};

/**
 * enum kvm_pgtable_prot - Page-table permissions and attributes.
 * @KVM_PGTABLE_PROT_X:		Execute permission.
 * @KVM_PGTABLE_PROT_W:		Write permission.
 * @KVM_PGTABLE_PROT_R:		Read permission.
 * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
 * @KVM_PGTABLE_PROT_NORMAL_NC:	Normal noncacheable attributes.
 * @KVM_PGTABLE_PROT_SW0:	Software bit 0.
 * @KVM_PGTABLE_PROT_SW1:	Software bit 1.
 * @KVM_PGTABLE_PROT_SW2:	Software bit 2.
 * @KVM_PGTABLE_PROT_SW3:	Software bit 3.
 */
enum kvm_pgtable_prot {
	KVM_PGTABLE_PROT_X			= BIT(0),
	KVM_PGTABLE_PROT_W			= BIT(1),
	KVM_PGTABLE_PROT_R			= BIT(2),

	KVM_PGTABLE_PROT_DEVICE			= BIT(3),
	KVM_PGTABLE_PROT_NORMAL_NC		= BIT(4),

	KVM_PGTABLE_PROT_SW0			= BIT(55),
	KVM_PGTABLE_PROT_SW1			= BIT(56),
	KVM_PGTABLE_PROT_SW2			= BIT(57),
	KVM_PGTABLE_PROT_SW3			= BIT(58),
};

#define KVM_PGTABLE_PROT_RW	(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
#define KVM_PGTABLE_PROT_RWX	(KVM_PGTABLE_PROT_RW | KVM_PGTABLE_PROT_X)

#define PKVM_HOST_MEM_PROT	KVM_PGTABLE_PROT_RWX
#define PKVM_HOST_MMIO_PROT	KVM_PGTABLE_PROT_RW

#define PAGE_HYP		KVM_PGTABLE_PROT_RW
#define PAGE_HYP_EXEC		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
#define PAGE_HYP_RO		(KVM_PGTABLE_PROT_R)
#define PAGE_HYP_DEVICE		(PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)

typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
					   enum kvm_pgtable_prot prot);
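
/*
 * A hedged sketch of a force-PTE policy callback; the helper name and the
 * policy below are illustrative only. Returning true makes the mapping code
 * use PAGE_SIZE mappings instead of block mappings for the given range.
 *
 *	static bool example_force_pte_cb(u64 addr, u64 end,
 *					 enum kvm_pgtable_prot prot)
 *	{
 *		// Hypothetical policy: never use block mappings for
 *		// executable ranges.
 *		return prot & KVM_PGTABLE_PROT_X;
 *	}
 */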

/**
 * enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
 * @KVM_PGTABLE_WALK_LEAF:		Visit leaf entries, including invalid
 *					entries.
 * @KVM_PGTABLE_WALK_TABLE_PRE:		Visit table entries before their
 *					children.
 * @KVM_PGTABLE_WALK_TABLE_POST:	Visit table entries after their
 *					children.
 * @KVM_PGTABLE_WALK_SHARED:		Indicates the page-tables may be shared
 *					with other software walkers.
 * @KVM_PGTABLE_WALK_HANDLE_FAULT:	Indicates the page-table walk was
 *					invoked from a fault handler.
 * @KVM_PGTABLE_WALK_SKIP_BBM_TLBI:	Visit and update table entries
 *					without Break-before-make's
 *					TLB invalidation.
 * @KVM_PGTABLE_WALK_SKIP_CMO:		Visit and update table entries
 *					without performing the cache
 *					maintenance operations that would
 *					normally be required.
 */
enum kvm_pgtable_walk_flags {
	KVM_PGTABLE_WALK_LEAF			= BIT(0),
	KVM_PGTABLE_WALK_TABLE_PRE		= BIT(1),
	KVM_PGTABLE_WALK_TABLE_POST		= BIT(2),
	KVM_PGTABLE_WALK_SHARED			= BIT(3),
	KVM_PGTABLE_WALK_HANDLE_FAULT		= BIT(4),
	KVM_PGTABLE_WALK_SKIP_BBM_TLBI		= BIT(5),
	KVM_PGTABLE_WALK_SKIP_CMO		= BIT(6),
};

struct kvm_pgtable_visit_ctx {
	kvm_pte_t				*ptep;
	kvm_pte_t				old;
	void					*arg;
	struct kvm_pgtable_mm_ops		*mm_ops;
	u64					start;
	u64					addr;
	u64					end;
	s8					level;
	enum kvm_pgtable_walk_flags		flags;
};

typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
					enum kvm_pgtable_walk_flags visit);

static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
{
	return ctx->flags & KVM_PGTABLE_WALK_SHARED;
}

/**
 * struct kvm_pgtable_walker - Hook into a page-table walk.
 * @cb:		Callback function to invoke during the walk.
 * @arg:	Argument passed to the callback function.
 * @flags:	Bitwise-OR of flags to identify the entry types on which to
 *		invoke the callback function.
 */
struct kvm_pgtable_walker {
	const kvm_pgtable_visitor_fn_t		cb;
	void * const				arg;
	const enum kvm_pgtable_walk_flags	flags;
};
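
/*
 * A minimal sketch of a visitor and its walker description; the "example_"
 * names are hypothetical. The callback counts valid leaf entries through the
 * @arg cursor, and the walker only requests leaf visits.
 *
 *	static int example_count_cb(const struct kvm_pgtable_visit_ctx *ctx,
 *				    enum kvm_pgtable_walk_flags visit)
 *	{
 *		u64 *count = ctx->arg;
 *
 *		if (kvm_pte_valid(ctx->old))
 *			(*count)++;
 *
 *		return 0;
 *	}
 *
 *	u64 count = 0;
 *	struct kvm_pgtable_walker example_walker = {
 *		.cb	= example_count_cb,
 *		.arg	= &count,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *	};
 */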

/*
 * RCU cannot be used in a non-kernel context such as the hyp. As such, page
 * table walkers used in hyp do not call into RCU and instead use other
 * synchronization mechanisms (such as a spinlock).
 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)

typedef kvm_pte_t *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return pteref;
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	/*
	 * Due to the lack of RCU (or a similar protection scheme), only
	 * non-shared table walkers are allowed in the hypervisor.
	 */
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		return -EPERM;

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return true;
}

#else

typedef kvm_pte_t __rcu *kvm_pteref_t;

static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
						kvm_pteref_t pteref)
{
	return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}

static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_lock();

	return 0;
}

static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
{
	if (walker->flags & KVM_PGTABLE_WALK_SHARED)
		rcu_read_unlock();
}

static inline bool kvm_pgtable_walk_lock_held(void)
{
	return rcu_read_lock_held();
}

#endif

/**
 * struct kvm_pgtable - KVM page-table.
 * @ia_bits:		Maximum input address size, in bits.
 * @start_level:	Level at which the page-table walk starts.
 * @pgd:		Pointer to the first top-level entry of the page-table.
 * @mm_ops:		Memory management callbacks.
 * @mmu:		Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
 * @flags:		Stage-2 page-table flags.
 * @force_pte_cb:	Function that returns true if page level mappings must
 *			be used instead of block mappings.
 */
struct kvm_pgtable {
	u32					ia_bits;
	s8					start_level;
	kvm_pteref_t				pgd;
	struct kvm_pgtable_mm_ops		*mm_ops;

	/* Stage-2 only */
	struct kvm_s2_mmu			*mmu;
	enum kvm_pgtable_stage2_flags		flags;
	kvm_pgtable_force_pte_cb_t		force_pte_cb;
};

/**
 * kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @va_bits:	Maximum virtual address bits.
 * @mm_ops:	Memory management callbacks.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops);

/**
 * kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_hyp_map() - Install a mapping in a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable. Attempts to install a new mapping
 * for a virtual address that is already mapped will be rejected with an
 * error and a WARN().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot);
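
/*
 * Usage sketch (hypothetical values): initialise a 48-bit VA hyp stage-1
 * table with caller-provided mm_ops and map a single read-only page into
 * it. Error handling is elided for brevity.
 *
 *	struct kvm_pgtable pgt;
 *
 *	kvm_pgtable_hyp_init(&pgt, 48, &example_mm_ops);
 *	kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, pa, PAGE_HYP_RO);
 */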

/**
 * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_hyp_init().
 * @addr:	Virtual address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. The unmapping operation will stop early if it encounters either an
 * invalid page-table entry or a valid block mapping which maps beyond the range
 * being unmapped.
 *
 * Return: Number of bytes unmapped, which may be 0.
 */
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_get_vtcr() - Helper to construct VTCR_EL2.
 * @mmfr0:	Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
 * @mmfr1:	Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
 * @phys_shift:	Value to set in VTCR_EL2.T0SZ.
 *
 * The VTCR value is common across all the physical CPUs on the system.
 * We use system-wide sanitised values to fill in different fields,
 * except for Hardware Management of Access Flags. The HA flag is set
 * unconditionally on all CPUs, as it is safe to run with or without
 * the feature and the bit is RES0 on CPUs that don't support it.
 *
 * Return: VTCR_EL2 value
 */
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);

/**
 * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
 * @vtcr:	Content of the VTCR register.
 *
 * Return: the size (in bytes) of the stage-2 PGD
 */
size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);

/**
 * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
 * @pgt:	Uninitialised page-table structure to initialise.
 * @mmu:	S2 MMU context for this S2 translation
 * @mm_ops:	Memory management callbacks.
 * @flags:	Stage-2 configuration flags.
 * @force_pte_cb: Function that returns true if page level mappings must
 *		be used instead of block mappings.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb);

#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
	__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)

/**
 * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior
 * to freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);

/**
 * kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
 * @mm_ops:	Memory management callbacks.
 * @pgtable:	Unlinked stage-2 paging structure to be freed.
 * @level:	Level of the stage-2 paging structure to be freed.
 *
 * The page-table is assumed to be unreachable by any hardware walkers prior to
 * freeing and therefore no TLB invalidation is performed.
 */
void kvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);

/**
 * kvm_pgtable_stage2_create_unlinked() - Create an unlinked stage-2 paging structure.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @phys:	Physical address of the memory to map.
 * @level:	Starting level of the stage-2 paging structure to be created.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @force_pte:  Force mappings to PAGE_SIZE granularity.
 *
521  * not reachable (i.e., it is unlinked) from the root pgd and it's
522  * therefore unreachableby the hardware page-table walker. No TLB
523  * invalidation or CMOs are performed.
524  *
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Return: The fully populated (unlinked) stage-2 paging structure, or
 * an ERR_PTR(error) on failure.
 */
kvm_pte_t *kvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt,
					      u64 phys, s8 level,
					      enum kvm_pgtable_prot prot,
					      void *mc, bool force_pte);

/**
 * kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address at which to place the mapping.
 * @size:	Size of the mapping.
 * @phys:	Physical address of the memory to map.
 * @prot:	Permissions and attributes for the mapping.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
546  *
 *
 * The offset of @addr within a page is ignored, @size is rounded-up to
 * the next page boundary and @phys is rounded-down to the previous page
 * boundary.
 *
 * If device attributes are not explicitly requested in @prot, then the
 * mapping will be normal, cacheable.
 *
 * Note that the update of a valid leaf PTE in this function will be aborted
 * if it's trying to recreate the exact same mapping or only change the access
 * permissions. Instead, the vCPU will exit one more time from the guest if
 * still needed and then go through the path of relaxing permissions.
 *
 * Note that this function will both coalesce existing table entries and split
 * existing block mappings, relying on page-faults to fault back areas outside
 * of the new mapping lazily.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc, enum kvm_pgtable_walk_flags flags);
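
/*
 * Usage sketch (hypothetical values): install a writable, cacheable 2MB
 * block mapping at fault time. @memcache must hold enough pre-allocated
 * pages for any intermediate tables; error handling is elided.
 *
 *	kvm_pgtable_stage2_map(pgt, ipa & ~(SZ_2M - 1), SZ_2M,
 *			       pa & ~(SZ_2M - 1), KVM_PGTABLE_PROT_RW,
 *			       memcache, KVM_PGTABLE_WALK_HANDLE_FAULT |
 *			       KVM_PGTABLE_WALK_SHARED);
 */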

/**
 * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
 *				    track ownership.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Base intermediate physical address to annotate.
 * @size:	Size of the annotated range.
 * @mc:		Cache of pre-allocated and zeroed memory from which to allocate
 *		page-table pages.
 * @owner_id:	Unique identifier for the owner of the page.
 *
 * By default, all page-tables are owned by identifier 0. This function can be
 * used to mark portions of the IPA space as owned by other entities. When a
 * stage 2 is used with identity-mappings, these annotations allow the
 * page-table data structure to be used as a simple rmap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id);

/**
 * kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to remove the mapping.
 * @size:	Size of the mapping.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * TLB invalidation is performed for each page-table entry cleared during the
 * unmapping operation and the reference count for the page-table page
 * containing the cleared entry is decremented, with unreferenced pages being
 * freed. Unmapping a cacheable page will ensure that it is clean to the PoC if
 * FWB is not supported by the CPU.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
 *                                  without TLB invalidation.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to write-protect.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * set the access flag in that entry.
 *
 * Return: The old page-table entry prior to setting the flag, 0 on failure.
 */
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);

/**
 * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
 *					   flag in a page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @size:	Size of the address range to visit.
 * @mkold:	True if the access flag should be cleared.
 *
 * The offset of @addr within a page is ignored.
 *
 * Tests and conditionally clears the access flag for every valid, leaf
 * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
 * Return: True if any of the visited PTEs had the access flag set.
 */
bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
					 u64 size, bool mkold);

/**
 * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
 *				      page-table entry.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address to identify the page-table entry.
 * @prot:	Additional permissions to grant for the mapping.
 *
 * The offset of @addr within a page is ignored.
 *
 * If there is a valid, leaf page-table entry used to translate @addr, then
 * relax the permissions in that entry according to the read, write and
 * execute permissions specified by @prot. No permissions are removed, and
 * TLB invalidation is performed after updating the entry. Software bits cannot
 * be set or cleared using kvm_pgtable_stage2_relax_perms().
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot);

/**
 * kvm_pgtable_stage2_flush() - Clean and invalidate data cache to Point of
 *				Coherency for guest stage-2 address range.
 * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	Intermediate physical address from which to flush.
 * @size:	Size of the range.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);

/**
 * kvm_pgtable_stage2_split() - Split a range of huge pages into leaf PTEs pointing
 *				to PAGE_SIZE guest pages.
 * @pgt:	 Page-table structure initialised by kvm_pgtable_stage2_init*().
 * @addr:	 Intermediate physical address from which to split.
 * @size:	 Size of the range.
 * @mc:		 Cache of pre-allocated and zeroed memory from which to allocate
 *		 page-table pages.
 *
 * The function tries to split any level 1 or 2 entry that overlaps
 * with the input range (given by @addr and @size).
 *
 * Return: 0 on success, negative error code on failure. Note that
 * kvm_pgtable_stage2_split() is best effort: it tries to break as many
 * blocks in the input range as the capacity of @mc allows.
 */
int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
			     struct kvm_mmu_memory_cache *mc);
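
/*
 * Usage sketch (hypothetical values): when enabling dirty logging, split the
 * huge pages backing a memslot so that writes fault at PAGE_SIZE
 * granularity. The memcache is topped up with a caller-chosen number of
 * pages beforehand; error handling is elided.
 *
 *	struct kvm_mmu_memory_cache mc = { .gfp_zero = __GFP_ZERO };
 *
 *	kvm_mmu_topup_memory_cache(&mc, nr_pages);
 *	kvm_pgtable_stage2_split(pgt, slot_base_ipa, slot_size, &mc);
 */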

/**
 * kvm_pgtable_walk() - Walk a page-table.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init().
 * @addr:	Input address for the start of the walk.
 * @size:	Size of the range to walk.
 * @walker:	Walker callback description.
 *
 * The offset of @addr within a page is ignored and @size is rounded-up to
 * the next page boundary.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address range specified, visiting entries according to the walker flags.
 * Invalid entries are treated as leaf entries. The visited page table entry is
 * reloaded after invoking the walker callback, allowing the walker to descend
 * into a newly installed table.
 *
 * Returning a negative error code from the walker callback function will
 * terminate the walk immediately with the same error code.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker);
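
/*
 * Usage sketch: drive a walker like the one sketched after
 * struct kvm_pgtable_walker over the first 1GB of the input address space.
 * The range bounds are hypothetical.
 *
 *	kvm_pgtable_walk(pgt, 0, SZ_1G, &example_walker);
 */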

/**
 * kvm_pgtable_get_leaf() - Walk a page-table and retrieve the leaf entry
 *			    with its level.
 * @pgt:	Page-table structure initialised by kvm_pgtable_*_init()
 *		or a similar initialiser.
 * @addr:	Input address for the start of the walk.
 * @ptep:	Pointer to storage for the retrieved PTE.
 * @level:	Pointer to storage for the level of the retrieved PTE.
 *
 * The offset of @addr within a page is ignored.
 *
 * The walker will walk the page-table entries corresponding to the input
 * address specified, retrieving the leaf corresponding to this address.
 * Invalid entries are treated as leaf entries.
 *
 * Return: 0 on success, negative error code on failure.
 */
int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, s8 *level);
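
/*
 * Usage sketch: look up the leaf entry translating a (hypothetical) @ipa and
 * recover the backing physical address, including the offset within the
 * mapped granule.
 *
 *	kvm_pte_t pte;
 *	s8 level;
 *	u64 phys;
 *
 *	if (!kvm_pgtable_get_leaf(pgt, ipa, &pte, &level) &&
 *	    kvm_pte_valid(pte))
 *		phys = kvm_pte_to_phys(pte) |
 *		       (ipa & (kvm_granule_size(level) - 1));
 */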

/**
 * kvm_pgtable_stage2_pte_prot() - Retrieve the protection attributes of a
 *				   stage-2 Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);

/**
 * kvm_pgtable_hyp_pte_prot() - Retrieve the protection attributes of a stage-1
 *				Page-Table Entry.
 * @pte:	Page-table entry
 *
 * Return: protection attributes of the page-table entry in the enum
 *	   kvm_pgtable_prot format.
 */
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);

/**
 * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
 *
 * @mmu:	Stage-2 KVM MMU struct
 * @addr:	The base intermediate physical address from which to invalidate
 * @size:	Size of the range from the base to invalidate
 */
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t addr, size_t size);
#endif	/* __ARM64_KVM_PGTABLE_H__ */