/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

/*
 * SW bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by the page-table owner.
 *   11: Reserved for future use (lending).
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= KVM_PGTABLE_PROT_SW0,
	PKVM_PAGE_SHARED_BORROWED	= KVM_PGTABLE_PROT_SW1,
	__PKVM_PAGE_RESERVED		= KVM_PGTABLE_PROT_SW0 |
					  KVM_PGTABLE_PROT_SW1,

	/* Meta-states which aren't encoded directly in the PTE's SW bits */
	PKVM_NOPAGE,
};

#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return prot & PKVM_PAGE_STATE_PROT_MASK;
}
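
/*
 * Example (an illustrative sketch, not a real call site): encode a state
 * into a protection value and read it back. The round-trip holds because
 * pkvm_mkstate() clears the SW-bit mask before OR-ing in the new state:
 *
 *	enum kvm_pgtable_prot prot = PAGE_HYP;
 *
 *	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
 *	BUG_ON(pkvm_getstate(prot) != PKVM_PAGE_SHARED_OWNED);
 */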

struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/* This corresponds to page-table locking order */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
};
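
/*
 * Illustrative sketch of that ordering (an assumption drawn from the
 * comment above, not verbatim kernel code, and assuming pkvm_pgd_lock is
 * the hyp stage-1 lock from <nvhe/mm.h>): when a transition must walk
 * both components' page-tables, take the locks in enum order:
 *
 *	hyp_spin_lock(&host_mmu.lock);	// PKVM_ID_HOST first
 *	hyp_spin_lock(&pkvm_pgd_lock);	// then PKVM_ID_HYP
 *	...
 *	hyp_spin_unlock(&pkvm_pgd_lock);
 *	hyp_spin_unlock(&host_mmu.lock);
 */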

extern unsigned long hyp_nr_cpus;

int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
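
/*
 * Example host-side usage (a sketch; the real EL1 call sites live
 * elsewhere in arch/arm64/kvm): the host kernel reaches these handlers
 * through nVHE hypercalls, e.g.
 *
 *	ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn);
 *	...
 *	ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn);
 */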

bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
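
/*
 * Expected calling convention for the _locked() helpers above (an
 * assumption based on their naming, sketched rather than copied from a
 * real call site): the caller already holds host_mmu.lock, e.g.
 *
 *	hyp_spin_lock(&host_mmu.lock);
 *	ret = host_stage2_idmap_locked(addr, size, prot);
 *	hyp_spin_unlock(&host_mmu.lock);
 */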

int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);
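
/*
 * Illustrative use of the pinning helpers (a sketch, not verbatim kernel
 * code): pin a range of shared hyp VA so the host cannot unshare it while
 * the hypervisor is still using it:
 *
 *	int ret = hyp_pin_shared_mem(va, va + size);
 *
 *	if (ret)
 *		return ret;
 *	...access the memory...
 *	hyp_unpin_shared_mem(va, va + size);
 */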

static __always_inline void __load_host_stage2(void)
{
	/*
	 * Once protected mode is initialized, the host has a stage-2
	 * page-table to restore; until then it has none, so install the
	 * reserved zero VMID by clearing VTTBR_EL2 instead.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}
#endif /* __KVM_NVHE_MEM_PROTECT__ */