xref: /linux/arch/arm64/kvm/hyp/nvhe/mm.c (revision 0be3ff0c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

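/*
 * The hyp stage-1 page-table, and the lock serializing all updates to it.
 */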
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

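/*
 * The host's memblock regions, as registered with the hypervisor during
 * initialization (populated outside this file).
 */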
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

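/*
 * Base of the next allocation from the hyp "private" VA range; initialized
 * in hyp_create_idmap() and bumped by __pkvm_create_private_mapping().
 */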
static u64 __io_map_base;

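/*
 * Map @size bytes of physically contiguous memory at @phys to the hyp VA
 * @start with protection @prot, taking the pgd lock around the walk.
 */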
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

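/*
 * Allocate a VA range from the hyp private range (a bump allocator growing
 * from __io_map_base towards the vmemmap) and map @phys there. Returns the
 * hyp VA of the new mapping, adjusted by the sub-page offset of @phys, or
 * an ERR_PTR() value cast to unsigned long on failure. Note that the VA
 * range is not given back if the mapping itself fails.
 */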
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	size = PAGE_ALIGN(size + offset_in_page(phys));
	addr = __io_map_base;
	__io_map_base += size;
	/* Are we overflowing on the vmemmap? */
	if (__io_map_base > __hyp_vmemmap) {
		__io_map_base -= size;
		addr = (unsigned long)ERR_PTR(-ENOMEM);
		goto out;
	}

	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
	if (err) {
		addr = (unsigned long)ERR_PTR(err);
		goto out;
	}

	addr = addr + offset_in_page(phys);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return addr;
}

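/*
 * Map the hyp alias of the range [@from, @to) page by page, looking up the
 * backing physical pages with hyp_virt_to_phys(). The caller must already
 * hold pkvm_pgd_lock.
 */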
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

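/* Locking wrapper around pkvm_create_mappings_locked(). */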
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

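/*
 * Map the vmemmap entries covering the range [@phys, @phys + @size) to the
 * backing pages starting at @back.
 */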
int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
{
	unsigned long start, end;

	hyp_vmemmap_range(phys, size, &start, &end);

	return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
}

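/*
 * Base address of the Spectre workaround vectors, either in the hyp image
 * or, when idmapped vectors are required, in a private mapping of them
 * (see hyp_map_vectors()).
 */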
static void *__hyp_bp_vect_base;
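/*
 * Record the hyp vector base for this CPU in kvm_hyp_vector, picking the
 * vectors that implement @slot; the indirect slots are resolved through
 * __hyp_bp_vect_base.
 */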
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

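/*
 * Make the Spectre vector workarounds usable from hyp. When the system
 * needs idmapped vectors, __bp_harden_hyp_vecs is given an executable
 * private mapping; otherwise it is already mapped as part of the hyp image.
 */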
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	void *bp_base;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	bp_base = (void *)__pkvm_create_private_mapping(phys,
							__BP_HARDEN_HYP_VECS_SZ,
							PAGE_HYP_EXEC);
	if (IS_ERR_OR_NULL(bp_base))
		return PTR_ERR(bp_base);

	__hyp_bp_vect_base = bp_base;

	return 0;
}

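/*
 * Create an executable identity mapping of the hyp idmap text, and choose
 * where the private (IO) mappings and the vmemmap will live (see the
 * comment in the function body).
 */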
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);
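	/*
	 * Worked example (illustrative only, assuming hyp_va_bits == 48 and
	 * an idmap address with bit 46 clear): the quarter not containing
	 * the idmap starts at BIT(46), so __io_map_base = BIT(46) and
	 * __hyp_vmemmap = BIT(46) | BIT(45); IO mappings then grow up from
	 * the base of the quarter until they hit the vmemmap in its upper
	 * half.
	 */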

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}