// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#define KVM_PTE_VALID			BIT(0)

#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_ADDR_MASK		GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48		GENMASK(15, 12)

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_PTE_LEAF_ATTR_S2_IGNORED	GENMASK(58, 55)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(63, 56)
#define KVM_MAX_OWNER_ID		1

struct kvm_pgtable_walk_data {
	struct kvm_pgtable		*pgt;
	struct kvm_pgtable_walker	*walker;

	u64				addr;
	u64				end;
};

static u64 kvm_granule_shift(u32 level)
{
	/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
	return ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
}

static u64 kvm_granule_size(u32 level)
{
	return BIT(kvm_granule_shift(level));
}
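
/*
 * For reference: with 4K pages (and KVM_PGTABLE_MAX_LEVELS == 4), the
 * granules are 512GiB at level 0, 1GiB at level 1, 2MiB at level 2 and
 * 4KiB at level 3; 16K and 64K pages scale accordingly, at 11 and 13
 * bits per level respectively.
 */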

#define KVM_PHYS_INVALID		(-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}
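
/*
 * Note that KVM_PHYS_INVALID deliberately fails the check above: it is
 * used by kvm_pgtable_stage2_set_owner() to request invalid "annotation"
 * entries, which carry an owner ID instead of an output address.
 */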

static bool kvm_level_supports_block_mapping(u32 level)
{
	/*
	 * Reject invalid block mappings and don't bother with 4TB mappings for
	 * 52-bit PAs.
	 */
	return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
}

static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
	u64 granule = kvm_granule_size(level);

	if (!kvm_level_supports_block_mapping(level))
		return false;

	if (granule > (end - addr))
		return false;

	if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(addr, granule);
}

static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
{
	return __kvm_pgd_page_idx(data->pgt, data->addr);
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
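
/*
 * Stage-2 translation may concatenate tables at the initial lookup level
 * (up to 16 in the architecture), so the "PGD" can span several pages.
 * For example, a 40-bit IPA space with 4K pages starts at level 1, where
 * a single table covers 39 bits of address space: __kvm_pgd_page_idx()
 * evaluates to 1 for the top address and kvm_pgd_pages() returns 2
 * concatenated pages.
 */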

static bool kvm_pte_valid(kvm_pte_t pte)
{
	return pte & KVM_PTE_VALID;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static u64 kvm_pte_to_phys(kvm_pte_t pte)
{
	u64 pa = pte & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pa |= FIELD_GET(KVM_PTE_ADDR_51_48, pte) << 48;

	return pa;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

	return pte;
}
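
/*
 * With 64K pages (PAGE_SHIFT == 16), ARMv8.2-LPA places bits [51:48] of
 * the output address in descriptor bits [15:12], hence the extra shuffle
 * in the two helpers above.
 */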

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
			      struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;

	WARN_ON(kvm_pte_valid(old));
	smp_store_release(ptep, pte);
}
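
/*
 * The store-release above orders the zero-initialisation of the child
 * table against the installation of its table entry, so a concurrent
 * walker cannot observe the new table before its contents are visible.
 */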

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}
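
/*
 * A leaf at the final level must use the page descriptor type (bit 1 set),
 * as the block encoding (bit 1 clear) is reserved there; at all higher
 * levels a leaf is a block descriptor.
 */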

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
	return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
				  u32 level, kvm_pte_t *ptep,
				  enum kvm_pgtable_walk_flags flag)
{
	struct kvm_pgtable_walker *walker = data->walker;
	return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level);

static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      kvm_pte_t *ptep, u32 level)
{
	int ret = 0;
	u64 addr = data->addr;
	kvm_pte_t *childp, pte = *ptep;
	bool table = kvm_pte_table(pte, level);
	enum kvm_pgtable_walk_flags flags = data->walker->flags;

	if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_PRE);
	}

	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_LEAF);
		pte = *ptep;
		table = kvm_pte_table(pte, level);
	}

	if (ret)
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = kvm_pte_follow(pte, data->pgt->mm_ops);
	ret = __kvm_pgtable_walk(data, childp, level + 1);
	if (ret)
		goto out;

	if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_POST);
	}

out:
	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pte_t *ptep = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, ptep, level);
		if (ret)
			break;
	}

	return ret;
}

static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	struct kvm_pgtable *pgt = data->pgt;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.pgt	= pgt,
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};

	return _kvm_pgtable_walk(&walk_data);
}
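
/*
 * An illustrative sketch (not part of the kernel sources): a caller that
 * wanted to count the valid leaf entries in a range could drive the walker
 * as below, where 'count_leaves_cb' and 'nr_leaves' are hypothetical names:
 *
 *	static int count_leaves_cb(u64 addr, u64 end, u32 level,
 *				   kvm_pte_t *ptep,
 *				   enum kvm_pgtable_walk_flags flag,
 *				   void * const arg)
 *	{
 *		u64 *nr_leaves = arg;
 *
 *		if (kvm_pte_valid(*ptep))
 *			(*nr_leaves)++;
 *
 *		return 0;
 *	}
 *
 *	u64 nr_leaves = 0;
 *	struct kvm_pgtable_walker walker = {
 *		.cb	= count_leaves_cb,
 *		.flags	= KVM_PGTABLE_WALK_LEAF,
 *		.arg	= &nr_leaves,
 *	};
 *
 *	kvm_pgtable_walk(pgt, addr, size, &walker);
 */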

struct hyp_map_data {
	u64				phys;
	kvm_pte_t			attr;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	*ptep = attr;

	return 0;
}
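
/*
 * For example, a hyp text mapping (KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X)
 * comes out as Normal memory, AP=RO, Inner Shareable, AF set and XN clear;
 * writable+executable and executable device mappings are rejected outright.
 */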

static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				    kvm_pte_t *ptep, struct hyp_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;

	/* Tolerate KVM recreating the exact same mapping */
	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (old != new && !WARN_ON(kvm_pte_valid(old)))
		smp_store_release(ptep, new);

	data->phys += granule;
	return true;
}

static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			  enum kvm_pgtable_walk_flags flag, void * const arg)
{
	kvm_pte_t *childp;
	struct hyp_map_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
		return 0;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	kvm_set_table_pte(ptep, childp, mm_ops);
	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
		.mm_ops	= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

	pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits = va_bits;
	pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels;
	pgt->mm_ops = mm_ops;
	pgt->mmu = NULL;
	return 0;
}

static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			   enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;

	mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(pgt->pgd);
	pgt->pgd = NULL;
}

struct stage2_map_data {
	u64				phys;
	kvm_pte_t			attr;
	u8				owner_id;

	kvm_pte_t			*anchor;
	kvm_pte_t			*childp;

	struct kvm_s2_mmu		*mmu;
	void				*memcache;

	struct kvm_pgtable_mm_ops	*mm_ops;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u8 lvls;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum of 2 page-table levels to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable Hardware Access Flag management unconditionally on all
	 * CPUs. The feature is RES0 on CPUs without the support and must
	 * be ignored by those CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;

	return vtcr;
}
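
/*
 * A worked example (assuming 4K pages): a 40-bit IPA space yields
 * T0SZ == 24 and a 3-level table starting at level 1, with two
 * concatenated pages at the root as computed by kvm_pgd_pages() above.
 */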

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
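
/*
 * With FEAT_S2FWB the combined memory attributes can be forced to
 * write-back cacheable regardless of the stage-1 attributes, which is
 * why the unmap and flush paths below skip data-cache maintenance
 * whenever stage2_has_fwb() is true.
 */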

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
			    KVM_S2_MEMATTR(pgt, NORMAL);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

	if (!(prot & KVM_PGTABLE_PROT_X))
		attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
	else if (device)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	*ptep = attr;

	return 0;
}

static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}
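
/*
 * Two valid PTEs that differ only in S2AP/XN do not require an update
 * via break-before-make here: permission changes are handled lazily
 * through kvm_pgtable_stage2_relax_perms() instead.
 */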

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries if
	 * they encode ownership of a page to an entity other than the
	 * page-table owner, whose ID is 0.
	 */
	return !!pte;
}

static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
			   u32 level, struct kvm_pgtable_mm_ops *mm_ops)
{
	/*
	 * Clear the existing PTE, and perform break-before-make with
	 * TLB maintenance if it was valid.
	 */
	if (kvm_pte_valid(*ptep)) {
		kvm_clear_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	}

	mm_ops->put_page(ptep);
}

static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return -E2BIG;

	if (kvm_phys_is_valid(phys))
		new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	else
		new = kvm_init_invalid_leaf_owner(data->owner_id);

	if (stage2_pte_is_counted(old)) {
		/*
		 * Skip updating the PTE if we are trying to recreate the
		 * exact same mapping or only change the access permissions.
		 * Instead, the vCPU will exit from the guest once more if
		 * still needed and then go through the permission-relaxation
		 * path.
		 */
		if (!stage2_pte_needs_update(old, new))
			return -EAGAIN;

		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
	}

	smp_store_release(ptep, new);
	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ptep);
	if (kvm_phys_is_valid(phys))
		data->phys += granule;
	return 0;
}
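
/*
 * Return values from stage2_map_walker_try_leaf():
 *  0       - a leaf was installed at this level;
 *  -E2BIG  - the granule at this level is too large or misaligned for
 *            the range, so the caller must descend;
 *  -EAGAIN - an equivalent mapping (modulo permissions) is already in
 *            place, so there is nothing to do here.
 */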

static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     struct stage2_map_data *data)
{
	if (data->anchor)
		return 0;

	if (!kvm_block_mapping_supported(addr, end, data->phys, level))
		return 0;

	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
	kvm_clear_pte(ptep);

	/*
	 * Invalidate the whole stage-2, as we may have numerous leaf
	 * entries below us which would otherwise need invalidating
	 * individually.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
	data->anchor = ptep;
	return 0;
}

static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp, pte = *ptep;
	int ret;

	if (data->anchor) {
		if (stage2_pte_is_counted(pte))
			mm_ops->put_page(ptep);

		return 0;
	}

	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	if (stage2_pte_is_counted(pte))
		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);

	kvm_set_table_pte(ptep, childp, mm_ops);
	mm_ops->get_page(ptep);

	return 0;
}

static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp;
	int ret = 0;

	if (!data->anchor)
		return 0;

	if (data->anchor == ptep) {
		childp = data->childp;
		data->anchor = NULL;
		data->childp = NULL;
		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
	} else {
		childp = kvm_pte_follow(*ptep, mm_ops);
	}

	mm_ops->put_page(childp);
	mm_ops->put_page(ptep);

	return ret;
}

/*
 * This is a little fiddly, as we use all three of the walk flags. The idea
 * is that the TABLE_PRE callback runs for table entries on the way down,
 * looking for table entries which we could conceivably replace with a
 * block entry for this mapping. If it finds one, then it sets the 'anchor'
 * field in 'struct stage2_map_data' to point at the table entry, before
 * clearing the entry to zero and descending into the now detached table.
 *
 * The behaviour of the LEAF callback then depends on whether or not the
 * anchor has been set. If not, then we're not using a block mapping higher
 * up the table and we perform the mapping at the existing leaves instead.
 * If, on the other hand, the anchor _is_ set, then we drop references to
 * all valid leaves so that the pages beneath the anchor can be freed.
 *
 * Finally, the TABLE_POST callback does nothing if the anchor has not
 * been set, but otherwise frees the page-table pages while walking back up
 * the page-table, installing the block entry when it revisits the anchor
 * pointer and clearing the anchor to NULL.
 */
static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			     enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct stage2_map_data *data = arg;

	switch (flag) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_map_walk_table_post(addr, end, level, ptep, data);
	}

	return -EINVAL;
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= KVM_PHYS_INVALID,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
		.owner_id	= owner_id,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (owner_id > KVM_MAX_OWNER_ID)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
	return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep, *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(pte)) {
		if (stage2_pte_is_counted(pte)) {
			kvm_clear_pte(ptep);
			mm_ops->put_page(ptep);
		}
		return 0;
	}

	if (kvm_pte_table(pte, level)) {
		childp = kvm_pte_follow(pte, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, pte)) {
		need_flush = !stage2_has_fwb(pgt);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_put_pte(ptep, mmu, addr, level, mm_ops);

	if (need_flush) {
		__flush_dcache_area(kvm_pte_follow(pte, mm_ops),
				    kvm_granule_size(level));
	}

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
	kvm_pte_t	attr_set;
	kvm_pte_t	attr_clr;
	kvm_pte_t	pte;
	u32		level;
};

static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	kvm_pte_t pte = *ptep;
	struct stage2_attr_data *data = arg;

	if (!kvm_pte_valid(pte))
		return 0;

	data->level = level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte)
		WRITE_ONCE(*ptep, pte);

	return 0;
}

static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    u32 *level)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;
	return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				 &pte, NULL);
	dsb(ishst);
	return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
				 &pte, NULL);
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot)
{
	int ret;
	u32 level;
	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (prot & KVM_PGTABLE_PROT_X)
		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
	if (!ret)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
	return ret;
}

static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep;

	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
		return 0;

	__flush_dcache_area(kvm_pte_follow(pte, mm_ops), kvm_granule_size(level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (stage2_has_fwb(pgt))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
				  struct kvm_pgtable_mm_ops *mm_ops,
				  enum kvm_pgtable_stage2_flags flags)
{
	size_t pgd_sz;
	u64 vtcr = arch->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits = ia_bits;
	pgt->start_level = start_level;
	pgt->mm_ops = mm_ops;
	pgt->mmu = &arch->mmu;
	pgt->flags = flags;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (!stage2_pte_is_counted(pte))
		return 0;

	mm_ops->put_page(ptep);

	if (kvm_pte_table(pte, level))
		mm_ops->put_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
	pgt->pgd = NULL;
}

#define KVM_PTE_LEAF_S2_COMPAT_MASK	(KVM_PTE_LEAF_ATTR_S2_PERMS | \
					 KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
					 KVM_PTE_LEAF_ATTR_S2_IGNORED)

static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
					  kvm_pte_t *ptep,
					  enum kvm_pgtable_walk_flags flag,
					  void * const arg)
{
	kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;

	/*
	 * Compatible mappings are either invalid and owned by the page-table
	 * owner (whose id is 0), or valid with matching permission attributes.
	 */
	if (kvm_pte_valid(pte)) {
		old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
		if (old_attr != *new_attr)
			return -EEXIST;
	} else if (pte) {
		return -EEXIST;
	}

	return 0;
}

int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
				  enum kvm_pgtable_prot prot,
				  struct kvm_mem_range *range)
{
	kvm_pte_t attr;
	struct kvm_pgtable_walker check_perm_walker = {
		.cb	= stage2_check_permission_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &attr,
	};
	u64 granule, start, end;
	u32 level;
	int ret;

	ret = stage2_set_prot_attr(pgt, prot, &attr);
	if (ret)
		return ret;
	attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;

	for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
		granule = kvm_granule_size(level);
		start = ALIGN_DOWN(addr, granule);
		end = start + granule;

		if (!kvm_level_supports_block_mapping(level))
			continue;

		if (start < range->start || range->end < end)
			continue;

		/*
		 * Check the presence of existing mappings with incompatible
		 * permissions within the current block range, and try one level
		 * deeper if one is found.
		 */
		ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
		if (ret != -EEXIST)
			break;
	}

	if (!ret) {
		range->start = start;
		range->end = end;
	}

	return ret;
}