/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *		 Ulrich Weigand (weigand@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
extern struct ctlreg s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_range(vmf, vma, addr, ptep, nr)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
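/*
 * A block of zero pages is used instead of a single page: empty_zero_page
 * is the base address and zero_page_mask selects one of the pages based
 * on the virtual address (see __HAVE_COLOR_ZERO_PAGE below).
 */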
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

#ifdef CONFIG_KMSAN
#define KMSAN_VMALLOC_SIZE		(VMALLOC_END - VMALLOC_START)
#define KMSAN_VMALLOC_SHADOW_START	VMALLOC_END
#define KMSAN_VMALLOC_SHADOW_END	(KMSAN_VMALLOC_SHADOW_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_VMALLOC_ORIGIN_START	KMSAN_VMALLOC_SHADOW_END
#define KMSAN_VMALLOC_ORIGIN_END	(KMSAN_VMALLOC_ORIGIN_START + KMSAN_VMALLOC_SIZE)
#define KMSAN_MODULES_SHADOW_START	KMSAN_VMALLOC_ORIGIN_END
#define KMSAN_MODULES_SHADOW_END	(KMSAN_MODULES_SHADOW_START + MODULES_LEN)
#define KMSAN_MODULES_ORIGIN_START	KMSAN_MODULES_SHADOW_END
#define KMSAN_MODULES_ORIGIN_END	(KMSAN_MODULES_ORIGIN_START + MODULES_LEN)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
#define KASLR_LEN	(1UL << 31)
#else
#define KASLR_LEN	0UL
#endif

/*
 * A 64 bit pagetable entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SW_BITS	0xffUL		/* All SW bits */

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
 * HW bit and all SW bits.
 */
#define _PAGE_RDP_MASK		~(_PAGE_PROTECT | _PAGE_SW_BITS)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_HARDWARE_BITS		0xfffffffffffff6ffUL
#define _REGION3_ENTRY_HARDWARE_BITS_LARGE	0xffffffff8001073cUL
#define _REGION3_ENTRY_ORIGIN_LARGE	~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_WRITE	0x0002	/* SW region write bit */
#define _REGION3_ENTRY_READ	0x0001	/* SW region read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe3fUL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe3cUL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff1073cUL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

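/*
 * With CONFIG_PGSTE each page table is accompanied by a page status table
 * (PGSTE) with one entry per pte; the bits below live in those entries,
 * not in the pte itself.
 */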
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY |	\
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY)

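/*
 * The upper page table levels are only allocated when a task's address
 * space actually needs them; a level counts as folded while the asce_limit
 * of the mm stays at or below the size that would require it.
 */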
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * As soon as the guest uses storage keys or enables PV, we deduplicate all
 * mapped shared zeropages and prevent new shared zeropages from getting
 * mapped.
 */
#define mm_forbids_zeropage mm_forbids_zeropage
static inline int mm_forbids_zeropage(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (!mm->context.allow_cow_sharing)
		return 1;
#endif
	return 0;
}

static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

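/**
 * csp() - Compare and Swap and Purge (CSP)
 * @ptr: Pointer to the value to be exchanged
 * @old: The expected old value
 * @new: The new value
 */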
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

/**
 * cspg() - Compare and Swap and Purge (CSPG)
 * @ptr: Pointer to the value to be exchanged
 * @old: The expected old value
 * @new: The new value
 *
 * Return: True if compare and swap was successful, otherwise false.
 */
static inline bool cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
	return old == r1.even;
}

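/* DTT codes for crdte(), designating the DAT table level of the entry */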
#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

/**
 * crdte() - Compare and Replace DAT Table Entry
 * @old: The expected old value
 * @new: The new value
 * @table: Pointer to the value to be exchanged
 * @dtt: Table type of the table to be exchanged
 * @address: The address mapped by the entry to be replaced
 * @asce: The ASCE of this entry
 *
 * Return: True if compare and replace was successful, otherwise false.
 */
static inline bool crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
	return old == r1.even;
}

/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf pud_leaf
static inline bool pud_leaf(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf pmd_leaf
static inline bool pmd_leaf(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_leaf(pmd) implies pmd_present(pmd) */
	return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

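/*
 * m4 operand of the ipte instruction: with IPTE_LOCAL only the TLB of the
 * issuing CPU is cleared, with IPTE_GLOBAL all CPUs are flushed.
 */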
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

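/*
 * __ptep_rdp() executes the Reset DAT-Protection instruction for the given
 * pte, propagating a protection downgrade without a full TLB invalidation
 * (see pte_allow_rdp() and ptep_reset_dat_prot() below).
 */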
static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
				       unsigned long opt, unsigned long asce,
				       int local)
{
	unsigned long pto;

	pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
	asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
		     : "+m" (*ptep)
		     : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
		       [asce] "a" (asce), [m4] "i" (local));
}

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure_pte(res);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure_pte(res);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_pte(res))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead.
	 */
	uv_convert_from_secure_pte(res);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * Check if PTEs only differ in the _PAGE_PROTECT HW bit, but also allow SW PTE
 * bits in the comparison. Those might change e.g. because of dirty and young
 * tracking.
 */
static inline int pte_allow_rdp(pte_t old, pte_t new)
{
	/*
	 * Only allow changes from RO to RW
	 */
	if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
		return 0;

	return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
}

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address,
						pte_t *ptep)
{
	/*
	 * RDP might not have propagated the PTE protection reset to all CPUs,
	 * so there could be spurious TLB protection faults.
	 * NOTE: This will also be called when a racing pagetable update on
	 * another thread already installed the correct PTE. Both cases cannot
	 * really be distinguished.
	 * Therefore, only do the local TLB flush when RDP can be used, and the
	 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
	 * A local RDP can be used to do the flush.
	 */
	if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
		__ptep_rdp(address, ptep, 0, 0, 1);
}
#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault

void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
			 pte_t new);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
		ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
	else
		ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

#define PFN_PTE_SHIFT		PAGE_SHIFT

/*
 * Set multiple PTEs to consecutive pages with a single call. All PTEs
 * are within the same folio, PMD and VMA.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t entry, unsigned int nr)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm)) {
		for (;;) {
			ptep_set_pte_at(mm, addr, ptep, entry);
			if (--nr == 0)
				break;
			ptep++;
			entry = __pte(pte_val(entry) + PAGE_SIZE);
			addr += PAGE_SIZE;
		}
	} else {
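		/* set_pte() ignores the address, so it is not advanced here */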
		for (;;) {
			set_pte(ptep, entry);
			if (--nr == 0)
				break;
			ptep++;
			entry = __pte(pte_val(entry) + PAGE_SIZE);
		}
	}
}
#define set_ptes set_ptes

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
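	/* The no-execute bit is only usable with the NX facility */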
	if (!MACHINE_HAS_NX)
		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_leaf(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_leaf(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
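	/*
	 * The table type (0..3 for segment, region3, region2 and region1
	 * tables) maps to the shifts 20, 31, 42 and 53 defined above
	 */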
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

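/*
 * Addresses above the asce_limit of the mm are not translated by its page
 * tables, so the lockless fast GUP walk must not go beyond that limit.
 */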
gup_fast_permitted(unsigned long start,unsigned long end)1536 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1537 {
1538 return end <= current->mm->context.asce_limit;
1539 }
1540 #define gup_fast_permitted gup_fast_permitted
1541
1542 #define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1543 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1544 #define pte_page(x) pfn_to_page(pte_pfn(x))
1545
1546 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1547 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1548 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1549 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1550
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
        pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
        return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkwrite(pud_t pud)
{
        pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
        if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
                pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
        return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
        pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
        return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
}

static inline pud_t pud_mkdirty(pud_t pud)
{
        pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
        if (pud_val(pud) & _REGION3_ENTRY_WRITE)
                pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
        return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        /*
         * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
         * (see __Pxxx / __Sxxx). Convert to segment table entry format.
         */
        if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
                return pgprot_val(SEGMENT_NONE);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
                return pgprot_val(SEGMENT_RO);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
                return pgprot_val(SEGMENT_RX);
        if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
                return pgprot_val(SEGMENT_RW);
        return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
                pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
}

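/*
 * pmd_modify() keeps only the physical origin, the LARGE flag and the
 * (soft-)dirty/young state of the old entry, then applies the new
 * protection. PROTECT and INVALID are re-derived so that a clean entry
 * still traps writes and an old entry still traps references.
 */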
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long mask;

        mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        mask |= _SEGMENT_ENTRY_DIRTY;
        mask |= _SEGMENT_ENTRY_YOUNG;
        mask |= _SEGMENT_ENTRY_LARGE;
        mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
        pmd = __pmd(pmd_val(pmd) & mask);
        pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
        if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
        if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
                pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
        return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
        return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

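/*
 * CSP (compare and swap and purge) atomically replaces the low word of
 * the entry and purges matching TLB entries; it is used here to set the
 * invalid bit in the pmd.
 */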
static inline void __pmdp_csp(pmd_t *pmdp)
{
        csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
            pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

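/*
 * IDTE (invalidate DAT table entry) invalidates the designated table
 * entry and flushes the TLBs, either on the local CPU only (IDTE_LOCAL)
 * or machine-wide. The opt bits select the NODAT/guest-ASCE variants;
 * for a compile-time constant opt of zero the cheaper form without a
 * guest ASCE is emitted.
 */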
static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
                                        unsigned long opt, unsigned long asce,
                                        int local)
{
        unsigned long sto;

        sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        " idte %[r1],0,%[r2],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
                          [m4] "i" (local)
                        : "cc" );
        } else {
                /* flush with guest asce */
                asm volatile(
                        " idte %[r1],%[r3],%[r2],%[m4]"
                        : "+m" (*pmdp)
                        : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
                                        unsigned long opt, unsigned long asce,
                                        int local)
{
        unsigned long r3o;

        r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
        r3o |= _ASCE_TYPE_REGION3;
        if (__builtin_constant_p(opt) && opt == 0) {
                /* flush without guest asce */
                asm volatile(
                        " idte %[r1],0,%[r2],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
                          [m4] "i" (local)
                        : "cc");
        } else {
                /* flush with guest asce */
                asm volatile(
                        " idte %[r1],%[r3],%[r2],%[m4]"
                        : "+m" (*pudp)
                        : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
                          [r3] "a" (asce), [m4] "i" (local)
                        : "cc" );
        }
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
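/*
 * The _direct variants exchange the entry and flush the TLB right away;
 * the _lazy variant may defer the flush when the mm permits it (see the
 * out-of-line implementations in arch/s390/mm/pgtable.c).
 */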

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);

        entry = pmd_mkyoung(entry);
        if (dirty)
                entry = pmd_mkdirty(entry);
        if (pmd_val(*pmdp) == pmd_val(entry))
                return 0;
        pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
        return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
        return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long addr, pmd_t *pmdp)
{
        VM_BUG_ON(addr & ~HPAGE_MASK);
        return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        if (!MACHINE_HAS_NX)
                entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
        set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
        pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
        return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

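/*
 * The "full" variant below is used while the address space is being
 * torn down; in that case the flushing exchange can be skipped and the
 * entry is simply overwritten.
 */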
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                                 unsigned long addr,
                                                 pmd_t *pmdp, int full)
{
        if (full) {
                pmd_t pmd = *pmdp;
                set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
                return pmd;
        }
        return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd;

        VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
        return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        if (pmd_write(pmd))
                pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                                        unsigned long address,
                                        pmd_t *pmdp)
{
        return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

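/*
 * Transparent huge pages need the EDAT-1 facility, which provides
 * hardware 1 MB segment (pmd-level) pages.
 */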
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
        return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 *	|                       offset                       |E11XX|type |S0|
 *	|0000000000111111111122222222223333333333444444444455|55555|55566|66|
 *	|0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        unsigned long pteval;

        pteval = _PAGE_INVALID | _PAGE_PROTECT;
        pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
        pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
        return __pte(pteval);
}
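/*
 * Worked example (illustrative, assuming _PAGE_INVALID | _PAGE_PROTECT
 * == 0x600): type = 3 and offset = 0x10 give
 * pteval = 0x600 | (0x10 << 12) | (3 << 2) = 0x1060c, i.e. the offset
 * in the upper bits, the type in bits 57-61, and the invalid+protect
 * pattern that marks the pte as a swap entry.
 */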

static inline unsigned long __swp_type(swp_entry_t entry)
{
        return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
        return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
extern void vmem_unmap_4k_page(unsigned long addr);
extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
        ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
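/*
 * Note: sizeof(pte_t) * PTRS_PER_PTE is the 2 KB size of a page table,
 * so its negation is a mask that strips the low 11 bits and leaves the
 * page-table origin stored in the pmd.
 */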

#endif /* _ASM_S390_PGTABLE_H */