1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Hartmut Penner (hp@de.ibm.com)
6 * Ulrich Weigand (weigand@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 *
9 * Derived from "include/asm-i386/pgtable.h"
10 */
11
12 #ifndef _ASM_S390_PGTABLE_H
13 #define _ASM_S390_PGTABLE_H
14
15 #include <linux/sched.h>
16 #include <linux/mm_types.h>
17 #include <linux/page-flags.h>
18 #include <linux/radix-tree.h>
19 #include <linux/atomic.h>
20 #include <asm/sections.h>
21 #include <asm/ctlreg.h>
22 #include <asm/bug.h>
23 #include <asm/page.h>
24 #include <asm/uv.h>
25
26 extern pgd_t swapper_pg_dir[];
27 extern pgd_t invalid_pg_dir[];
28 extern void paging_init(void);
29 extern struct ctlreg s390_invalid_asce;
30
31 enum {
32 PG_DIRECT_MAP_4K = 0,
33 PG_DIRECT_MAP_1M,
34 PG_DIRECT_MAP_2G,
35 PG_DIRECT_MAP_MAX
36 };
37
38 extern atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
39
40 static inline void update_page_count(int level, long count)
41 {
42 if (IS_ENABLED(CONFIG_PROC_FS))
43 atomic_long_add(count, &direct_pages_count[level]);
44 }
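/*
 * Usage sketch (an assumption about callers outside this header): the
 * direct-map code accounts its mappings per level with these counters,
 * e.g. splitting one 1M segment into 4K pages would be booked as
 *
 *	update_page_count(PG_DIRECT_MAP_1M, -1);
 *	update_page_count(PG_DIRECT_MAP_4K, 256);
 *
 * since one 1M segment covers 256 4K pages. The counters only feed
 * /proc reporting, hence the IS_ENABLED(CONFIG_PROC_FS) check above.
 */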
45
46 /*
47 * The S390 doesn't have any external MMU info: the kernel page
48 * tables contain all the necessary information.
49 */
50 #define update_mmu_cache(vma, address, ptep) do { } while (0)
51 #define update_mmu_cache_range(vmf, vma, addr, ptep, nr) do { } while (0)
52 #define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
53
54 /*
55 * ZERO_PAGE is a global shared page that is always zero; used
56 * for zero-mapped memory areas etc..
57 */
58
59 extern unsigned long empty_zero_page;
60 extern unsigned long zero_page_mask;
61
62 #define ZERO_PAGE(vaddr) \
63 (virt_to_page((void *)(empty_zero_page + \
64 (((unsigned long)(vaddr)) & zero_page_mask))))
65 #define __HAVE_COLOR_ZERO_PAGE
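/*
 * Usage sketch (hedged, the callers live in common code): a fault handler
 * picks its zero page with
 *
 *	struct page *zp = ZERO_PAGE(vmf->address);
 *
 * With __HAVE_COLOR_ZERO_PAGE there is not just one zero page but an area
 * of them; zero_page_mask selects the page whose cache colour matches the
 * user address, so zero mappings do not all collide in the cache.
 */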
66
67 /* TODO: s390 cannot support io_remap_pfn_range... */
68
69 #define pte_ERROR(e) \
70 pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
71 #define pmd_ERROR(e) \
72 pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
73 #define pud_ERROR(e) \
74 pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
75 #define p4d_ERROR(e) \
76 pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
77 #define pgd_ERROR(e) \
78 pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
79
80 /*
81 * The vmalloc and module area will always be on the topmost area of the
82 * kernel mapping. 512GB are reserved for vmalloc by default.
83 * At the top of the vmalloc area a 2GB area is reserved where modules
84 * will reside. That makes sure that inter module branches always
85 * happen without trampolines and in addition the placement within a
86 * 2GB frame is branch prediction unit friendly.
87 */
88 extern unsigned long __bootdata_preserved(VMALLOC_START);
89 extern unsigned long __bootdata_preserved(VMALLOC_END);
90 #define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
91 extern struct page *__bootdata_preserved(vmemmap);
92 extern unsigned long __bootdata_preserved(vmemmap_size);
93
94 extern unsigned long __bootdata_preserved(MODULES_VADDR);
95 extern unsigned long __bootdata_preserved(MODULES_END);
96 #define MODULES_VADDR MODULES_VADDR
97 #define MODULES_END MODULES_END
98 #define MODULES_LEN (1UL << 31)
99
100 static inline int is_module_addr(void *addr)
101 {
102 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
103 if (addr < (void *)MODULES_VADDR)
104 return 0;
105 if (addr > (void *)MODULES_END)
106 return 0;
107 return 1;
108 }
109
110 #ifdef CONFIG_RANDOMIZE_BASE
111 #define KASLR_LEN (1UL << 31)
112 #else
113 #define KASLR_LEN 0UL
114 #endif
115
116 /*
117 * A 64 bit page table entry of S390 has the following format:
118 * | PFRA |0IPC| OS |
119 * 0000000000111111111122222222223333333333444444444455555555556666
120 * 0123456789012345678901234567890123456789012345678901234567890123
121 *
122 * I Page-Invalid Bit: Page is not available for address-translation
123 * P Page-Protection Bit: Store access not possible for page
124 * C Change-bit override: HW is not required to set change bit
125 *
126 * A 64 bit segment table entry of S390 has the following format:
127 * | P-table origin | TT
128 * 0000000000111111111122222222223333333333444444444455555555556666
129 * 0123456789012345678901234567890123456789012345678901234567890123
130 *
131 * I Segment-Invalid Bit: Segment is not available for address-translation
132 * C Common-Segment Bit: Segment is not private (PoP 3-30)
133 * P Page-Protection Bit: Store access not possible for page
134 * TT Type 00
135 *
136 * A 64 bit region table entry of S390 has the following format:
137 * | S-table origin | TF TTTL
138 * 0000000000111111111122222222223333333333444444444455555555556666
139 * 0123456789012345678901234567890123456789012345678901234567890123
140 *
141 * I Segment-Invalid Bit: Segment is not available for address-translation
142 * TT Type 01
143 * TF
144 * TL Table length
145 *
146 * The 64 bit region table origin of S390 has the following format:
147 * | region table origin | DTTL
148 * 0000000000111111111122222222223333333333444444444455555555556666
149 * 0123456789012345678901234567890123456789012345678901234567890123
150 *
151 * X Space-Switch event:
152 * G Segment-Invalid Bit:
153 * P Private-Space Bit:
154 * S Storage-Alteration:
155 * R Real space
156 * TL Table-Length:
157 *
158 * A storage key has the following format:
159 * | ACC |F|R|C|0|
160 * 0 3 4 5 6 7
161 * ACC: access key
162 * F : fetch protection bit
163 * R : referenced bit
164 * C : changed bit
165 */
166
167 /* Hardware bits in the page table entry */
168 #define _PAGE_NOEXEC 0x100 /* HW no-execute bit */
169 #define _PAGE_PROTECT 0x200 /* HW read-only bit */
170 #define _PAGE_INVALID 0x400 /* HW invalid bit */
171 #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
172
173 /* Software bits in the page table entry */
174 #define _PAGE_PRESENT 0x001 /* SW pte present bit */
175 #define _PAGE_YOUNG 0x004 /* SW pte young bit */
176 #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
177 #define _PAGE_READ 0x010 /* SW pte read bit */
178 #define _PAGE_WRITE 0x020 /* SW pte write bit */
179 #define _PAGE_SPECIAL 0x040 /* SW associated with special page */
180 #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
181
182 #ifdef CONFIG_MEM_SOFT_DIRTY
183 #define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */
184 #else
185 #define _PAGE_SOFT_DIRTY 0x000
186 #endif
187
188 #define _PAGE_SW_BITS 0xffUL /* All SW bits */
189
190 #define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE /* SW pte exclusive swap bit */
191
192 /* Set of bits not changed in pte_modify */
193 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
194 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
195
196 /*
197 * Mask of bits that must not be changed with RDP. Allow only _PAGE_PROTECT
198 * HW bit and all SW bits.
199 */
200 #define _PAGE_RDP_MASK ~(_PAGE_PROTECT | _PAGE_SW_BITS)
201
202 /*
203 * handle_pte_fault uses pte_present and pte_none to find out the pte type
204 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
205 * distinguish present from not-present ptes. It is changed only with the page
206 * table lock held.
207 *
208 * The following table gives the different possible bit combinations for
209 * the pte hardware and software bits in the last 12 bits of a pte
210 * (. unassigned bit, x don't care, t swap type):
211 *
212 * 842100000000
213 * 000084210000
214 * 000000008421
215 * .IR.uswrdy.p
216 * empty .10.00000000
217 * swap .11..ttttt.0
218 * prot-none, clean, old .11.xx0000.1
219 * prot-none, clean, young .11.xx0001.1
220 * prot-none, dirty, old .11.xx0010.1
221 * prot-none, dirty, young .11.xx0011.1
222 * read-only, clean, old .11.xx0100.1
223 * read-only, clean, young .01.xx0101.1
224 * read-only, dirty, old .11.xx0110.1
225 * read-only, dirty, young .01.xx0111.1
226 * read-write, clean, old .11.xx1100.1
227 * read-write, clean, young .01.xx1101.1
228 * read-write, dirty, old .10.xx1110.1
229 * read-write, dirty, young .00.xx1111.1
230 * HW-bits: R read-only, I invalid
231 * SW-bits: p present, y young, d dirty, r read, w write, s special,
232 * u unused, l large
233 *
234 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
235 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
236 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
237 */
238
239 /* Bits in the segment/region table address-space-control-element */
240 #define _ASCE_ORIGIN ~0xfffUL/* region/segment table origin */
241 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
242 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
243 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
244 #define _ASCE_REAL_SPACE 0x20 /* real space control */
245 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
246 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */
247 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */
248 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */
249 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
250 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */
251
252 /* Bits in the region table entry */
253 #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
254 #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
255 #define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
256 #define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
257 #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
258 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */
259 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
260 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
261 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
262 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
263
264 #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
265 #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
266 #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
267 #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
268 #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
269 #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
270
271 #define _REGION3_ENTRY_HARDWARE_BITS 0xfffffffffffff6ffUL
272 #define _REGION3_ENTRY_HARDWARE_BITS_LARGE 0xffffffff8001073cUL
273 #define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
274 #define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
275 #define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
276 #define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
277 #define _REGION3_ENTRY_WRITE 0x0002 /* SW region write bit */
278 #define _REGION3_ENTRY_READ 0x0001 /* SW region read bit */
279
280 #ifdef CONFIG_MEM_SOFT_DIRTY
281 #define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
282 #else
283 #define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
284 #endif
285
286 #define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
287
288 /* Bits in the segment table entry */
289 #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe3fUL
290 #define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe3cUL
291 #define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff1073cUL
292 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
293 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* page table origin */
294 #define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
295 #define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
296 #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
297 #define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */
298
299 #define _SEGMENT_ENTRY (0)
300 #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
301
302 #define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
303 #define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
304 #define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
305 #define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */
306 #define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */
307
308 #ifdef CONFIG_MEM_SOFT_DIRTY
309 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
310 #else
311 #define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
312 #endif
313
314 #define _CRST_ENTRIES 2048 /* number of region/segment table entries */
315 #define _PAGE_ENTRIES 256 /* number of page table entries */
316
317 #define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
318 #define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)
319
320 #define _REGION1_SHIFT 53
321 #define _REGION2_SHIFT 42
322 #define _REGION3_SHIFT 31
323 #define _SEGMENT_SHIFT 20
324
325 #define _REGION1_INDEX (0x7ffUL << _REGION1_SHIFT)
326 #define _REGION2_INDEX (0x7ffUL << _REGION2_SHIFT)
327 #define _REGION3_INDEX (0x7ffUL << _REGION3_SHIFT)
328 #define _SEGMENT_INDEX (0x7ffUL << _SEGMENT_SHIFT)
329 #define _PAGE_INDEX (0xffUL << _PAGE_SHIFT)
330
331 #define _REGION1_SIZE (1UL << _REGION1_SHIFT)
332 #define _REGION2_SIZE (1UL << _REGION2_SHIFT)
333 #define _REGION3_SIZE (1UL << _REGION3_SHIFT)
334 #define _SEGMENT_SIZE (1UL << _SEGMENT_SHIFT)
335
336 #define _REGION1_MASK (~(_REGION1_SIZE - 1))
337 #define _REGION2_MASK (~(_REGION2_SIZE - 1))
338 #define _REGION3_MASK (~(_REGION3_SIZE - 1))
339 #define _SEGMENT_MASK (~(_SEGMENT_SIZE - 1))
340
341 #define PMD_SHIFT _SEGMENT_SHIFT
342 #define PUD_SHIFT _REGION3_SHIFT
343 #define P4D_SHIFT _REGION2_SHIFT
344 #define PGDIR_SHIFT _REGION1_SHIFT
345
346 #define PMD_SIZE _SEGMENT_SIZE
347 #define PUD_SIZE _REGION3_SIZE
348 #define P4D_SIZE _REGION2_SIZE
349 #define PGDIR_SIZE _REGION1_SIZE
350
351 #define PMD_MASK _SEGMENT_MASK
352 #define PUD_MASK _REGION3_MASK
353 #define P4D_MASK _REGION2_MASK
354 #define PGDIR_MASK _REGION1_MASK
355
356 #define PTRS_PER_PTE _PAGE_ENTRIES
357 #define PTRS_PER_PMD _CRST_ENTRIES
358 #define PTRS_PER_PUD _CRST_ENTRIES
359 #define PTRS_PER_P4D _CRST_ENTRIES
360 #define PTRS_PER_PGD _CRST_ENTRIES
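/*
 * Worked example derived from the shifts above: with a full five level
 * page table a 64 bit address splits (counting bits from the LSB) into
 *
 *	pgd index	bits 53-63	(11 bits, 2048 entries)
 *	p4d index	bits 42-52	(11 bits, 2048 entries)
 *	pud index	bits 31-41	(11 bits, 2048 entries)
 *	pmd index	bits 20-30	(11 bits, 2048 entries)
 *	pte index	bits 12-19	( 8 bits,  256 entries)
 *	byte offset	bits  0-11
 *
 * which accounts for all 64 address bits.
 */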
361
362 /*
363 * Segment table and region3 table entry encoding
364 * (R = read-only, I = invalid, y = young bit):
365 * dy..R...I...wr
366 * prot-none, clean, old 00..1...1...00
367 * prot-none, clean, young 01..1...1...00
368 * prot-none, dirty, old 10..1...1...00
369 * prot-none, dirty, young 11..1...1...00
370 * read-only, clean, old 00..1...1...01
371 * read-only, clean, young 01..1...0...01
372 * read-only, dirty, old 10..1...1...01
373 * read-only, dirty, young 11..1...0...01
374 * read-write, clean, old 00..1...1...11
375 * read-write, clean, young 01..1...0...11
376 * read-write, dirty, old 10..0...1...11
377 * read-write, dirty, young 11..0...0...11
378 * The segment table origin is used to distinguish empty (origin==0) from
379 * read-write, old segment table entries (origin!=0)
380 * HW-bits: R read-only, I invalid
381 * SW-bits: y young, d dirty, r read, w write
382 */
383
384 /* Page status table bits for virtualization */
385 #define PGSTE_ACC_BITS 0xf000000000000000UL
386 #define PGSTE_FP_BIT 0x0800000000000000UL
387 #define PGSTE_PCL_BIT 0x0080000000000000UL
388 #define PGSTE_HR_BIT 0x0040000000000000UL
389 #define PGSTE_HC_BIT 0x0020000000000000UL
390 #define PGSTE_GR_BIT 0x0004000000000000UL
391 #define PGSTE_GC_BIT 0x0002000000000000UL
392 #define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
393 #define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
394 #define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
395
396 /* Guest Page State used for virtualization */
397 #define _PGSTE_GPS_ZERO 0x0000000080000000UL
398 #define _PGSTE_GPS_NODAT 0x0000000040000000UL
399 #define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
400 #define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
401 #define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
402 #define _PGSTE_GPS_USAGE_POT_VOLATILE 0x0000000002000000UL
403 #define _PGSTE_GPS_USAGE_VOLATILE _PGSTE_GPS_USAGE_MASK
404
405 /*
406 * A user page table pointer has the space-switch-event bit, the
407 * private-space-control bit and the storage-alteration-event-control
408 * bit set. A kernel page table pointer doesn't need them.
409 */
410 #define _ASCE_USER_BITS (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
411 _ASCE_ALT_EVENT)
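/*
 * Hedged example (the actual composition is done by the mm context code,
 * not by this header): a user ASCE for a three level (region third) table
 * would look like
 *
 *	asce = __pa(mm->pgd) | _ASCE_TYPE_REGION3 |
 *	       _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
 *
 * while a kernel ASCE omits _ASCE_USER_BITS, as described above.
 */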
412
413 /*
414 * Page protection definitions.
415 */
416 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
417 #define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \
418 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
419 #define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \
420 _PAGE_INVALID | _PAGE_PROTECT)
421 #define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
422 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
423 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
424 _PAGE_INVALID | _PAGE_PROTECT)
425
426 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
427 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
428 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
429 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
430 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
431 _PAGE_PROTECT | _PAGE_NOEXEC)
432 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
433 _PAGE_YOUNG | _PAGE_DIRTY)
434
435 /*
436 * On s390 the page table entry has an invalid bit and a read-only bit.
437 * Read permission implies execute permission and write permission
438 * implies read permission.
439 */
440 /*xwr*/
441
442 /*
443 * Segment entry (large page) protection definitions.
444 */
445 #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
446 _SEGMENT_ENTRY_PROTECT)
447 #define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \
448 _SEGMENT_ENTRY_READ | \
449 _SEGMENT_ENTRY_NOEXEC)
450 #define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \
451 _SEGMENT_ENTRY_READ)
452 #define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \
453 _SEGMENT_ENTRY_WRITE | \
454 _SEGMENT_ENTRY_NOEXEC)
455 #define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \
456 _SEGMENT_ENTRY_WRITE)
457 #define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
458 _SEGMENT_ENTRY_LARGE | \
459 _SEGMENT_ENTRY_READ | \
460 _SEGMENT_ENTRY_WRITE | \
461 _SEGMENT_ENTRY_YOUNG | \
462 _SEGMENT_ENTRY_DIRTY | \
463 _SEGMENT_ENTRY_NOEXEC)
464 #define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
465 _SEGMENT_ENTRY_LARGE | \
466 _SEGMENT_ENTRY_READ | \
467 _SEGMENT_ENTRY_YOUNG | \
468 _SEGMENT_ENTRY_PROTECT | \
469 _SEGMENT_ENTRY_NOEXEC)
470 #define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
471 _SEGMENT_ENTRY_LARGE | \
472 _SEGMENT_ENTRY_READ | \
473 _SEGMENT_ENTRY_WRITE | \
474 _SEGMENT_ENTRY_YOUNG | \
475 _SEGMENT_ENTRY_DIRTY)
476
477 /*
478 * Region3 entry (large page) protection definitions.
479 */
480
481 #define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \
482 _REGION3_ENTRY_LARGE | \
483 _REGION3_ENTRY_READ | \
484 _REGION3_ENTRY_WRITE | \
485 _REGION3_ENTRY_YOUNG | \
486 _REGION3_ENTRY_DIRTY | \
487 _REGION_ENTRY_NOEXEC)
488 #define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
489 _REGION3_ENTRY_LARGE | \
490 _REGION3_ENTRY_READ | \
491 _REGION3_ENTRY_YOUNG | \
492 _REGION_ENTRY_PROTECT | \
493 _REGION_ENTRY_NOEXEC)
494 #define REGION3_KERNEL_EXEC __pgprot(_REGION_ENTRY_TYPE_R3 | \
495 _REGION3_ENTRY_LARGE | \
496 _REGION3_ENTRY_READ | \
497 _REGION3_ENTRY_WRITE | \
498 _REGION3_ENTRY_YOUNG | \
499 _REGION3_ENTRY_DIRTY)
500
501 static inline bool mm_p4d_folded(struct mm_struct *mm)
502 {
503 return mm->context.asce_limit <= _REGION1_SIZE;
504 }
505 #define mm_p4d_folded(mm) mm_p4d_folded(mm)
506
507 static inline bool mm_pud_folded(struct mm_struct *mm)
508 {
509 return mm->context.asce_limit <= _REGION2_SIZE;
510 }
511 #define mm_pud_folded(mm) mm_pud_folded(mm)
512
513 static inline bool mm_pmd_folded(struct mm_struct *mm)
514 {
515 return mm->context.asce_limit <= _REGION3_SIZE;
516 }
517 #define mm_pmd_folded(mm) mm_pmd_folded(mm)
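/*
 * Note (typical values, not a guarantee): a new mm usually starts with a
 * three level (region third) table, i.e. asce_limit == _REGION2_SIZE, so
 * mm_p4d_folded() and mm_pud_folded() are true while mm_pmd_folded() is
 * false; the table is only upgraded to four or five levels when mappings
 * above the current limit are requested.
 */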
518
519 static inline int mm_has_pgste(struct mm_struct *mm)
520 {
521 #ifdef CONFIG_PGSTE
522 if (unlikely(mm->context.has_pgste))
523 return 1;
524 #endif
525 return 0;
526 }
527
528 static inline int mm_is_protected(struct mm_struct *mm)
529 {
530 #ifdef CONFIG_PGSTE
531 if (unlikely(atomic_read(&mm->context.protected_count)))
532 return 1;
533 #endif
534 return 0;
535 }
536
537 static inline int mm_alloc_pgste(struct mm_struct *mm)
538 {
539 #ifdef CONFIG_PGSTE
540 if (unlikely(mm->context.alloc_pgste))
541 return 1;
542 #endif
543 return 0;
544 }
545
546 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
547 {
548 return __pte(pte_val(pte) & ~pgprot_val(prot));
549 }
550
551 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
552 {
553 return __pte(pte_val(pte) | pgprot_val(prot));
554 }
555
556 static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
557 {
558 return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
559 }
560
561 static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
562 {
563 return __pmd(pmd_val(pmd) | pgprot_val(prot));
564 }
565
566 static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
567 {
568 return __pud(pud_val(pud) & ~pgprot_val(prot));
569 }
570
571 static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
572 {
573 return __pud(pud_val(pud) | pgprot_val(prot));
574 }
575
576 /*
577 * As soon as the guest uses storage keys or enables PV, we deduplicate all
578 * mapped shared zeropages and prevent new shared zeropages from getting
579 * mapped.
580 */
581 #define mm_forbids_zeropage mm_forbids_zeropage
582 static inline int mm_forbids_zeropage(struct mm_struct *mm)
583 {
584 #ifdef CONFIG_PGSTE
585 if (!mm->context.allow_cow_sharing)
586 return 1;
587 #endif
588 return 0;
589 }
590
591 static inline int mm_uses_skeys(struct mm_struct *mm)
592 {
593 #ifdef CONFIG_PGSTE
594 if (mm->context.uses_skeys)
595 return 1;
596 #endif
597 return 0;
598 }
599
600 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
601 {
602 union register_pair r1 = { .even = old, .odd = new, };
603 unsigned long address = (unsigned long)ptr | 1;
604
605 asm volatile(
606 " csp %[r1],%[address]"
607 : [r1] "+&d" (r1.pair), "+m" (*ptr)
608 : [address] "d" (address)
609 : "cc");
610 }
611
612 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
613 {
614 union register_pair r1 = { .even = old, .odd = new, };
615 unsigned long address = (unsigned long)ptr | 1;
616
617 asm volatile(
618 " cspg %[r1],%[address]"
619 : [r1] "+&d" (r1.pair), "+m" (*ptr)
620 : [address] "d" (address)
621 : "cc");
622 }
623
624 #define CRDTE_DTT_PAGE 0x00UL
625 #define CRDTE_DTT_SEGMENT 0x10UL
626 #define CRDTE_DTT_REGION3 0x14UL
627 #define CRDTE_DTT_REGION2 0x18UL
628 #define CRDTE_DTT_REGION1 0x1cUL
629
630 static inline void crdte(unsigned long old, unsigned long new,
631 unsigned long *table, unsigned long dtt,
632 unsigned long address, unsigned long asce)
633 {
634 union register_pair r1 = { .even = old, .odd = new, };
635 union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
636
637 asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
638 : [r1] "+&d" (r1.pair)
639 : [r2] "d" (r2.pair), [asce] "a" (asce)
640 : "memory", "cc");
641 }
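/*
 * Usage sketch (an assumption modelled on the page attribute code, not a
 * definition from this header): exchange a segment table entry and flush
 * its TLB entries in one go on machines with the CRDTE facility:
 *
 *	table = (unsigned long *)((unsigned long)pmdp &
 *				  ~(_CRST_TABLE_SIZE - 1));
 *	crdte(pmd_val(old), pmd_val(new), table, CRDTE_DTT_SEGMENT,
 *	      addr, asce);
 *
 * where asce is the address space the entry belongs to.
 */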
642
643 /*
644 * pgd/p4d/pud/pmd/pte query functions
645 */
646 static inline int pgd_folded(pgd_t pgd)
647 {
648 return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
649 }
650
651 static inline int pgd_present(pgd_t pgd)
652 {
653 if (pgd_folded(pgd))
654 return 1;
655 return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
656 }
657
658 static inline int pgd_none(pgd_t pgd)
659 {
660 if (pgd_folded(pgd))
661 return 0;
662 return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
663 }
664
665 static inline int pgd_bad(pgd_t pgd)
666 {
667 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
668 return 0;
669 return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
670 }
671
672 static inline unsigned long pgd_pfn(pgd_t pgd)
673 {
674 unsigned long origin_mask;
675
676 origin_mask = _REGION_ENTRY_ORIGIN;
677 return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
678 }
679
680 static inline int p4d_folded(p4d_t p4d)
681 {
682 return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
683 }
684
685 static inline int p4d_present(p4d_t p4d)
686 {
687 if (p4d_folded(p4d))
688 return 1;
689 return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
690 }
691
692 static inline int p4d_none(p4d_t p4d)
693 {
694 if (p4d_folded(p4d))
695 return 0;
696 return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
697 }
698
699 static inline unsigned long p4d_pfn(p4d_t p4d)
700 {
701 unsigned long origin_mask;
702
703 origin_mask = _REGION_ENTRY_ORIGIN;
704 return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
705 }
706
707 static inline int pud_folded(pud_t pud)
708 {
709 return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
710 }
711
712 static inline int pud_present(pud_t pud)
713 {
714 if (pud_folded(pud))
715 return 1;
716 return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
717 }
718
719 static inline int pud_none(pud_t pud)
720 {
721 if (pud_folded(pud))
722 return 0;
723 return pud_val(pud) == _REGION3_ENTRY_EMPTY;
724 }
725
726 #define pud_leaf pud_leaf
727 static inline bool pud_leaf(pud_t pud)
728 {
729 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
730 return 0;
731 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
732 }
733
734 #define pmd_leaf pmd_leaf
735 static inline bool pmd_leaf(pmd_t pmd)
736 {
737 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
738 }
739
740 static inline int pmd_bad(pmd_t pmd)
741 {
742 if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
743 return 1;
744 return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
745 }
746
747 static inline int pud_bad(pud_t pud)
748 {
749 unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
750
751 if (type > _REGION_ENTRY_TYPE_R3 || pud_leaf(pud))
752 return 1;
753 if (type < _REGION_ENTRY_TYPE_R3)
754 return 0;
755 return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
756 }
757
758 static inline int p4d_bad(p4d_t p4d)
759 {
760 unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
761
762 if (type > _REGION_ENTRY_TYPE_R2)
763 return 1;
764 if (type < _REGION_ENTRY_TYPE_R2)
765 return 0;
766 return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
767 }
768
769 static inline int pmd_present(pmd_t pmd)
770 {
771 return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
772 }
773
774 static inline int pmd_none(pmd_t pmd)
775 {
776 return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
777 }
778
779 #define pmd_write pmd_write
780 static inline int pmd_write(pmd_t pmd)
781 {
782 return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
783 }
784
785 #define pud_write pud_write
786 static inline int pud_write(pud_t pud)
787 {
788 return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
789 }
790
791 #define pmd_dirty pmd_dirty
792 static inline int pmd_dirty(pmd_t pmd)
793 {
794 return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
795 }
796
797 #define pmd_young pmd_young
798 static inline int pmd_young(pmd_t pmd)
799 {
800 return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
801 }
802
803 static inline int pte_present(pte_t pte)
804 {
805 /* Bit pattern: (pte & 0x001) == 0x001 */
806 return (pte_val(pte) & _PAGE_PRESENT) != 0;
807 }
808
809 static inline int pte_none(pte_t pte)
810 {
811 /* Bit pattern: pte == 0x400 */
812 return pte_val(pte) == _PAGE_INVALID;
813 }
814
815 static inline int pte_swap(pte_t pte)
816 {
817 /* Bit pattern: (pte & 0x201) == 0x200 */
818 return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
819 == _PAGE_PROTECT;
820 }
821
822 static inline int pte_special(pte_t pte)
823 {
824 return (pte_val(pte) & _PAGE_SPECIAL);
825 }
826
827 #define __HAVE_ARCH_PTE_SAME
828 static inline int pte_same(pte_t a, pte_t b)
829 {
830 return pte_val(a) == pte_val(b);
831 }
832
833 #ifdef CONFIG_NUMA_BALANCING
834 static inline int pte_protnone(pte_t pte)
835 {
836 return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
837 }
838
839 static inline int pmd_protnone(pmd_t pmd)
840 {
841 /* pmd_leaf(pmd) implies pmd_present(pmd) */
842 return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
843 }
844 #endif
845
846 static inline int pte_swp_exclusive(pte_t pte)
847 {
848 return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
849 }
850
851 static inline pte_t pte_swp_mkexclusive(pte_t pte)
852 {
853 return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
854 }
855
856 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
857 {
858 return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
859 }
860
861 static inline int pte_soft_dirty(pte_t pte)
862 {
863 return pte_val(pte) & _PAGE_SOFT_DIRTY;
864 }
865 #define pte_swp_soft_dirty pte_soft_dirty
866
867 static inline pte_t pte_mksoft_dirty(pte_t pte)
868 {
869 return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
870 }
871 #define pte_swp_mksoft_dirty pte_mksoft_dirty
872
873 static inline pte_t pte_clear_soft_dirty(pte_t pte)
874 {
875 return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
876 }
877 #define pte_swp_clear_soft_dirty pte_clear_soft_dirty
878
879 static inline int pmd_soft_dirty(pmd_t pmd)
880 {
881 return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
882 }
883
884 static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
885 {
886 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
887 }
888
889 static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
890 {
891 return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
892 }
893
894 /*
895 * query functions pte_write/pte_dirty/pte_young only work if
896 * pte_present() is true. Undefined behaviour if not.
897 */
898 static inline int pte_write(pte_t pte)
899 {
900 return (pte_val(pte) & _PAGE_WRITE) != 0;
901 }
902
903 static inline int pte_dirty(pte_t pte)
904 {
905 return (pte_val(pte) & _PAGE_DIRTY) != 0;
906 }
907
908 static inline int pte_young(pte_t pte)
909 {
910 return (pte_val(pte) & _PAGE_YOUNG) != 0;
911 }
912
913 #define __HAVE_ARCH_PTE_UNUSED
914 static inline int pte_unused(pte_t pte)
915 {
916 return pte_val(pte) & _PAGE_UNUSED;
917 }
918
919 /*
920 * Extract the pgprot value from the given pte while at the same time making it
921 * usable for kernel address space mappings where fault driven dirty and
922 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
923 * must not be set.
924 */
925 static inline pgprot_t pte_pgprot(pte_t pte)
926 {
927 unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
928
929 if (pte_write(pte))
930 pte_flags |= pgprot_val(PAGE_KERNEL);
931 else
932 pte_flags |= pgprot_val(PAGE_KERNEL_RO);
933 pte_flags |= pte_val(pte) & mio_wb_bit_mask;
934
935 return __pgprot(pte_flags);
936 }
937
938 /*
939 * pgd/pmd/pte modification functions
940 */
941
942 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
943 {
944 WRITE_ONCE(*pgdp, pgd);
945 }
946
947 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
948 {
949 WRITE_ONCE(*p4dp, p4d);
950 }
951
952 static inline void set_pud(pud_t *pudp, pud_t pud)
953 {
954 WRITE_ONCE(*pudp, pud);
955 }
956
957 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
958 {
959 WRITE_ONCE(*pmdp, pmd);
960 }
961
962 static inline void set_pte(pte_t *ptep, pte_t pte)
963 {
964 WRITE_ONCE(*ptep, pte);
965 }
966
967 static inline void pgd_clear(pgd_t *pgd)
968 {
969 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
970 set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
971 }
972
973 static inline void p4d_clear(p4d_t *p4d)
974 {
975 if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
976 set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
977 }
978
979 static inline void pud_clear(pud_t *pud)
980 {
981 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
982 set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
983 }
984
985 static inline void pmd_clear(pmd_t *pmdp)
986 {
987 set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
988 }
989
990 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
991 {
992 set_pte(ptep, __pte(_PAGE_INVALID));
993 }
994
995 /*
996 * The following pte modification functions only work if
997 * pte_present() is true. Undefined behaviour if not.
998 */
999 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
1000 {
1001 pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
1002 pte = set_pte_bit(pte, newprot);
1003 /*
1004 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
1005 * has the invalid bit set, clear it again for readable, young pages
1006 */
1007 if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
1008 pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
1009 /*
1010 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
1011 * protection bit set, clear it again for writable, dirty pages
1012 */
1013 if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
1014 pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1015 return pte;
1016 }
1017
1018 static inline pte_t pte_wrprotect(pte_t pte)
1019 {
1020 pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
1021 return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1022 }
1023
1024 static inline pte_t pte_mkwrite_novma(pte_t pte)
1025 {
1026 pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
1027 if (pte_val(pte) & _PAGE_DIRTY)
1028 pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1029 return pte;
1030 }
1031
1032 static inline pte_t pte_mkclean(pte_t pte)
1033 {
1034 pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
1035 return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1036 }
1037
1038 static inline pte_t pte_mkdirty(pte_t pte)
1039 {
1040 pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
1041 if (pte_val(pte) & _PAGE_WRITE)
1042 pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
1043 return pte;
1044 }
1045
1046 static inline pte_t pte_mkold(pte_t pte)
1047 {
1048 pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1049 return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
1050 }
1051
1052 static inline pte_t pte_mkyoung(pte_t pte)
1053 {
1054 pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
1055 if (pte_val(pte) & _PAGE_READ)
1056 pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
1057 return pte;
1058 }
1059
1060 static inline pte_t pte_mkspecial(pte_t pte)
1061 {
1062 return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
1063 }
1064
1065 #ifdef CONFIG_HUGETLB_PAGE
1066 static inline pte_t pte_mkhuge(pte_t pte)
1067 {
1068 return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
1069 }
1070 #endif
1071
1072 #define IPTE_GLOBAL 0
1073 #define IPTE_LOCAL 1
1074
1075 #define IPTE_NODAT 0x400
1076 #define IPTE_GUEST_ASCE 0x800
1077
1078 static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
1079 unsigned long opt, unsigned long asce,
1080 int local)
1081 {
1082 unsigned long pto;
1083
1084 pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
1085 asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
1086 : "+m" (*ptep)
1087 : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
1088 [asce] "a" (asce), [m4] "i" (local));
1089 }
1090
1091 static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
1092 unsigned long opt, unsigned long asce,
1093 int local)
1094 {
1095 unsigned long pto = __pa(ptep);
1096
1097 if (__builtin_constant_p(opt) && opt == 0) {
1098 /* Invalidation + TLB flush for the pte */
1099 asm volatile(
1100 " ipte %[r1],%[r2],0,%[m4]"
1101 : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
1102 [m4] "i" (local));
1103 return;
1104 }
1105
1106 /* Invalidate ptes with options + TLB flush of the ptes */
1107 opt = opt | (asce & _ASCE_ORIGIN);
1108 asm volatile(
1109 " ipte %[r1],%[r2],%[r3],%[m4]"
1110 : [r2] "+a" (address), [r3] "+a" (opt)
1111 : [r1] "a" (pto), [m4] "i" (local) : "memory");
1112 }
1113
1114 static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
1115 pte_t *ptep, int local)
1116 {
1117 unsigned long pto = __pa(ptep);
1118
1119 /* Invalidate a range of ptes + TLB flush of the ptes */
1120 do {
1121 asm volatile(
1122 " ipte %[r1],%[r2],%[r3],%[m4]"
1123 : [r2] "+a" (address), [r3] "+a" (nr)
1124 : [r1] "a" (pto), [m4] "i" (local) : "memory");
1125 } while (nr != 255);
1126 }
1127
1128 /*
1129 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
1130 * both clear the TLB for the unmapped pte. The reason is that
1131 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
1132 * to modify an active pte. The sequence is
1133 * 1) ptep_get_and_clear
1134 * 2) set_pte_at
1135 * 3) flush_tlb_range
1136 * On s390 the tlb needs to get flushed with the modification of the pte
1137 * if the pte is active. The only way how this can be implemented is to
1138 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
1139 * is a nop.
1140 */
1141 pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
1142 pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
1143
1144 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1145 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
1146 unsigned long addr, pte_t *ptep)
1147 {
1148 pte_t pte = *ptep;
1149
1150 pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
1151 return pte_young(pte);
1152 }
1153
1154 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1155 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
1156 unsigned long address, pte_t *ptep)
1157 {
1158 return ptep_test_and_clear_young(vma, address, ptep);
1159 }
1160
1161 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1162 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
1163 unsigned long addr, pte_t *ptep)
1164 {
1165 pte_t res;
1166
1167 res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1168 /* At this point the reference through the mapping is still present */
1169 if (mm_is_protected(mm) && pte_present(res))
1170 uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1171 return res;
1172 }
1173
1174 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1175 pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
1176 void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
1177 pte_t *, pte_t, pte_t);
1178
1179 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
1180 static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1181 unsigned long addr, pte_t *ptep)
1182 {
1183 pte_t res;
1184
1185 res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
1186 /* At this point the reference through the mapping is still present */
1187 if (mm_is_protected(vma->vm_mm) && pte_present(res))
1188 uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1189 return res;
1190 }
1191
1192 /*
1193 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1194 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1195 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1196 * cannot be accessed while the batched unmap is running. In this case
1197 * full==1 and a simple pte_clear is enough. See tlb.h.
1198 */
1199 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1200 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1201 unsigned long addr,
1202 pte_t *ptep, int full)
1203 {
1204 pte_t res;
1205
1206 if (full) {
1207 res = *ptep;
1208 set_pte(ptep, __pte(_PAGE_INVALID));
1209 } else {
1210 res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
1211 }
1212 /* Nothing to do */
1213 if (!mm_is_protected(mm) || !pte_present(res))
1214 return res;
1215 /*
1216 * At this point the reference through the mapping is still present.
1217 * The notifier should have destroyed all protected vCPUs at this
1218 * point, so the destroy should be successful.
1219 */
1220 if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
1221 return res;
1222 /*
1223 * If something went wrong and the page could not be destroyed, or
1224 * if this is not a mm teardown, the slower export is used as
1225 * fallback instead.
1226 */
1227 uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
1228 return res;
1229 }
1230
1231 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
1232 static inline void ptep_set_wrprotect(struct mm_struct *mm,
1233 unsigned long addr, pte_t *ptep)
1234 {
1235 pte_t pte = *ptep;
1236
1237 if (pte_write(pte))
1238 ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
1239 }
1240
1241 /*
1242 * Check if PTEs only differ in _PAGE_PROTECT HW bit, but also allow SW PTE
1243 * bits in the comparison. Those might change e.g. because of dirty and young
1244 * tracking.
1245 */
1246 static inline int pte_allow_rdp(pte_t old, pte_t new)
1247 {
1248 /*
1249 * Only allow changes from RO to RW
1250 */
1251 if (!(pte_val(old) & _PAGE_PROTECT) || pte_val(new) & _PAGE_PROTECT)
1252 return 0;
1253
1254 return (pte_val(old) & _PAGE_RDP_MASK) == (pte_val(new) & _PAGE_RDP_MASK);
1255 }
1256
1257 static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
1258 unsigned long address,
1259 pte_t *ptep)
1260 {
1261 /*
1262 * RDP might not have propagated the PTE protection reset to all CPUs,
1263 * so there could be spurious TLB protection faults.
1264 * NOTE: This will also be called when a racing pagetable update on
1265 * another thread already installed the correct PTE. Both cases cannot
1266 * really be distinguished.
1267 * Therefore, only do the local TLB flush when RDP can be used, and the
1268 * PTE does not have _PAGE_PROTECT set, to avoid unnecessary overhead.
1269 * A local RDP can be used to do the flush.
1270 */
1271 if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
1272 __ptep_rdp(address, ptep, 0, 0, 1);
1273 }
1274 #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
1275
1276 void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
1277 pte_t new);
1278
1279 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1280 static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1281 unsigned long addr, pte_t *ptep,
1282 pte_t entry, int dirty)
1283 {
1284 if (pte_same(*ptep, entry))
1285 return 0;
1286 if (MACHINE_HAS_RDP && !mm_has_pgste(vma->vm_mm) && pte_allow_rdp(*ptep, entry))
1287 ptep_reset_dat_prot(vma->vm_mm, addr, ptep, entry);
1288 else
1289 ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
1290 return 1;
1291 }
1292
1293 /*
1294 * Additional functions to handle KVM guest page tables
1295 */
1296 void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
1297 pte_t *ptep, pte_t entry);
1298 void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1299 void ptep_notify(struct mm_struct *mm, unsigned long addr,
1300 pte_t *ptep, unsigned long bits);
1301 int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
1302 pte_t *ptep, int prot, unsigned long bit);
1303 void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
1304 pte_t *ptep, int reset);
1305 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
1306 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
1307 pte_t *sptep, pte_t *tptep, pte_t pte);
1308 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
1309
1310 bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
1311 pte_t *ptep);
1312 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1313 unsigned char key, bool nq);
1314 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1315 unsigned char key, unsigned char *oldkey,
1316 bool nq, bool mr, bool mc);
1317 int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
1318 int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
1319 unsigned char *key);
1320
1321 int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
1322 unsigned long bits, unsigned long value);
1323 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
1324 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1325 unsigned long *oldpte, unsigned long *oldpgste);
1326 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
1327 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
1328 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
1329 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
1330
1331 #define pgprot_writecombine pgprot_writecombine
1332 pgprot_t pgprot_writecombine(pgprot_t prot);
1333
1334 #define pgprot_writethrough pgprot_writethrough
1335 pgprot_t pgprot_writethrough(pgprot_t prot);
1336
1337 #define PFN_PTE_SHIFT PAGE_SHIFT
1338
1339 /*
1340 * Set multiple PTEs to consecutive pages with a single call. All PTEs
1341 * are within the same folio, PMD and VMA.
1342 */
1343 static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
1344 pte_t *ptep, pte_t entry, unsigned int nr)
1345 {
1346 if (pte_present(entry))
1347 entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
1348 if (mm_has_pgste(mm)) {
1349 for (;;) {
1350 ptep_set_pte_at(mm, addr, ptep, entry);
1351 if (--nr == 0)
1352 break;
1353 ptep++;
1354 entry = __pte(pte_val(entry) + PAGE_SIZE);
1355 addr += PAGE_SIZE;
1356 }
1357 } else {
1358 for (;;) {
1359 set_pte(ptep, entry);
1360 if (--nr == 0)
1361 break;
1362 ptep++;
1363 entry = __pte(pte_val(entry) + PAGE_SIZE);
1364 }
1365 }
1366 }
1367 #define set_ptes set_ptes
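/*
 * Usage sketch (generic, hedged): core mm maps nr consecutive pages of a
 * folio with something along the lines of
 *
 *	set_ptes(vma->vm_mm, addr, ptep, mk_pte(page, vma->vm_page_prot), nr);
 *
 * The pgste variant goes through ptep_set_pte_at() so that the KVM page
 * status tables stay in sync, the plain variant simply stores the ptes.
 */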
1368
1369 /*
1370 * Conversion functions: convert a page and protection to a page entry,
1371 * and a page entry and page directory to the page they refer to.
1372 */
1373 static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1374 {
1375 pte_t __pte;
1376
1377 __pte = __pte(physpage | pgprot_val(pgprot));
1378 if (!MACHINE_HAS_NX)
1379 __pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
1380 return pte_mkyoung(__pte);
1381 }
1382
1383 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1384 {
1385 unsigned long physpage = page_to_phys(page);
1386 pte_t __pte = mk_pte_phys(physpage, pgprot);
1387
1388 if (pte_write(__pte) && PageDirty(page))
1389 __pte = pte_mkdirty(__pte);
1390 return __pte;
1391 }
1392
1393 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
1394 #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
1395 #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1396 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1397
1398 #define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
1399 #define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))
1400
1401 static inline unsigned long pmd_deref(pmd_t pmd)
1402 {
1403 unsigned long origin_mask;
1404
1405 origin_mask = _SEGMENT_ENTRY_ORIGIN;
1406 if (pmd_leaf(pmd))
1407 origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1408 return (unsigned long)__va(pmd_val(pmd) & origin_mask);
1409 }
1410
1411 static inline unsigned long pmd_pfn(pmd_t pmd)
1412 {
1413 return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
1414 }
1415
1416 static inline unsigned long pud_deref(pud_t pud)
1417 {
1418 unsigned long origin_mask;
1419
1420 origin_mask = _REGION_ENTRY_ORIGIN;
1421 if (pud_leaf(pud))
1422 origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
1423 return (unsigned long)__va(pud_val(pud) & origin_mask);
1424 }
1425
1426 #define pud_pfn pud_pfn
1427 static inline unsigned long pud_pfn(pud_t pud)
1428 {
1429 return __pa(pud_deref(pud)) >> PAGE_SHIFT;
1430 }
1431
1432 /*
1433 * The pgd_offset function *always* adds the index for the top-level
1434 * region/segment table. This is done to get a sequence like the
1435 * following to work:
1436 * pgdp = pgd_offset(current->mm, addr);
1437 * pgd = READ_ONCE(*pgdp);
1438 * p4dp = p4d_offset(&pgd, addr);
1439 * ...
1440 * The subsequent p4d_offset, pud_offset and pmd_offset functions
1441 * only add an index if they dereferenced the pointer.
1442 */
1443 static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
1444 {
1445 unsigned long rste;
1446 unsigned int shift;
1447
1448 /* Get the first entry of the top level table */
1449 rste = pgd_val(*pgd);
1450 /* Pick up the shift from the table type of the first entry */
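/*
 * The table type bits encode the shift directly: ((type >> 2) * 11 + 20)
 * yields 53 for a region first table (type 0x0c), 42 for a region second
 * table (0x08), 31 for a region third table (0x04) and 20 for a segment
 * table (0x00), i.e. _REGION1_SHIFT, _REGION2_SHIFT, _REGION3_SHIFT and
 * _SEGMENT_SHIFT.
 */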
1451 shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
1452 return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
1453 }
1454
1455 #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
1456
1457 static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1458 {
1459 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1460 return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1461 return (p4d_t *) pgdp;
1462 }
1463 #define p4d_offset_lockless p4d_offset_lockless
1464
1465 static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1466 {
1467 return p4d_offset_lockless(pgdp, *pgdp, address);
1468 }
1469
1470 static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1471 {
1472 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1473 return (pud_t *) p4d_deref(p4d) + pud_index(address);
1474 return (pud_t *) p4dp;
1475 }
1476 #define pud_offset_lockless pud_offset_lockless
1477
1478 static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1479 {
1480 return pud_offset_lockless(p4dp, *p4dp, address);
1481 }
1482 #define pud_offset pud_offset
1483
1484 static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1485 {
1486 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1487 return (pmd_t *) pud_deref(pud) + pmd_index(address);
1488 return (pmd_t *) pudp;
1489 }
1490 #define pmd_offset_lockless pmd_offset_lockless
1491
1492 static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1493 {
1494 return pmd_offset_lockless(pudp, *pudp, address);
1495 }
1496 #define pmd_offset pmd_offset
1497
1498 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1499 {
1500 return (unsigned long) pmd_deref(pmd);
1501 }
1502
1503 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1504 {
1505 return end <= current->mm->context.asce_limit;
1506 }
1507 #define gup_fast_permitted gup_fast_permitted
1508
1509 #define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1510 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1511 #define pte_page(x) pfn_to_page(pte_pfn(x))
1512
1513 #define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1514 #define pud_page(pud) pfn_to_page(pud_pfn(pud))
1515 #define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1516 #define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1517
1518 static inline pmd_t pmd_wrprotect(pmd_t pmd)
1519 {
1520 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1521 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1522 }
1523
1524 static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
1525 {
1526 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1527 if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1528 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1529 return pmd;
1530 }
1531
1532 static inline pmd_t pmd_mkclean(pmd_t pmd)
1533 {
1534 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1535 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1536 }
1537
1538 static inline pmd_t pmd_mkdirty(pmd_t pmd)
1539 {
1540 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1541 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1542 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1543 return pmd;
1544 }
1545
1546 static inline pud_t pud_wrprotect(pud_t pud)
1547 {
1548 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1549 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1550 }
1551
1552 static inline pud_t pud_mkwrite(pud_t pud)
1553 {
1554 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1555 if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1556 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1557 return pud;
1558 }
1559
1560 static inline pud_t pud_mkclean(pud_t pud)
1561 {
1562 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1563 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1564 }
1565
1566 static inline pud_t pud_mkdirty(pud_t pud)
1567 {
1568 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1569 if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1570 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1571 return pud;
1572 }
1573
1574 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
massage_pgprot_pmd(pgprot_t pgprot)1575 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1576 {
1577 /*
1578 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1579 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1580 */
1581 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1582 return pgprot_val(SEGMENT_NONE);
1583 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1584 return pgprot_val(SEGMENT_RO);
1585 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1586 return pgprot_val(SEGMENT_RX);
1587 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1588 return pgprot_val(SEGMENT_RW);
1589 return pgprot_val(SEGMENT_RWX);
1590 }
1591
pmd_mkyoung(pmd_t pmd)1592 static inline pmd_t pmd_mkyoung(pmd_t pmd)
1593 {
1594 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1595 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1596 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1597 return pmd;
1598 }
1599
pmd_mkold(pmd_t pmd)1600 static inline pmd_t pmd_mkold(pmd_t pmd)
1601 {
1602 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1603 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1604 }
1605
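/*
 * Keep only the origin, (soft) dirty, young and large bits of the old
 * entry, apply the new protection bits, and re-assert PROTECT/INVALID
 * according to the resulting dirty/young state.
 */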
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long mask;

	mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	mask |= _SEGMENT_ENTRY_DIRTY;
	mask |= _SEGMENT_ENTRY_YOUNG;
	mask |= _SEGMENT_ENTRY_LARGE;
	mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd = __pmd(pmd_val(pmd) & mask);
	pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	return __pmd(physpage + massage_pgprot_pmd(pgprot));
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

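/*
 * Invalidate a segment table entry with the CSP (compare and swap and
 * purge) instruction, which also purges the TLB. Only the low word of
 * the 64-bit entry holds the INVALID bit, hence the +1 word offset.
 */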
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

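/*
 * Options for the IDTE (invalidate DAT table entry) instruction. The
 * local/global selector controls whether TLB entries are cleared on the
 * local CPU only or on all CPUs; the remaining option bits are or'ed into
 * the second operand of IDTE.
 */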
#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

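/*
 * Invalidate a segment table entry and flush the corresponding TLB
 * entries with IDTE. The instruction needs the origin of the segment
 * table, which is derived by stepping back from the entry pointer by the
 * entry index. Without options the short form (no guest ASCE) is used.
 */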
static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

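/*
 * Same as __pmdp_idte() for region-third (pud) entries; the table type is
 * encoded into the table origin as required by IDTE.
 */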
static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	idte	%[r1],0,%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	idte	%[r1],%[r3],%[r2],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

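/*
 * Exchange a pmd/pud entry (implemented in arch/s390/mm/pgtable.c). The
 * "direct" variants flush the TLB immediately, while the "lazy" variant
 * may defer the flush.
 */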
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	set_pmd(pmdp, entry);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
	pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

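/*
 * With "full" set the caller is tearing down the whole address space and
 * takes care of flushing the TLB itself, so the entry can simply be
 * replaced; otherwise do a (lazily flushed) exchange.
 */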
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd;

	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

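/*
 * Transparent huge pages require EDAT1 (enhanced DAT facility 1), which
 * provides hardware support for 1 MB segment mappings.
 */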
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * |                        offset                      |E11XX|type |S0|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

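/*
 * Example (illustrative values): __swp_entry(3, 0x1234) yields a pte with
 * _PAGE_INVALID and _PAGE_PROTECT set, the offset 0x1234 shifted into
 * bits 0-51 (<< __SWP_OFFSET_SHIFT) and the type 3 shifted into bits
 * 57-61 (<< __SWP_TYPE_SHIFT).
 */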
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
extern void vmem_unmap_4k_page(unsigned long addr);
extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

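/*
 * The pte table behind a segment entry is 2 KB (PTRS_PER_PTE * 8 bytes);
 * masking with its negated size strips the low status bits of the pmd
 * value and leaves the page table origin.
 */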
#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */