/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

/*
 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
 * table containing PTEs, together with a set of 16 segment registers,
 * to define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hash_low.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
#define _PAGE_USER	0x004	/* usermode access allowed */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: exec allowed */
#define _PAGE_RW	0x400	/* software: user write access allowed */
#define _PAGE_SPECIAL	0x800	/* software: Special page */

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK	_PAGE_HASHPTE
#endif
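
/*
 * Worked example: because pte_none() below masks with ~_PTE_NONE_MASK, a
 * cleared PTE that still carries _PAGE_HASHPTE is treated as empty while
 * the hash invalidation is still pending:
 *
 *	pte_t pte = __pte(_PAGE_HASHPTE);	// 0x002
 *	pte_none(pte);				// true, only masked bits are set
 *	pte_present(pte);			// false, _PAGE_PRESENT is clear
 */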

#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)

/* We borrow the _PAGE_USER bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_PAGE_USER

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (i.e., naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
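
/*
 * Worked example: with 4k pages (PAGE_SHIFT == 12) the PFN starts at bit 12
 * of the PTE.  A 32-bit PTE therefore holds a 20-bit PFN (4GB of physical
 * address space), while a 64-bit PTE leaves room for the 24-bit PFNs needed
 * to reach the 36-bit (64GB) physical addresses advertised by
 * MAX_POSSIBLE_PHYSMEM_BITS above.
 */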

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (i.e.,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
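
/*
 * Example of how the masks above combine: PAGE_KERNEL expands to
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT | _PAGE_DIRTY | _PAGE_RW
 * (0x591), while PAGE_KERNEL_NCG expands to _PAGE_PRESENT | _PAGE_ACCESSED |
 * _PAGE_DIRTY | _PAGE_RW | _PAGE_NO_CACHE | _PAGE_GUARDED (0x5a9), i.e.
 * coherency is dropped and cache-inhibit/guarded are set for device-style
 * mappings.
 */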

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		(PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
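
/*
 * Worked example, assuming 4k pages and the usual 32-bit definition of
 * PTE_SHIFT as PAGE_SHIFT - log2(sizeof(pte_t)): with 32-bit PTEs
 * PGDIR_SHIFT is 22, so each of the 1024 PGD entries maps a 4MB chunk;
 * with 64-bit PTEs PGDIR_SHIFT is 21 and each of the 2048 PGD entries
 * maps 2MB.  With the usual TASK_SIZE of 0xc0000000 that gives a
 * USER_PTRS_PER_PGD of 768 (respectively 1536) entries for user space.
 */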

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others; from there we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR	(MODULES_END - SZ_256M)
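
/*
 * Example: with the usual PAGE_OFFSET of 0xc0000000, MODULES_END is
 * 0xc0000000 (already 256MB aligned) and modules are mapped in the 256MB
 * window from 0xb0000000 up to 0xc0000000, directly below the kernel
 * linear mapping.
 */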

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hash_low.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;

		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		unsigned long tmp;

		asm volatile(
#ifndef CONFIG_PTE_64BIT
	"1:	lwarx	%0, 0, %3\n"
	"	andc	%1, %0, %4\n"
#else
	"1:	lwarx	%L0, 0, %3\n"
	"	lwz	%0, -4(%3)\n"
	"	andc	%1, %L0, %4\n"
#endif
	"	or	%1, %1, %5\n"
	"	stwcx.	%1, 0, %3\n"
	"	bne-	1b"
		: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
		: "r" (p),
#else
		: "b" ((unsigned long)(p) + 4),
#endif
		  "r" (clr), "r" (set), "m" (*p)
		: "cc" );
	} else {
		old = pte_val(*p);

		*p = __pte((old & ~(pte_basic_t)clr) | set);
	}

	return old;
}

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	if (old & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);

	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(vma->vm_mm, address, ptep, 0, set, 0);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)		pfn_to_page(pmd_pfn(pmd))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32-bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <----------------- offset --------------------> < type -> E H P
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_PRESENT (P) and _PAGE_HASHPTE (H) must be 0.
 *
 * For 64-bit PTEs, the offset is extended by 32 bits.
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
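
/*
 * Worked example of the encoding above: __swp_entry(1, 0x100) yields an
 * entry value of (0x100 << 5) | 1 == 0x2001, and __swp_entry_to_pte()
 * shifts that left by 3, giving a PTE value of 0x10008.  _PAGE_PRESENT
 * (bit 0), _PAGE_HASHPTE (bit 1) and the exclusive marker (bit 2) stay
 * clear, so the resulting PTE is !pte_none() && !pte_present() as
 * required.
 */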

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find the page table entry at the last level,
 * hence no need for accessors at the other levels.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * Read access is controlled by the _PAGE_USER bit; there is no
	 * separate read permission bit (pte_read() is always true), so
	 * write and execute mappings are readable as well.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even though PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}
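
/*
 * Example: assuming 4k pages, pfn_pte(0x12345, PAGE_KERNEL) builds the PTE
 * value ((pte_basic_t)0x12345 << 12) | 0x591 == 0x12345591.  PFNs wider
 * than 20 bits only fit when CONFIG_PTE_64BIT provides a 64-bit
 * pte_basic_t.
 */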

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.
 *
 * The first case is 32-bit in UP mode with 32-bit PTEs: we need to preserve
 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 * translation in the hash yet (done in a subsequent flush_tlb_xxx()), so
 * we need to keep track that this PTE needs invalidating.
 *
 * The second case is 32-bit with 64-bit PTEs.  In this case, we
 * can just store as long as we do the two halves in the right order
 * with a barrier in between. This is possible because we take care,
 * in the hash code, to pre-invalidate if the PTE was already hashed,
 * which synchronizes us with any concurrent invalidation.
 * In the percpu case, we fall back to the simple update preserving
 * the hash bits (i.e., same as the non-SMP case).
 *
 * The third case is 32-bit in SMP mode with 32-bit PTEs. We use the
 * helper pte_update() which does an atomic update. We need to do that
 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
 * the hash bits instead.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
			      (pte_val(pte) & ~_PAGE_HASHPTE));
	} else if (IS_ENABLED(CONFIG_PTE_64BIT)) {
		if (pte_val(*ptep) & _PAGE_HASHPTE)
			flush_hash_entry(mm, ptep, addr);

		asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" :
			     "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) :
			     "r" (pte) : "memory");
	} else {
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
	}
}

/*
 * Helpers to mark a page protection value as "uncacheable" or to set
 * its other cache attributes.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
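
/*
 * Example: pgprot_noncached(PAGE_KERNEL) clears _PAGE_COHERENT and sets
 * _PAGE_NO_CACHE | _PAGE_GUARDED, which yields exactly the same bits as
 * PAGE_KERNEL_NCG above (0x5a9).  This is the protection ioremap()
 * typically ends up using for device registers.
 */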

#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */