/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
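/*
 * A worked illustration of the common case (these numbers are the
 * usual 4K-page configuration, not additional definitions): with
 * PAGE_SHIFT = 12 and 32-bit PTEs, PTE_SHIFT is 10, so
 *
 *	PGDIR_SHIFT  = 12 + 10 = 22	(each pgd entry maps 4MB)
 *	PTRS_PER_PTE = 1 << 10 = 1024
 *	PTRS_PER_PGD = 1 << (32 - 22) = 1024
 *
 * and a virtual address splits as
 *
 *	va[31:22] pgd index | va[21:12] pte index | va[11:0] page offset
 *
 * matching the 1-page, 1024-entry pgdir and PTE pages described above.
 */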

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
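/*
 * For example (the address is made up for illustration): with
 * high_memory at 0xd0000000 and no PPC_PIN_SIZE, the rounding above
 * gives
 *
 *	VMALLOC_START = (0xd0000000 + 0x1000000) & ~0xffffff
 *	              = 0xd1000000
 *
 * i.e. the vmalloc area starts at a 16M boundary above the top of
 * lowmem.
 */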

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't do this simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
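/*
 * As an illustration, with 4K pages (PAGE_SHIFT = 12) and the default
 * PTE_RPN_SHIFT: a 32-bit PTE gets PTE_RPN_MASK = 0xfffff000, leaving
 * the low 12 bits of the PTE for protection and status bits, while the
 * ULL variant lets 64-bit PTEs carry physical page numbers above 4GB.
 */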

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
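/*
 * The main consumer is pte_modify(), which keeps exactly these bits
 * while applying a new protection; roughly:
 *
 *	__pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))
 */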

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	unsigned long new = (old & ~clr) | set;

#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
#else
	*p = __pte(new);
#endif
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
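
/*
 * Everything below is built on pte_update(): for instance, clearing
 * just the accessed bit is pte_update(ptep, _PAGE_ACCESSED, 0), and
 * the returned old value tells the caller whether the bit was set --
 * which is exactly what __ptep_test_and_clear_young() does.
 */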

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
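/*
 * Instead of hard-coding _PAGE_RW, derive the bits to clear and set by
 * running pte_wrprotect() over an all-ones and an all-zeroes PTE: the
 * bits it removes from ~0 must be cleared and the bits it adds to 0
 * must be set.  Sub-arches with a different write-protect encoding
 * (e.g. a read-only bit rather than a read-write bit) thus work
 * unchanged.
 */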
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(ptep, clr, set);
}

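/*
 * Same derivation trick as in ptep_set_wrprotect(): pte_set collects
 * the bits the mk*() helpers turn on in a zero PTE, and pte_clr the
 * bits they turn off in an all-ones PTE.  Only those young/dirty/
 * write/exec bits are then copied from @entry -- set where @entry has
 * them, cleared where it doesn't -- so no unrelated PTE bits can be
 * touched here.
 */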
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
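
/*
 * Putting the levels together, a kernel-address lookup goes roughly
 * like this (sketch only; the folded pud/pmd helpers come from the
 * generic headers included above, and error checking is omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * With pmd and pud folded, the middle two steps just pass the pgd
 * entry through to pte_offset_kernel().
 */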

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
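/*
 * Layout implied by the macros above (the low three PTE bits are
 * skipped so that _PAGE_PRESENT can never be set in a swap PTE):
 *
 *	PTE bits:  | offset (bits 8..) | type (bits 3-7) | 0 0 0 |
 *
 * so __pte_to_swp_entry()/__swp_entry_to_pte() round-trip everything
 * except those low three bits.
 */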

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */