/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1,
 *     as they are semantically the same, just in different contexts:
 *     VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits
 *     This is a must for 4K pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *     to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -this speeds up page table allocation itself as we now have to memset 1K
 *    instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and rest 7K is unused
 *    need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/bits.h>
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different values in the TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined forms a translation entry)
 *      while from the PTE perspective, they are 8 and 9 respectively.
 * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss hdlrs)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif
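
/*
 * Illustrative sketch (not part of the original header): under the MMU v3
 * layout above, a present, cacheable, user read+write page would carry
 *
 *	pte_val == _PAGE_PRESENT | _PAGE_CACHEABLE | _PAGE_READ | _PAGE_WRITE
 *	        == (1<<9) | (1<<0) | (1<<3) | (1<<2) == 0x20d
 *
 * in its low flag bits, with the page frame address in the bits above
 * PAGE_SHIFT. Per the note above, the (H) bits sit at their hardware
 * positions, while the (S) bits (ACCESSED/DIRTY) exist only for Linux VM
 * bookkeeping.
 */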

/* Defaults for every user page */
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)

/* More abbreviated helpers */
#define PAGE_U_NONE     __pgprot(___DEF)
#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
						       _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While the kernel itself runs out of untranslated space, vmalloc/modules
 * use a chunk of user vaddr space - visible in all addr spaces, but kernel
 * mode only. Thus Global, all-kernel-access, no-user-access, cached.
 */
#define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#else
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
#endif
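
/*
 * Illustrative sketch (an assumption about how the masks are consumed; see
 * arch/arc/mm/tlbex.S for the real refill code): a TLB entry is two words,
 * and the masks above carve the PTE into them roughly as
 *
 *	pd0 = (vaddr & PAGE_MASK) | (pte_val & PTE_BITS_IN_PD0);
 *	pd1 = pte_val & (PTE_BITS_RWX | PTE_BITS_NON_RWX_IN_PD1);
 *
 * i.e. GLOBAL/PRESENT/size ride along with the virtual address in PD0, while
 * paddr, cacheability and the RWX permissions go in PD1.
 */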

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 *       which directly corresponds to  PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable COW mechanism
 */
	/* xwr */
#define __P000  PAGE_U_NONE
#define __P001  PAGE_U_R
#define __P010  PAGE_U_R	/* Pvt-W => !W */
#define __P011  PAGE_U_R	/* Pvt-W => !W */
#define __P100  PAGE_U_X_R	/* X => R */
#define __P101  PAGE_U_X_R
#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111  PAGE_U_X_R	/* Pvt-W => !W */

#define __S000  PAGE_U_NONE
#define __S001  PAGE_U_R
#define __S010  PAGE_U_W_R	/* W => R */
#define __S011  PAGE_U_W_R
#define __S100  PAGE_U_X_R	/* X => R */
#define __S101  PAGE_U_X_R
#define __S110  PAGE_U_X_W_R	/* X => R */
#define __S111  PAGE_U_X_W_R
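
/*
 * Illustrative sketch (generic mm behaviour, not ARC specific): the __P/__S
 * entries above populate protection_map[], which vm_get_page_prot() indexes
 * with the low vm_flags bits. So a private writable mapping
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
 *	     -1, 0);
 *
 * starts out as __P011 == PAGE_U_R, i.e. without _PAGE_WRITE: the first
 * store faults, and the COW handling in mm/memory.c gives the task a private
 * copy made writable via pte_mkwrite() below.
 */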

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]             32 bit virtual address              [0]
 * -------------------------------------------------------
 * |               | <------------ PGDIR_SHIFT --------> |
 * |               |                                     |
 * | BITS_FOR_PGD  |  BITS_FOR_PTE  | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                  |                |
 *       |                  |                --> off in page frame
 *       |                  ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21  -> 11:8:13 address split
 *  - PGDIR_SHIFT 24  -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	BIT(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define	PTRS_PER_PTE	BIT(BITS_FOR_PTE)
#define	PTRS_PER_PGD	BIT(BITS_FOR_PGD)
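
/*
 * Worked example (illustrative, following the defaults above): with 8K pages
 * (PAGE_SHIFT == 13) and PGDIR_SHIFT == 21,
 *
 *	BITS_FOR_PTE = 21 - 13 = 8   -> PTRS_PER_PTE = 256
 *	BITS_FOR_PGD = 32 - 21 = 11  -> PTRS_PER_PGD = 2048
 *
 * so for vaddr 0x12345678: pgd index = 0x12345678 >> 21 = 0x91,
 * pte index = (0x12345678 >> 13) & 0xff = 0xa2, page offset = 0x1678.
 */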

/*
 * Number of PGD entries a user land program can use.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS      0UL


/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}
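
/*
 * Illustrative sketch (an assumption; the real allocator glue lives in
 * arch/arc/include/asm/pgalloc.h): hooking a freshly zeroed page table into
 * the PGD needs no flag gymnastics, since an ARC PGD entry is either 0 or a
 * plain pointer:
 *
 *	pmd_set(pmd, new_pte_table);
 *
 * after which pmd_present() is true and pmd_bad() is false, provided the
 * table is page-aligned.
 */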

#define pte_none(x)			(!pte_val(x))
#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)			(!pmd_val(x))
#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)			(pmd_val(x))
#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
 * and returns ptr to PTE entry corresponding to @addr
 */
#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
					 __pte_index(addr))

/* No mapping of Page Tables in high mem etc, so following same as above */
#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
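
/*
 * Illustrative sketch of a full 2-level walk (an assumption; the real fault
 * path in arch/arc/mm has more checks). With the PMD folded onto the PGD,
 * the PGD entry itself acts as the "pmd" handed to pte_offset():
 *
 *	pmd_t *pmd = (pmd_t *)pgd_offset(mm, addr);
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *ptep = pte_offset(pmd, addr);
 *		if (pte_present(*ptep))
 *			pfn = pte_pfn(*ptep);
 *	}
 */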

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,	|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,	|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,	|= (_PAGE_HW_SZ));
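
/*
 * For reference (expansion of the generator above, not new code): e.g.
 * PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)) produces
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= (_PAGE_DIRTY);
 *		return pte;
 *	}
 *
 * i.e. each helper transforms a PTE by value; callers must write the result
 * back with set_pte_at().
 */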

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
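
/*
 * Illustrative sketch (an assumption about a typical caller such as
 * mprotect): pte_modify() swaps the protection bits but keeps the pfn and
 * the sticky software state selected by _PAGE_CHG_MASK:
 *
 *	pte_t old = *ptep;			// dirty, young, RW
 *	pte_t newpte = pte_modify(old, PAGE_U_R);
 *	// now read-only, but still dirty/young, same pfn
 */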

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * All kernel related VM pages are in init's mm.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
 * becomes reading a register
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of a NON "current" task
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifdef ARC_USE_SCRATCH_REG
#define pgd_offset_fast(mm, addr)	\
({					\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
	pgd_base + pgd_index(addr);	\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
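
/*
 * Illustrative usage (an assumption; see the CAUTION above): the fast
 * variant is only safe where the mm is known to be current's, e.g.
 *
 *	pgd_t *pgd = pgd_offset_fast(current->active_mm, addr);
 *
 * whereas code walking an arbitrary task's tables must stick to
 * pgd_offset(mm, addr).
 */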

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * _PAGE_PRESENT is zero in a PTE holding a swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
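
/*
 * Illustrative roundtrip (an assumption about typical use by the generic
 * swap code): with 5 bits of type and the offset starting at bit 13,
 *
 *	swp_entry_t ent = __swp_entry(3, 0x42);	// .val == (0x42 << 13) | 3
 *	pte_t pte = __swp_entry_to_pte(ent);	// pte_present(pte) is false
 *	__swp_type(__pte_to_swp_entry(pte));	// == 3
 *	__swp_offset(__pte_to_swp_entry(pte));	// == 0x42
 */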

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

#include <asm-generic/pgtable.h>

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASSEMBLY__ */

#endif