/*
 * Taken from kernel for decoupling from <asm/page.h>. --zaitcev
 *
 * $Id: pgtsrmmu.h,v 1.2 1999/04/19 01:04:31 zaitcev Exp $
 * pgtsrmmu.h:  SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _PGTSRMMU_H
#define _PGTSRMMU_H

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define SRMMU_PMD_SHIFT         18
#define SRMMU_PMD_SIZE          (1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK          (~(SRMMU_PMD_SIZE-1))
#define SRMMU_PMD_ALIGN(addr)   (((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK)

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT       24
#define SRMMU_PGDIR_SIZE        (1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK        (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_PTRS_PER_PTE      64
#define SRMMU_PTRS_PER_PMD      64
#define SRMMU_PTRS_PER_PGD      256

#define SRMMU_PTE_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PMD_TABLE_SIZE    0x100 /* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE    0x400 /* 256 entries, 4 bytes a piece */
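
/* Illustrative sketch, not part of the original header: given the shifts
 * above, a 32-bit virtual address decomposes into a first-level index
 * (bits 31-24, 256 entries), a second-level index (bits 23-18, 64
 * entries) and a third-level index (bits 17-12, 64 entries over 4K
 * pages).  The helper names below are hypothetical and exist only to
 * demonstrate the layout; they are guarded so assembly includes still
 * work.
 */
#ifndef __ASSEMBLY__
static __inline__ unsigned long srmmu_pgd_index(unsigned long vaddr)
{
	return vaddr >> SRMMU_PGDIR_SHIFT;	/* bits 31-24 */
}

static __inline__ unsigned long srmmu_pmd_index(unsigned long vaddr)
{
	return (vaddr >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);	/* bits 23-18 */
}

static __inline__ unsigned long srmmu_pte_index(unsigned long vaddr)
{
	return (vaddr >> 12) & (SRMMU_PTRS_PER_PTE - 1);	/* bits 17-12 */
}
#endif /* !(__ASSEMBLY__) */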

#define SRMMU_VMALLOC_START   (0xfe300000)
#define SRMMU_VMALLOC_END     ~0x0UL

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00
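
/* Illustrative sketch, not part of the original header: the PPN field of
 * a PTE holds the physical address shifted right by 12, stored starting
 * at bit 8, so masking with SRMMU_PTE_PMASK and shifting left by 4
 * recovers the page-aligned physical address (truncated to 32 bits here;
 * SRMMU can address 36 bits physically).  Same encoding as the
 * srmmu_get_ctable_ptr() accessor below.
 */
#ifndef __ASSEMBLY__
static __inline__ unsigned long srmmu_pte_to_paddr(unsigned long pte)
{
	return (pte & SRMMU_PTE_PMASK) << 4;
}
#endif /* !(__ASSEMBLY__) */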

/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages.
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes.
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18
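
/* Illustrative sketch (hypothetical helper, not in the original header):
 * composing a valid PTE from a page-aligned physical address and the
 * protection bits above, mirroring the (paddr >> 4) & PMASK encoding
 * used by srmmu_set_ctable_ptr() below.
 */
#ifndef __ASSEMBLY__
static __inline__ unsigned long srmmu_mk_pte(unsigned long paddr,
					     unsigned long prot)
{
	/* e.g. prot = SRMMU_CACHE | SRMMU_PRIV for a cached kernel page */
	return ((paddr >> 4) & SRMMU_PTE_PMASK) | prot | SRMMU_VALID;
}
#endif /* !(__ASSEMBLY__) */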

#define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* SRMMU register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400

#ifndef __ASSEMBLY__

/* Accessing the MMU control register. */
static __inline__ unsigned int srmmu_get_mmureg(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

static __inline__ void srmmu_set_mmureg(unsigned long regval)
{
	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

static __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
{
	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS) :
			     "memory");
}

static __inline__ unsigned long srmmu_get_ctable_ptr(void)
{
	unsigned long retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}

static __inline__ void srmmu_set_context(int context)
{
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (context), "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS) : "memory");
}

static __inline__ int srmmu_get_context(void)
{
	int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS));
	return retval;
}

static __inline__ unsigned int srmmu_get_fstatus(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return retval;
}

static __inline__ unsigned int srmmu_get_faddr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
	return retval;
}

/* This is guaranteed on all SRMMU's. */
static __inline__ void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),        /* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* These flush types are not available on all chips... */
static __inline__ void srmmu_flush_tlb_ctx(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x300),        /* Flush TLB ctx.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static __inline__ void srmmu_flush_tlb_region(unsigned long addr)
{
	addr &= SRMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x200), /* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
{
	addr &= SRMMU_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x100), /* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static __inline__ void srmmu_flush_tlb_page(unsigned long page)
{
	page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (page),        /* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

static __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return retval;
}

static __inline__ unsigned long srmmu_get_pte(unsigned long addr)
{
	unsigned long entry;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (entry) :
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}
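
/* Illustrative usage sketch (hypothetical helper, not in the original
 * header): a successful hardware probe returns the matched PTE, whose
 * ET field is SRMMU_ET_PTE, while a miss yields a word whose ET field
 * is SRMMU_ET_INVALID, so checking whether a virtual address is mapped
 * reduces to testing the ET bits of the probe result.
 */
static __inline__ int srmmu_probe_is_mapped(unsigned long vaddr)
{
	return (srmmu_hwprobe(vaddr) & SRMMU_ET_MASK) == SRMMU_ET_PTE;
}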

#endif /* !(__ASSEMBLY__) */

#endif /* !(_PGTSRMMU_H) */