xref: /linux/arch/m68k/include/asm/tlbflush.h (revision 44f57d78)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>

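/*
 * Flush strategy notes: the ColdFire code does not flush per page, it
 * clears all non-locked TLB entries (MMUOR_CNL).  On 68040/060 the
 * PFLUSH instruction takes its address space from the SFC/DFC
 * registers, which is why the helpers below temporarily switch
 * set_fs() to KERNEL_DS or USER_DS.  On 68020/030 PFLUSH selects
 * entries by function code: #4,#4 matches supervisor space, #0,#4
 * matches user space.
 */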
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
	}
}

/*
 * flush all user-space atc entries.
 */
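/*
 * "pflushan" ('040/'060) invalidates only non-global ATC entries, so
 * global (kernel) translations survive; the ColdFire path again clears
 * all non-locked TLB entries.
 */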
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}

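/*
 * Flush one ATC entry on '040/'060.  The address space (user vs.
 * supervisor) comes from the current DFC, which callers select
 * beforehand via set_fs().
 */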
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
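/*
 * Unlike "pflushan" above, "pflusha" drops every ATC entry, global
 * (kernel) ones included.
 */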
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}

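/*
 * There is no per-mm selective flush here; __flush_tlb() drops user
 * entries wholesale, so only bother when mm is the currently active one.
 */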
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}

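/*
 * Range flushes are not done page by page: user ranges just drop all
 * user entries, kernel ranges drop everything.
 */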
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

#else /* CONFIG_SUN3 */


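/*
 * Sun3: the MMU maps virtual space through a per-context segment map,
 * with each SUN3_PMEG_SIZE segment pointing at a PMEG (Page Map Entry
 * Group).  "Flushing" therefore means pointing segment map entries at
 * SUN3_INVALID_PMEG and releasing the software pmeg_alloc/pmeg_ctx/
 * pmeg_vaddr bookkeeping below.
 */
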
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	i = sun3_get_segmap(addr);
	if (i != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
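/* Walk the range in PMEG-sized steps; only PMEGs owned by this context
   get their bookkeeping released, but the segment map entry is always
   invalidated. */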
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		seg = sun3_get_segmap(start);
		if (seg == SUN3_INVALID_PMEG)
			goto next;
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
	sun3_put_context(oldctx);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
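/* (In fact this drops the whole PMEG containing the page, not just the
   one page.) */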
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif /* !CONFIG_SUN3 */

#else /* !CONFIG_MMU */

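/*
 * Without an MMU there is no TLB to maintain; these stubs only satisfy
 * the generic interface and must never be reached.
 */
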
/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */