/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

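/*
 * The auto-refill TLBs are indexed by a combined value: the low bits
 * select the way and the bits at PAGE_SHIFT and above select the entry
 * within that way, i.e. e = way + (index << PAGE_SHIFT) below.
 */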
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous tlb entries. If mm is someone else's user
 * mapping, we invalidate the context, so that when that user mapping is
 * swapped in, a new context will be assigned to it.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}

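/*
 * Total number of auto-refill entries per TLB: number of ways times
 * entries per way. The larger of the two bounds how many pages can be
 * invalidated individually before a full flush becomes cheaper.
 */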
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

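/*
 * Flush a user address range: if the range spans no more pages than the
 * TLB has entries, drop each page mapping individually under the mm's
 * ASID (ITLB and DTLB for executable VMAs, DTLB only otherwise);
 * otherwise invalidate the whole context via local_flush_tlb_mm().
 */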
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC) {
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		} else {
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

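/*
 * Flush a kernel virtual range. Ranges between TASK_SIZE and PAGE_OFFSET
 * that fit within the TLB are dropped entry by entry from both ITLB and
 * DTLB; anything else falls back to a full TLB flush.
 */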
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

void update_mmu_tlb(struct vm_area_struct *vma,
		    unsigned long address, pte_t *ptep)
{
	local_flush_tlb_page(vma, address);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

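/*
 * Walk the current task's page tables and return the raw PTE value for
 * vaddr, or 0 if no valid mapping is present at any level.
 */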
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);
	return pteval;
}

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked as non-present. A non-present PTE together with a page
 * that has non-zero refcount and zero mapcount is normal for a batched TLB
 * flush operation. Zero refcount means that the page was freed prematurely.
 * Non-zero mapcount is unusual, but does not necessarily mean an error,
 * and is thus marked as suspicious.
 */
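/*
 * Worked example of the tlbidx encoding used below (assuming the common
 * 4 KiB pages, i.e. PAGE_SHIFT == 12): way 1, entry 3 gives
 * tlbidx = 1 | (3 << 12) = 0x3001; the low bits select the way and the
 * bits at PAGE_SHIFT and above select the entry within that way.
 */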
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

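/*
 * Walk every auto-refill way and entry of both TLBs with interrupts
 * disabled; BUG() on insane entries and WARN() on suspicious ones.
 */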
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */