// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

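/*
 * Memory type applied to PTEs that must stay coherent with other user
 * mappings of the same page.  check_writebuffer_bugs() downgrades this
 * from bufferable to uncached at boot if the write buffer turns out to
 * have physical address aliasing issues.
 */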
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem: we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore, configurations that might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support a split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
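		/*
		 * Flush the page out of the data cache and the outer
		 * cache, then switch the PTE over to shared_pte_mask
		 * (bufferable, or uncached if the write buffer is not
		 * coherent) and zap the stale TLB entry so the new
		 * attributes take effect.
		 */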
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using the shared mm->page_table_lock,
 * which is already held, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use the nested variant here to indicate that we already
	 * hold another lock of the same class.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

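	/*
	 * Walk the page tables by hand.  If any level is missing or
	 * bad, no PTE is mapped at this address and there is nothing
	 * to adjust.
	 */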
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
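	/*
	 * If any other mapping of this page had to be adjusted, the
	 * mapping that caused the fault must be made uncacheable as
	 * well so that every alias sees the same data.
	 */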
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
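	/*
	 * With a VIVT data cache, other user mappings of a file-backed
	 * page may alias the one being set up, so bring them into line;
	 * for other cache types it is enough to invalidate the I-cache
	 * when the mapping is executable.
	 */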
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it does, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

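	/*
	 * p1 and p2 are two virtual mappings of the same physical
	 * page.  Write through one mapping, then the other, and read
	 * back through the first; if the write buffer aliases on
	 * physical address, the read can return the stale value.
	 */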
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

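		/*
		 * Map the same page twice with the bufferable memory
		 * type, so that the coherency test writes go through
		 * the write buffer via two different virtual addresses.
		 */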
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}