// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/traps.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);
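
/*
 * vmalloc_fault() exists because kernel mappings in the vmalloc/ioremap
 * range are created only in the reference page table, init_mm.pgd.  A
 * task's page table only snapshots the kernel entries that existed when
 * it was set up, so a mapping created later must be copied in lazily, on
 * the first faulting access from that task.
 */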
static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

	/* Copy the kernel's top-level entry into the task's page table. */
	pgd_val(*pgd) = pgd_val(*pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	pmd_val(*pmd) = pmd_val(*pmd_k);

	/* After the pmd copy, the PTE page is shared with init_mm, so the
	 * PTE only needs to be present, not copied.
	 */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
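
/*
 * Illustrative only (not from this file): the typical way to reach
 * vmalloc_fault() is a first touch of a mapping that postdates the
 * faulting task's page table, e.g.:
 *
 *	u32 *buf = vmalloc(PAGE_SIZE);	// PTEs installed in init_mm only
 *	buf[0] = 1;			// may fault once per address space;
 *					// the handler copies the pgd/pmd
 *					// entries and the store is replayed
 */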

/*
 * This routine handles page faults.  It determines the address and the
 * problem, then passes it off to one of the appropriate routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;
	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}
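
	/* A user-mode access to a kernel address deliberately falls
	 * through to the regular path below: no VMA will match, so the
	 * task gets SIGSEGV/SEGV_MAPERR instead of a kernel fixup.
	 */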

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}
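
	/* faulthandler_disabled() is true in atomic context and inside
	 * pagefault_disable() sections; !mm means a kernel thread, which
	 * has no user address space of its own.
	 */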

	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec =  (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		    exccause == EXCCAUSE_ITLB_MISS ||
		    exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
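
	/* The classification above maps the hardware exception cause onto
	 * the generic fault model: the store-side cause becomes a write
	 * fault, the three instruction-side causes become exec faults,
	 * and everything else is treated as a read.
	 */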

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;
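
	/* lock_mm_and_find_vma() above takes the mmap read lock and
	 * looks up the VMA covering the address, expanding the stack if
	 * the address sits under a grows-down VMA.  On failure it
	 * returns NULL with the lock already dropped, hence the
	 * _nosemaphore label.
	 */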

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	}
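
	/* A PROT_NONE mapping has neither VM_READ nor VM_WRITE set, so
	 * it also fails the read branch above and ends up in bad_area
	 * with SEGV_ACCERR.
	 */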

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}
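
	/* fault_signal_pending() is only true when the core mm returned
	 * VM_FAULT_RETRY because a signal interrupted the fault; in that
	 * case the mmap lock has already been released, so returning
	 * without unlocking is correct.
	 */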

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __folio_lock_or_retry()
		 * in mm/filemap.c.
		 */

		goto retry;
	}
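
	/* The retry path re-enters through lock_mm_and_find_vma(), which
	 * re-acquires the mmap lock and re-validates the VMA from
	 * scratch; FAULT_FLAG_TRIED records that one attempt was already
	 * made.
	 */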

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);
bad_area_nosemaphore:
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;

	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
}
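
/*
 * Called for kernel-mode faults that cannot be handled: give the
 * exception tables a chance to redirect the fault first (e.g. a failed
 * user-copy), otherwise report the bad access and die.
 */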
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	entry = search_exception_tables(regs->pc);
	if (entry != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		regs->pc = entry->fixup;
		return;
	}
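
	/* The fixup address comes from the __ex_table entry recorded for
	 * the faulting instruction at build time; resuming there lets
	 * helpers such as copy_from_user() fail gracefully instead of
	 * oopsing.
	 */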

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}
257