// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

void bad_page_fault(struct pt_regs*, unsigned long, int);

static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
#ifdef CONFIG_MMU
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	if (act_mm == NULL)
		goto bad_page_fault;

	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

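	/* Copy the missing top-level entry from the reference table.
	 * The lower-level tables for vmalloc space are shared with
	 * init_mm, so once this entry is in place the walk below only
	 * verifies that the mapping really exists.
	 */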
	pgd_val(*pgd) = pgd_val(*pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

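	/* xtensa page tables are two-level, so the p4d/pud/pmd levels are
	 * folded and this copy hits the same top-level entry as the pgd
	 * copy above; the walk is spelled out level by level only to
	 * match the generic page-table API.
	 */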
	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
#else
	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
#endif
}
/*
 * This routine handles page faults. It determines the faulting address
 * and the nature of the problem, then passes the fault off to one of
 * the appropriate routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}
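	/* A user-mode access to a kernel virtual address deliberately
	 * falls through here: no user vma can cover it, so the vma
	 * lookup below fails and the task gets a SIGSEGV via bad_area.
	 */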

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

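	/* Classify the access from the hardware exception cause code: a
	 * store-cache-attribute exception is a write fault, the
	 * instruction-side causes mark an exec fault, and everything
	 * else is treated as a data read.
	 */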
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
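	/* find_vma() returns the first vma that ends above 'address';
	 * the checks below decide whether it actually covers the address
	 * or is a stack vma that may be grown down to reach it.
	 */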

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

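	/* If a signal interrupted the fault, handle_mm_fault() has
	 * already dropped the mmap lock on its retry path; a user task
	 * takes the signal on return, but a faulting kernel context
	 * must be fixed up (or killed) right here.
	 */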
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void __noreturn die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}
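	/* No fixup entry covers this pc, so the fault did not come from
	 * a marked user accessor such as get_user() or copy_from_user();
	 * this is a genuine kernel bug.
	 */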

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
}
