Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>

/*
 * do_page_fault error handling helpers
 */

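/*
 * These helpers queue a signal for userspace faults and return 0, or
 * return a signal number for the interrupt code to act on when the
 * faulting access came from kernel mode.
 */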
static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV; this will
	 * be caught by the assembly, which will restore the non-volatile
	 * registers before calling bad_page_fault().
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	int pkey;

	/*
	 * We don't try to fetch the pkey from the page table because reading
	 * the page table without locking doesn't guarantee a stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused the access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1 : set AMR to deny access to pkey=4, touches page
	 * 3. T1 : faults...
	 * 4. T2 : mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1 : enters fault handler, takes mmap_lock, etc...
	 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
	 *	   faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	mmap_read_unlock(mm);

	/*
	 * If we are in kernel mode, bail out with a SEGV; this will
	 * be caught by the assembly, which will restore the non-volatile
	 * registers before calling bad_page_fault().
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

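/*
 * Deliver SIGBUS to the current task. For hardware memory corruption
 * (VM_FAULT_HWPOISON*), report BUS_MCEERR_AR along with the address
 * granularity (lsb) of the poisoned mapping.
 */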
static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shutup gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

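/*
 * Translate the VM_FAULT_* error bits from handle_mm_fault() into a
 * signal for userspace, or into a signal number for the caller to
 * handle when the fault came from kernel mode.
 */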
static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}

/* Is this a bad kernel fault? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad; it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    is_write ? "write" : "read", address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (e.g. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad; it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}

static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: that code used not to be enabled for 4xx/BookE.
	 * It is now, as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	/*
	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
	 * defined in protection_map[]. Read faults can only be caused by
	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
	 */
	if (unlikely(!vma_is_accessible(vma)))
		return true;

	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
		return true;

	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}

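/*
 * On shared-memory partitions with Cooperative Memory Overcommitment
 * (CMO) firmware support, major page faults are counted as page-ins
 * in the lppaca, which is shared with the hypervisor.
 */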
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

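/*
 * Warn about suspicious faults: user access to a kernel address, or an
 * unexpected DSISR_PROTFAULT on Book3S hash MMUs (see the discussion
 * below).
 */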
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte that reduces access will result in
	 * us removing the hash page table entry, thus resulting in a
	 * DSISR_NOHPTE fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do an hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache
	 * sync is done via a fault. But that is handled by the low level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the above
	 * vma->vm_flags check should handle those, and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded with per-page exec support that doesn't support a
	 * coherent icache, we do get PROTFAULT and we handle that D/I cache
	 * sync in set_pte_at() while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault for the autonuma case, because
	 * the radix page table will have them marked no-access for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif

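/*
 * "Bad" faults are those flagged in the DSISR/ESR that can never be
 * fixed by handling a page fault, whatever the memory map contains.
 */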
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
static int page_fault_is_bad(unsigned long err)
{
	unsigned long flag = DSISR_BAD_FAULT_64S;

	/*
	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
	 * If byte 0, bit 3 of pi-attribute-specifier-type in
	 * ibm,pi-features property is defined, ignore the DSI error
	 * which is caused by the paste instruction on the
	 * suspended NX window.
	 */
	if (mmu_has_feature(MMU_FTR_NX_DSI))
		flag &= ~DSISR_BAD_COPYPASTE;

	return err & flag;
}
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places.
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in a region with pagefaults disabled, then we must not take the
	 * fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in a fault, which will cause a deadlock when called with
	 * mmap_lock held.
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

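	/*
	 * Fast path: try to handle a user fault under a per-VMA lock taken
	 * inside an RCU read-side critical section, avoiding mmap_lock
	 * entirely. Any complication (no VMA, access error, need to retry)
	 * falls back to the mmap_lock path below.
	 */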
#ifdef CONFIG_PER_VMA_LOCK
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	if (unlikely(access_error(is_write, is_exec, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well-defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);

	if (unlikely(vma->vm_start > address)) {
		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
			return bad_area(regs, address);

		if (unlikely(expand_stack(vma, address)))
			return bad_area(regs, address);
	}


	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto out;

	/*
	 * Handle the retry right now, the mmap_lock has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(current->mm);

#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

out:
	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);

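/*
 * On entry, regs->dar holds the faulting address and regs->dsisr the
 * fault status (DSISR, or bits derived from SRR1 for instruction
 * faults). A non-zero return from ___do_page_fault() is a signal
 * number for a kernel fault, handed to bad_page_fault() to fix up via
 * the exception tables or die.
 */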
static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

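/* Interrupt entry point for data and instruction storage faults. */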
DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 is_write ? "write" : "read", regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case INTERRUPT_ALIGNMENT:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}

void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(instruction_pointer(regs));
	if (entry)
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		__bad_page_fault(regs, sig);
}

#ifdef CONFIG_PPC_BOOK3S_64
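/*
 * Report a fault as a bad access: fix it up via the exception tables
 * if possible, otherwise die with SIGSEGV.
 */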
DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
	bad_page_fault(regs, SIGSEGV);
}

/*
 * In radix, segment interrupts indicate the EA is not addressable by the
 * page table geometry, so they are always sent here.
 *
 * In hash, this is called if do_slb_fault returns error. Typically it is
 * because the EA was outside the region allowed by software.
 */
DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
{
	int err = regs->result;

	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
		else
			bad_page_fault(regs, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}
#endif