/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 *
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned long oops_begin(struct pt_regs *regs)
{
        int cpu;
        unsigned long flags;

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        if (machine_is(powermac))
                pmac_backlight_unblank();
        return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
                     int signr)
{
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        oops_exit();
        printk("\n");
        if (!die_nest_count) {
                die_owner = -1;
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        }
        raw_local_irq_restore(flags);

        crash_fadump(regs, "die oops");

        /*
         * A system reset (0x100) is a request to dump, so we always send
         * it through the crashdump code.
         */
        if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
                crash_kexec(regs);

                /*
                 * We aren't the primary crash CPU. We need to send it
                 * to a holding pattern to avoid it ending up in the panic
                 * code.
                 */
                crash_kexec_secondary(regs);
        }

        if (!signr)
                return;

        /*
         * While our oops output is serialised by a spinlock, output
         * from panic() called below can race and corrupt it. If we
         * know we are going to panic, delay for 1 second so we have a
         * chance to get clean backtraces from all CPUs that are oopsing.
         */
        if (in_interrupt() || panic_on_oops || !current->pid ||
            is_global_init(current)) {
                mdelay(MSEC_PER_SEC);
        }

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

static int __die(const char *str, struct pt_regs *regs, long err)
{
        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
        if (debug_pagealloc_enabled())
                printk("DEBUG_PAGEALLOC ");
        printk("%s\n", ppc_md.name ? ppc_md.name : "");

        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);

        return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin(regs);

        if (__die(str, regs, err))
                err = 0;
        oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
                              struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = TRAP_TRACE;
        info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                die("Exception in kernel mode", regs, signr);
                return;
        }

        if (show_unhandled_signals && unhandled_signal(current, signr)) {
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, signr,
                                   addr, regs->nip, regs->link, code);
        }

        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
                local_irq_enable();

        current->thread.trap_nr = code;
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
}

void system_reset_exception(struct pt_regs *regs)
{
        /* See if any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        return;
        }

        die("System Reset", regs, SIGABRT);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        /* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1 respectively.
 */
long machine_check_early(struct pt_regs *regs)
{
        long handled = 0;

        __this_cpu_inc(irq_stat.mce_exceptions);

        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
        return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
        __this_cpu_inc(irq_stat.hmi_exceptions);

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 0;
}

#ifdef CONFIG_PPC32
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 */
static inline int check_io_access(struct pt_regs *regs)
{
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == 0x60000000)		/* nop */
                        nip -= 2;
                else if (*nip == 0x4c00012c)	/* isync */
                        --nip;
                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
                        /* sync or twi */
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100) ? "OUT to" : "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
                        regs->msr |= MSR_RI;
                        regs->nip = entry->fixup;
                        return 1;
                }
        }
        return 0;
}
#endif /* CONFIG_PPC32 */

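/*
 * Worked illustration (compiled out, not part of the original file): the
 * debug message above recovers the port from the load/store preceding the
 * sync, whose register-B field sits in bits 11..15 of the instruction
 * word.  A minimal sketch with a hypothetical helper name:
 */
#if 0
static unsigned int example_io_port_reg(unsigned int instr)
{
        return (instr >> 11) & 0x1f;	/* RB: the GPR holding the port address */
}
#endif
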
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & ESR_IMCP) {
                printk("Instruction");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else
                printk("Data");
        printk(" machine check in kernel mode.\n");

        return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        if (reason & ESR_IMCP){
                printk("Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        }
        else {
                u32 mcsr = mfspr(SPRN_MCSR);

                if (mcsr & MCSR_IB)
                        printk("Instruction Read PLB Error\n");
                if (mcsr & MCSR_DRB)
                        printk("Data Read PLB Error\n");
                if (mcsr & MCSR_DWB)
                        printk("Data Write PLB Error\n");
                if (mcsr & MCSR_TLBP)
                        printk("TLB Parity Error\n");
                if (mcsr & MCSR_ICP){
                        flush_instruction_cache();
                        printk("I-Cache Parity Error\n");
                }
                if (mcsr & MCSR_DCSP)
                        printk("D-Cache Search Parity Error\n");
                if (mcsr & MCSR_DCFP)
                        printk("D-Cache Flush Parity Error\n");
                if (mcsr & MCSR_IMPE)
                        printk("Machine Check exception is imprecise\n");

                /* Clear MCSR */
                mtspr(SPRN_MCSR, mcsr);
        }
        return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);
        u32 mcsr;

        printk(KERN_ERR "Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk(KERN_ERR
                       "Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
                return 0;
        }
        mcsr = mfspr(SPRN_MCSR);
        if (mcsr & MCSR_IB)
                printk(KERN_ERR "Instruction Read PLB Error\n");
        if (mcsr & MCSR_DRB)
                printk(KERN_ERR "Data Read PLB Error\n");
        if (mcsr & MCSR_DWB)
                printk(KERN_ERR "Data Write PLB Error\n");
        if (mcsr & MCSR_TLBP)
                printk(KERN_ERR "TLB Parity Error\n");
        if (mcsr & MCSR_ICP) {
                flush_instruction_cache();
                printk(KERN_ERR "I-Cache Parity Error\n");
        }
        if (mcsr & MCSR_DCSP)
                printk(KERN_ERR "D-Cache Search Parity Error\n");
        if (mcsr & PPC47x_MCSR_GPR)
                printk(KERN_ERR "GPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_FPR)
                printk(KERN_ERR "FPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_IPR)
                printk(KERN_ERR "Machine Check exception is imprecise\n");

        /* Clear MCSR */
        mtspr(SPRN_MCSR, mcsr);
        return 0;
}

#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
        unsigned long mcsr = mfspr(SPRN_MCSR);
        unsigned long reason = mcsr;
        int recoverable = 1;

        if (reason & MCSR_LD) {
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");

        if (reason & MCSR_ICPERR) {
                printk("Instruction Cache Parity Error\n");

                /*
                 * This is recoverable by invalidating the i-cache.
                 */
                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
                        ;

                /*
                 * This will generally be accompanied by an instruction
                 * fetch error report -- only treat MCSR_IF as fatal
                 * if it wasn't due to an L1 parity error.
                 */
                reason &= ~MCSR_IF;
        }

        if (reason & MCSR_DCPERR_MC) {
                printk("Data Cache Parity Error\n");

                /*
                 * In write shadow mode we auto-recover from the error, but it
                 * may still get logged and cause a machine check.  We should
                 * only treat the non-write shadow case as non-recoverable.
                 */
                if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
                        recoverable = 0;
        }

        if (reason & MCSR_L2MMU_MHIT) {
                printk("Hit on multiple TLB entries\n");
                recoverable = 0;
        }

        if (reason & MCSR_NMI)
                printk("Non-maskable interrupt\n");

        if (reason & MCSR_IF) {
                printk("Instruction Fetch Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LD) {
                printk("Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_ST) {
                printk("Store Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LDG) {
                printk("Guarded Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_TLBSYNC)
                printk("Simultaneous tlbsync operations\n");

        if (reason & MCSR_BSL2_ERR) {
                printk("Level 2 Cache Error\n");
                recoverable = 0;
        }

        if (reason & MCSR_MAV) {
                u64 addr;

                addr = mfspr(SPRN_MCAR);
                addr |= (u64)mfspr(SPRN_MCARU) << 32;

                printk("Machine Check %s Address: %#llx\n",
                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
        }

silent_out:
        mtspr(SPRN_MCSR, mcsr);
        return mfspr(SPRN_MCSR) == 0 && recoverable;
}

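/*
 * Illustration (compiled out, not part of the original file): on e500mc
 * the machine-check address is split across two 32-bit SPRs, so the
 * MCSR_MAV path above rebuilds the 64-bit value by placing MCARU in the
 * upper word.  A minimal sketch with hypothetical names:
 */
#if 0
static u64 example_combine_mcar(u32 mcar_lo, u32 mcar_hi)
{
        return ((u64)mcar_hi << 32) | mcar_lo;	/* same as the MCAR/MCARU merge */
}
#endif
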
int machine_check_e500(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & MCSR_BUS_RBERR) {
                if (fsl_rio_mcheck_exception(regs))
                        return 1;
                if (fsl_pci_mcheck_exception(regs))
                        return 1;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_ICPERR)
                printk("Instruction Cache Parity Error\n");
        if (reason & MCSR_DCP_PERR)
                printk("Data Cache Push Parity Error\n");
        if (reason & MCSR_DCPERR)
                printk("Data Cache Parity Error\n");
        if (reason & MCSR_BUS_IAERR)
                printk("Bus - Instruction Address Error\n");
        if (reason & MCSR_BUS_RAERR)
                printk("Bus - Read Address Error\n");
        if (reason & MCSR_BUS_WAERR)
                printk("Bus - Write Address Error\n");
        if (reason & MCSR_BUS_IBERR)
                printk("Bus - Instruction Data Error\n");
        if (reason & MCSR_BUS_RBERR)
                printk("Bus - Read Data Bus Error\n");
        if (reason & MCSR_BUS_WBERR)
                printk("Bus - Write Data Bus Error\n");
        if (reason & MCSR_BUS_IPERR)
                printk("Bus - Instruction Parity Error\n");
        if (reason & MCSR_BUS_RPERR)
                printk("Bus - Read Parity Error\n");

        return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
        return 0;
}

#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_CP_PERR)
                printk("Cache Push Parity Error\n");
        if (reason & MCSR_CPERR)
                printk("Cache Parity Error\n");
        if (reason & MCSR_EXCP_ERR)
                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
        if (reason & MCSR_BUS_IRERR)
                printk("Bus - Read Bus Error on instruction fetch\n");
        if (reason & MCSR_BUS_DRERR)
                printk("Bus - Read Bus Error on data load\n");
        if (reason & MCSR_BUS_WRERR)
                printk("Bus - Write Bus Error on buffered store or cache line push\n");

        return 0;
}

#else
int machine_check_generic(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from SRR1=%lx): ", reason);
        switch (reason & 0x601F0000) {
        case 0x80000:
                printk("Machine check signal\n");
                break;
        case 0:		/* for 601 */
        case 0x40000:
        case 0x140000:	/* 7450 MSS error and TEA */
                printk("Transfer error ack signal\n");
                break;
        case 0x20000:
                printk("Data parity error signal\n");
                break;
        case 0x10000:
                printk("Address parity error signal\n");
                break;
        case 0x20000000:
                printk("L1 Data Cache error\n");
                break;
        case 0x40000000:
                printk("L1 Instruction Cache error\n");
                break;
        case 0x00100000:
                printk("L2 data cache parity error\n");
                break;
        default:
                printk("Unknown values in msr\n");
        }
        return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int recover = 0;

        __this_cpu_inc(irq_stat.mce_exceptions);

        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
         * one returns a positive number. However there is existing code
         * that assumes the board gets a first chance, so let's keep it
         * that way for now and fix things later. --BenH.
         */
        if (ppc_md.machine_check_exception)
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)
                recover = cur_cpu_spec->machine_check(regs);

        if (recover > 0)
                goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        goto bail;
#endif

        if (debugger_fault_handler(regs))
                goto bail;

        if (check_io_access(regs))
                goto bail;

        die("Machine check", regs, SIGBUS);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");

bail:
        exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
        die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);
        irq_enter();

        if (ppc_md.handle_hmi_exception)
                ppc_md.handle_hmi_exception(regs);

        irq_exit();
        set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);

        _exception(SIGTRAP, regs, 0, 0);

        exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_iabr_match(regs))
                goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
        exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
        _exception(SIGTRAP, regs, 0, 0);
}

void single_step_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        clear_single_step(regs);

        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                       5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_sstep(regs))
                goto bail;

        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
        if (single_stepping(regs))
                single_step_exception(regs);
}

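/*
 * Usage pattern (compiled out, not part of the original file): the
 * emulation paths in this file typically advance NIP past the emulated
 * instruction and then let emulate_single_step() deliver the trace trap
 * the hardware would have raised.  A minimal sketch:
 */
#if 0
static void example_finish_emulation(struct pt_regs *regs)
{
        regs->nip += 4;			/* skip the emulated instruction */
        emulate_single_step(regs);	/* fake the trace trap if single-stepping */
}
#endif
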
static inline int __parse_fpscr(unsigned long fpscr)
{
        int ret = 0;

        /* Invalid operation */
        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
                ret = FPE_FLTINV;

        /* Overflow */
        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
                ret = FPE_FLTOVF;

        /* Underflow */
        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
                ret = FPE_FLTUND;

        /* Divide by zero */
        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
                ret = FPE_FLTDIV;

        /* Inexact result */
        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
                ret = FPE_FLTRES;

        return ret;
}

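/*
 * Illustration (compiled out, not part of the original file): a si_code
 * is only reported when both the enable bit (e.g. FPSCR_ZE) and the
 * matching status bit (e.g. FPSCR_ZX) are set, which is what each pair
 * of tests above checks.  A minimal sketch for the divide-by-zero case:
 */
#if 0
static int example_is_divide_by_zero(unsigned long fpscr)
{
        return (fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX);	/* -> FPE_FLTDIV */
}
#endif
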
static void parse_fpe(struct pt_regs *regs)
{
        int code = 0;

        flush_fp_to_thread(current);

        code = __parse_fpscr(current->thread.fp_state.fpscr);

        _exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 */

static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 NB_RB = (instword >> 11) & 0x1f;
        u32 num_bytes;
        unsigned long EA;
        int pos = 0;

        /* Early out if we are an invalid form of lswx */
        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
                if ((rT == rA) || (rT == NB_RB))
                        return -EINVAL;

        EA = (rA == 0) ? 0 : regs->gpr[rA];

        switch (instword & PPC_INST_STRING_MASK) {
        case PPC_INST_LSWX:
        case PPC_INST_STSWX:
                EA += NB_RB;
                num_bytes = regs->xer & 0x7f;
                break;
        case PPC_INST_LSWI:
        case PPC_INST_STSWI:
                num_bytes = (NB_RB == 0) ? 32 : NB_RB;
                break;
        default:
                return -EINVAL;
        }

        while (num_bytes != 0) {
                u8 val;
                u32 shift = 8 * (3 - (pos & 0x3));

                /* if process is 32-bit, clear upper 32 bits of EA */
                if ((regs->msr & MSR_64BIT) == 0)
                        EA &= 0xFFFFFFFF;

                switch ((instword & PPC_INST_STRING_MASK)) {
                case PPC_INST_LSWX:
                case PPC_INST_LSWI:
                        if (get_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        /* first time updating this reg,
                         * zero it out */
                        if (pos == 0)
                                regs->gpr[rT] = 0;
                        regs->gpr[rT] |= val << shift;
                        break;
                case PPC_INST_STSWI:
                case PPC_INST_STSWX:
                        val = regs->gpr[rT] >> shift;
                        if (put_user(val, (u8 __user *)EA))
                                return -EFAULT;
                        break;
                }
                /* move EA to next address */
                EA += 1;
                num_bytes--;

                /* manage our position within the register */
                if (++pos == 4) {
                        pos = 0;
                        if (++rT == 32)
                                rT = 0;
                }
        }

        return 0;
}

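/*
 * Illustration (compiled out, not part of the original file): the string
 * instructions pack bytes into registers big-endian style, so the first
 * byte of each group of four lands in bits 31..24.  The expression
 * "8 * (3 - (pos & 0x3))" above computes exactly that shift:
 * pos 0 -> 24, pos 1 -> 16, pos 2 -> 8, pos 3 -> 0.  A minimal sketch:
 */
#if 0
static u32 example_string_shift(int pos)
{
        return 8 * (3 - (pos & 0x3));
}
#endif
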
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
        u32 ra, rs;
        unsigned long tmp;

        ra = (instword >> 16) & 0x1f;
        rs = (instword >> 21) & 0x1f;

        tmp = regs->gpr[rs];
        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
        regs->gpr[ra] = tmp;

        return 0;
}

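/*
 * Illustration (compiled out, not part of the original file): the three
 * masking steps above are the classic SWAR population count, stopped at
 * 8-bit granularity so every byte of the result holds the number of set
 * bits in the corresponding source byte - the popcntb semantics.  E.g. a
 * source byte of 0xb1 (four bits set) becomes 0x04.  A minimal sketch:
 */
#if 0
static unsigned long example_popcntb(unsigned long tmp)
{
        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);	/* 2-bit sums */
        tmp = (tmp & 0x3333333333333333ULL)
                + ((tmp >> 2) & 0x3333333333333333ULL);		/* 4-bit sums */
        return (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;	/* per-byte counts */
}
#endif
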
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 rB = (instword >> 11) & 0x1f;
        u8 BC = (instword >> 6) & 0x1f;
        u8 bit;
        unsigned long tmp;

        tmp = (rA == 0) ? 0 : regs->gpr[rA];
        bit = (regs->ccr >> (31 - BC)) & 0x1;

        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

        return 0;
}

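/*
 * Illustration (compiled out, not part of the original file): isel
 * rT,rA,rB,BC is a conditional move - if CR bit BC is set, rT gets rA
 * (or 0 when rA is r0), otherwise rT gets rB - which is what the ternary
 * above implements.  A minimal sketch with hypothetical names:
 */
#if 0
static unsigned long example_isel(unsigned long cr, u8 bc,
                                  unsigned long a, unsigned long b)
{
        return ((cr >> (31 - bc)) & 0x1) ? a : b;
}
#endif
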
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
        /* If we're emulating a load/store in an active transaction, we cannot
         * emulate it as the kernel operates in transaction suspended context.
         * We need to abort the transaction.  This creates a persistent TM
         * abort so tell the user what caused it with a new code.
         */
        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
                tm_enable();
                tm_abort(cause);
                return true;
        }
        return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
        return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
        u32 instword;
        u32 rd;

        if (!user_mode(regs))
                return -EINVAL;
        CHECK_FULL_REGS(regs);

        if (get_user(instword, (u32 __user *)(regs->nip)))
                return -EFAULT;

        /* Emulate the mfspr rD, PVR. */
        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
                PPC_WARN_EMULATED(mfpvr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_PVR);
                return 0;
        }

        /* Emulating the dcba insn is just a no-op.  */
        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
                PPC_WARN_EMULATED(dcba, regs);
                return 0;
        }

        /* Emulate the mcrxr insn.  */
        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
                int shift = (instword >> 21) & 0x1c;
                unsigned long msk = 0xf0000000UL >> shift;

                PPC_WARN_EMULATED(mcrxr, regs);
                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
                regs->xer &= ~0xf0000000UL;
                return 0;
        }

        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
                if (tm_abort_check(regs,
                                   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
                        return -EINVAL;
                PPC_WARN_EMULATED(string, regs);
                return emulate_string_inst(regs, instword);
        }

        /* Emulate the popcntb (Population Count Bytes) instruction. */
        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
                PPC_WARN_EMULATED(popcntb, regs);
                return emulate_popcntb_inst(regs, instword);
        }

        /* Emulate isel (Integer Select) instruction */
        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
                PPC_WARN_EMULATED(isel, regs);
                return emulate_isel(regs, instword);
        }

        /* Emulate sync instruction variants */
        if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
                PPC_WARN_EMULATED(sync, regs);
                asm volatile("sync");
                return 0;
        }

#ifdef CONFIG_PPC64
        /* Emulate the mfspr rD, DSCR. */
        if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
                PPC_INST_MFSPR_DSCR_USER) ||
             ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
                PPC_INST_MFSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mfdscr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_DSCR);
                return 0;
        }
        /* Emulate the mtspr DSCR, rD. */
        if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
                PPC_INST_MTSPR_DSCR_USER) ||
             ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
                PPC_INST_MTSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mtdscr, regs);
                rd = (instword >> 21) & 0x1f;
                current->thread.dscr = regs->gpr[rd];
                current->thread.dscr_inherit = 1;
                mtspr(SPRN_DSCR, current->thread.dscr);
                return 0;
        }
#endif

        return -EINVAL;
}

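/*
 * Illustration (compiled out, not part of the original file): in the
 * mcrxr emulation above, the crfD field is extracted pre-multiplied by
 * four ("& 0x1c"), so crfD=0 gives shift 0 and mask 0xf0000000 (CR
 * field 0), crfD=1 gives shift 4 and mask 0x0f000000, and so on.  A
 * minimal sketch:
 */
#if 0
static unsigned long example_mcrxr_mask(u32 instword)
{
        int shift = (instword >> 21) & 0x1c;	/* crfD * 4 */

        return 0xf0000000UL >> shift;		/* selects the 4-bit CR field */
}
#endif
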
int is_valid_bugaddr(unsigned long addr)
{
        return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
        int ret;
        extern int do_mathemu(struct pt_regs *regs);

        ret = do_mathemu(regs);
        if (ret >= 0)
                PPC_WARN_EMULATED(math, regs);

        switch (ret) {
        case 0:
                emulate_single_step(regs);
                return 0;
        case 1: {
                int code = 0;

                code = __parse_fpscr(current->thread.fp_state.fpscr);
                _exception(SIGFPE, regs, code, regs->nip);
                return 0;
        }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return 0;
        }

        return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);

        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in that case the reason flags will be 0 */

        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
                goto bail;
        }

        if (reason & REASON_TRAP) {
                unsigned long bugaddr;
                /* Debugger is first in line to stop recursive faults in
                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
                if (debugger_bpt(regs))
                        goto bail;

                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                    == NOTIFY_STOP)
                        goto bail;

                bugaddr = regs->nip;
                /*
                 * Fixup bugaddr for BUG_ON() in real mode
                 */
                if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
                        bugaddr += PAGE_OFFSET;

                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
                goto bail;
        }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (reason & REASON_TM) {
                /* This is a TM "Bad Thing Exception" program check.
                 * This occurs when:
                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
                 *    transition in TM states.
                 * -  A trechkpt is attempted when transactional.
                 * -  A treclaim is attempted when non transactional.
                 * -  A tend is illegally attempted.
                 * -  writing a TM SPR when transactional.
                 */
                if (!user_mode(regs) &&
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                /* If usermode caused this, it's done something illegal and
                 * gets a SIGILL slap on the wrist.  We call it an illegal
                 * operand to distinguish from the instruction just being bad
                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
                 * illegal /placement/ of a valid instruction.
                 */
                if (user_mode(regs)) {
                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
                        goto bail;
                } else {
                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
                               "at %lx (msr 0x%x)\n", regs->nip, reason);
                        die("Unrecoverable exception", regs, SIGABRT);
                }
        }
#endif

        /*
         * If we took the program check in the kernel skip down to sending a
         * SIGILL. The subsequent cases all relate to emulating instructions
         * which we should only do for userspace. We also do not want to enable
         * interrupts for kernel faults because that might lead to further
         * faults, and lose the context of the original exception.
         */
        if (!user_mode(regs))
                goto sigill;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
         * that means ESR is sometimes set incorrectly - either to
         * ESR_DST (!?) or 0.  In the process of chasing this with the
         * hardware people - not sure if it can happen on any illegal
         * instruction or only on FP instructions, whether there is a
         * pattern to occurrences etc. -dgibson 31/Mar/2003
         */
        if (!emulate_math(regs))
                goto bail;

        /* Try to emulate it if we should. */
        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
                switch (emulate_instruction(regs)) {
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
                        goto bail;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                        goto bail;
                }
        }

sigill:
        if (reason & REASON_PRIVILEGED)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
        regs->msr |= REASON_ILLEGAL;
        program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int sig, code, fixed = 0;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
                goto bail;

        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);

        if (fixed == 1) {
                regs->nip += 4;	/* skip over emulated instruction */
                emulate_single_step(regs);
                goto bail;
        }

        /* Operand address was bad */
        if (fixed == -EFAULT) {
                sig = SIGSEGV;
                code = SEGV_ACCERR;
        } else {
                sig = SIGBUS;
                code = BUS_ADRALN;
        }
        if (user_mode(regs))
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);

bail:
        exception_exit(prev_state);
}

void slb_miss_bad_addr(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs))
                _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
        else
                bad_page_fault(regs, regs->dar, SIGSEGV);

        exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
               current, regs->gpr[1]);
        debugger(regs);
        show_regs(regs);
        panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
               regs->nip, regs->msr);
        debugger(regs);
        die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

        exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                goto bail;
        }

        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
        exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* A user program has executed a vsx instruction,
                   but this kernel doesn't support vsx. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
               "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (user_mode(regs)) {
                current->thread.load_tm++;
                regs->msr |= MSR_TM;
                tm_enable();
                tm_restore_sprs(&current->thread);
                return;
        }
#endif
        pr_emerg("Unrecoverable TM Unavailable Exception "
                 "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
        static char *facility_strings[] = {
                [FSCR_FP_LG] = "FPU",
                [FSCR_VECVSX_LG] = "VMX/VSX",
                [FSCR_DSCR_LG] = "DSCR",
                [FSCR_PM_LG] = "PMU SPRs",
                [FSCR_BHRB_LG] = "BHRB",
                [FSCR_TM_LG] = "TM",
                [FSCR_EBB_LG] = "EBB",
                [FSCR_TAR_LG] = "TAR",
                [FSCR_LM_LG] = "LM",
        };
        char *facility = "unknown";
        u64 value;
        u32 instword, rd;
        u8 status;
        bool hv;

        hv = (regs->trap == 0xf80);
        if (hv)
                value = mfspr(SPRN_HFSCR);
        else
                value = mfspr(SPRN_FSCR);

        status = value >> 56;
        if (status == FSCR_DSCR_LG) {
                /*
                 * User is accessing the DSCR register using the problem
                 * state only SPR number (0x03) either through a mfspr or
                 * a mtspr instruction. If it is a write attempt through
                 * a mtspr, then we set the inherit bit. This also allows
                 * the user to write or read the register directly in the
                 * future by setting via the FSCR DSCR bit. But in case it
                 * is a read DSCR attempt through a mfspr instruction, we
                 * just emulate the instruction instead. This code path will
                 * always emulate all the mfspr instructions till the user
                 * has attempted at least one mtspr instruction. This way it
                 * preserves the same behaviour when the user is accessing
                 * the DSCR through privilege level only SPR number (0x11)
                 * which is emulated through illegal instruction exception.
                 * We always leave HFSCR DSCR set.
                 */
                if (get_user(instword, (u32 __user *)(regs->nip))) {
                        pr_err("Failed to fetch the user instruction\n");
                        return;
                }

                /* Write into DSCR (mtspr 0x03, RS) */
                if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
                    == PPC_INST_MTSPR_DSCR_USER) {
                        rd = (instword >> 21) & 0x1f;
                        current->thread.dscr = regs->gpr[rd];
                        current->thread.dscr_inherit = 1;
                        current->thread.fscr |= FSCR_DSCR;
                        mtspr(SPRN_FSCR, current->thread.fscr);
                }

                /* Read from DSCR (mfspr RT, 0x03) */
                if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
                    == PPC_INST_MFSPR_DSCR_USER) {
                        if (emulate_instruction(regs)) {
                                pr_err("DSCR based mfspr emulation failed\n");
                                return;
                        }
                        regs->nip += 4;
                }
                emulate_single_step(regs);
                return;
        } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
                /*
                 * This process has touched LM, so turn it on forever
                 * for this process
                 */
                current->thread.fscr |= FSCR_LM;
                mtspr(SPRN_FSCR, current->thread.fscr);
                return;
        }

        if (status == FSCR_TM_LG) {
                /*
                 * If we're here then the hardware is TM aware because it
                 * generated an exception with FSCR_TM set.
                 *
                 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
                 * told us not to do TM, or the kernel is not built with TM
                 * support.
                 *
                 * If both of those things are true, then userspace can spam the
                 * console by triggering the printk() below just by continually
                 * doing tbegin (or any TM instruction). So in that case just
                 * send the process a SIGILL immediately.
                 */
                if (!cpu_has_feature(CPU_FTR_TM))
                        goto out;

                tm_unavailable(regs);
                return;
        }

        if ((status < ARRAY_SIZE(facility_strings)) &&
            facility_strings[status])
                facility = facility_strings[status];

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        pr_err_ratelimited(
                "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
                hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

out:
        if (user_mode(regs)) {
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

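/*
 * Illustration (compiled out, not part of the original file): on a
 * facility unavailable interrupt the cause is reported in the top byte
 * of the (H)FSCR, which is why the handler above indexes
 * facility_strings[] with "value >> 56".  A minimal sketch with a
 * hypothetical name:
 */
#if 0
static u8 example_fscr_status(u64 fscr)
{
        return fscr >> 56;	/* interrupt-cause field, e.g. FSCR_DSCR_LG */
}
#endif
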
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
        /* Note:  This does not handle any kind of FP laziness. */

        TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
                 regs->nip, regs->msr);

        /* We can only have got here if the task started using FP after
         * beginning the transaction.  So, the transactional regs are just a
         * copy of the checkpointed ones.  But, we still need to recheckpoint
         * as we're enabling FP for the process; it will return, abort the
         * transaction, and probably retry but now with FP enabled.  So the
         * checkpointed FP registers need to be loaded.
         */
        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
        /* Reclaim didn't save out any FPRs to transact_fprs. */

        /* Enable FP for the task: */
        regs->msr |= (MSR_FP | current->thread.fpexc_mode);

        /* This loads and recheckpoints the FP registers from
         * thread.fpr[].  They will remain in registers after the
         * checkpoint so we don't need to reload them after.
         * If VMX is in use, the VRs now hold checkpointed values,
         * so we don't want to load the VRs from the thread_struct.
         */
        tm_recheckpoint(&current->thread, MSR_FP);

        /* If VMX is in use, get the transactional values back */
        if (regs->msr & MSR_VEC) {
                msr_check_and_set(MSR_VEC);
                load_vr_state(&current->thread.vr_state);
                /* At this point all the VSX state is loaded, so enable it */
                regs->msr |= MSR_VSX;
        }
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
        /* See the comments in fp_unavailable_tm().  This function operates
         * the same way.
         */

        TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
                 "MSR=%lx\n",
                 regs->nip, regs->msr);
        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
        regs->msr |= MSR_VEC;
        tm_recheckpoint(&current->thread, MSR_VEC);
        current->thread.used_vr = 1;

        if (regs->msr & MSR_FP) {
                msr_check_and_set(MSR_FP);
                load_fp_state(&current->thread.fp_state);
                regs->msr |= MSR_VSX;
        }
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
        unsigned long orig_msr = regs->msr;

        /* See the comments in fp_unavailable_tm().  This works similarly,
         * though we're loading both FP and VEC registers in here.
         *
         * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
         * regs.  Either way, set MSR_VSX.
         */

        TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
                 "MSR=%lx\n",
                 regs->nip, regs->msr);

        current->thread.used_vsr = 1;

        /* If FP and VMX are already loaded, we have all the state we need */
        if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
                regs->msr |= MSR_VSX;
                return;
        }

        /* This reclaims FP and/or VR regs if they're already enabled */
        tm_reclaim_current(TM_CAUSE_FAC_UNAV);

        regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
                MSR_VSX;

        /* This loads & recheckpoints FP and VRs; but we have
         * to be sure not to overwrite previously-valid state.
         */
        tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

        msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));

        if (orig_msr & MSR_FP)
                load_fp_state(&current->thread.fp_state);
        if (orig_msr & MSR_VEC)
                load_vr_state(&current->thread.vr_state);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
        __this_cpu_inc(irq_stat.pmu_irqs);

        perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);

        if (!user_mode(regs)) {
                debugger(regs);
                die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
                    regs, SIGFPE);
        }

        if (!emulate_math(regs))
                return;

        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
        int changed = 0;
        /*
         * Determine the cause of the debug event, clear the
         * event flags and send a trap to the handler. Torez
         */
        if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
                dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
                current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
                do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
                             5);
                changed |= 0x01;
        } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
                dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
                do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
                             6);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC1) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
                dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
                do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
                             1);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC2) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
                do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
                             2);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC3) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
                dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
                do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
                             3);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC4) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
                do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
                             4);
                changed |= 0x01;
        }
        /*
         * At the point this routine was called, the MSR(DE) was turned off.
         * Check all other debug flags and see if that bit needs to be turned
         * back on or not.
         */
        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
                               current->thread.debug.dbcr1))
                regs->msr |= MSR_DE;
        else
                /* Make sure the IDM flag is off */
                current->thread.debug.dbcr0 &= ~DBCR0_IDM;

        if (changed & 0x01)
                mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
        current->thread.debug.dbsr = debug_status;

        /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
         * on server, it stops on the target of the branch. In order to simulate
         * the server behaviour, we thus restart right away with a single step
         * instead of stopping here when hitting a BT
         */
        if (debug_status & DBSR_BT) {
                regs->msr &= ~MSR_DE;

                /* Disable BT */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
                /* Clear the BT event */
                mtspr(SPRN_DBSR, DBSR_BT);

                /* Do the single step trick only when coming from userspace */
                if (user_mode(regs)) {
                        current->thread.debug.dbcr0 &= ~DBCR0_BT;
                        current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
                        regs->msr |= MSR_DE;
                        return;
                }

                if (notify_die(DIE_SSTEP, "block_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }
                if (debugger_sstep(regs))
                        return;
        } else if (debug_status & DBSR_IC) { 	/* Instruction complete */
                regs->msr &= ~MSR_DE;

                /* Disable instruction completion */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
                /* Clear the instruction completion event */
                mtspr(SPRN_DBSR, DBSR_IC);

                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }

                if (debugger_sstep(regs))
                        return;

                if (user_mode(regs)) {
                        current->thread.debug.dbcr0 &= ~DBCR0_IC;
                        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
                                               current->thread.debug.dbcr1))
                                regs->msr |= MSR_DE;
                        else
                                /* Make sure the IDM bit is off */
                                current->thread.debug.dbcr0 &= ~DBCR0_IDM;
                }

                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
        } else
                handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
               regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
        int err;

        if (!user_mode(regs)) {
                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
        }

        flush_altivec_to_thread(current);

        PPC_WARN_EMULATED(altivec, regs);
        err = emulate_altivec(regs);
        if (err == 0) {
                regs->nip += 4;		/* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
                                   "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vr_state.vscr.u[3] |= 0x10000;
        }
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
                           unsigned long error_code)
{
        /* We treat cache locking instructions from the user
         * as priv ops, in the future we could try to do
         * something smarter
         */
        if (error_code & (ESR_DLK|ESR_ILK))
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
        extern int do_spe_mathemu(struct pt_regs *regs);
        unsigned long spefscr;
        int fpexc_mode;
        int code = 0;
        int err;

        flush_spe_to_thread(current);

        spefscr = current->thread.spefscr;
        fpexc_mode = current->thread.fpexc_mode;

        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
                code = FPE_FLTOVF;
        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
                code = FPE_FLTUND;
        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
                code = FPE_FLTDIV;
        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
                code = FPE_FLTINV;
        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
                code = FPE_FLTRES;

        err = do_spe_mathemu(regs);
        if (err == 0) {
                regs->nip += 4;		/* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, code, regs->nip);
        }

        return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
        extern int speround_handler(struct pt_regs *regs);
        int err;

        preempt_disable();
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        preempt_enable();

        regs->nip -= 4;
        err = speround_handler(regs);
        if (err == 0) {
                regs->nip += 4;		/* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, 0, regs->nip);
        }

        return;
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
               regs->trap, regs->nip);
        die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
        /* Generic WatchdogHandler, implement your own */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & (~TCR_WIE));
        return;
}

void WatchdogException(struct pt_regs *regs)
{
        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
        WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
               regs->gpr[1], regs->nip);
        die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
        WARN_EMULATED_SETUP(altivec),
#endif
        WARN_EMULATED_SETUP(dcba),
        WARN_EMULATED_SETUP(dcbz),
        WARN_EMULATED_SETUP(fp_pair),
        WARN_EMULATED_SETUP(isel),
        WARN_EMULATED_SETUP(mcrxr),
        WARN_EMULATED_SETUP(mfpvr),
        WARN_EMULATED_SETUP(multiple),
        WARN_EMULATED_SETUP(popcntb),
        WARN_EMULATED_SETUP(spe),
        WARN_EMULATED_SETUP(string),
        WARN_EMULATED_SETUP(sync),
        WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
        WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
        WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
        WARN_EMULATED_SETUP(mfdscr),
        WARN_EMULATED_SETUP(mtdscr),
        WARN_EMULATED_SETUP(lq_stq),
#endif
};

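/*
 * Illustration (compiled out, not part of the original file):
 * WARN_EMULATED_SETUP combines a designated initializer with the
 * preprocessor stringify operator, so a single token names both the
 * struct member and its debugfs entry, e.g.:
 *
 *	WARN_EMULATED_SETUP(isel)  expands to  .isel = { .name = "isel" }
 */
#if 0
#define EXAMPLE_SETUP(type)	.type = { .name = #type }	/* same idiom */
#endif
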
u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
        pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
                            type);
}

static int __init ppc_warn_emulated_init(void)
{
        struct dentry *dir, *d;
        unsigned int i;
        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

        if (!powerpc_debugfs_root)
                return -ENODEV;

        dir = debugfs_create_dir("emulated_instructions",
                                 powerpc_debugfs_root);
        if (!dir)
                return -ENOMEM;

        d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
                               &ppc_warn_emulated);
        if (!d)
                goto fail;

        for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
                d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
                                       (u32 *)&entries[i].val.counter);
                if (!d)
                        goto fail;
        }

        return 0;

fail:
        debugfs_remove_recursive(dir);
        return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */