 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 * This file handles the architecture-dependent parts of hardware exceptions
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h> /* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <sysdev/fsl_pci.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
/* Transactional Memory trap debug */
#if 0
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
 * Trap & Exception support
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight) {
struct backlight_properties *props;
props = &pmac_backlight->props;
props->brightness = props->max_brightness;
props->power = FB_BLANK_UNBLANK;
backlight_update_status(pmac_backlight);
mutex_unlock(&pmac_backlight_mutex);
static inline void pmac_backlight_unblank(void) { }
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;
static unsigned __kprobes long oops_begin(struct pt_regs *regs)
/* racy, but better than risking deadlock. */
raw_local_irq_save(flags);
cpu = smp_processor_id();
if (!arch_spin_trylock(&die_lock)) {
if (cpu == die_owner)
/* nested oops. should stop eventually */;
arch_spin_lock(&die_lock);
if (machine_is(powermac))
pmac_backlight_unblank();
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
/* Nest count reaches zero, release the lock. */
arch_spin_unlock(&die_lock);
raw_local_irq_restore(flags);
crash_fadump(regs, "die oops");
 * A system reset (0x100) is a request to dump, so we always send
 * it through the crashdump code.
if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
 * We aren't the primary crash CPU. We need to send it
 * to a holding pattern to avoid it ending up in the panic
crash_kexec_secondary(regs);
 * While our oops output is serialised by a spinlock, output
 * from panic() called below can race and corrupt it. If we
 * know we are going to panic, delay for 1 second so we have a
 * chance to get clean backtraces from all CPUs that are oopsing.
if (in_interrupt() || panic_on_oops || !current->pid ||
is_global_init(current)) {
mdelay(MSEC_PER_SEC);
panic("Fatal exception in interrupt");
panic("Fatal exception");
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("SMP NR_CPUS=%d ", NR_CPUS);
if (debug_pagealloc_enabled())
printk("DEBUG_PAGEALLOC ");
printk("%s\n", ppc_md.name ? ppc_md.name : "");
if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
void die(const char *str, struct pt_regs *regs, long err)
unsigned long flags = oops_begin(regs);
if (__die(str, regs, err))
oops_end(flags, regs, err);
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs, siginfo_t *info)
memset(info, 0, sizeof(*info));
info->si_signo = SIGTRAP;
info->si_code = TRAP_TRACE;
info->si_addr = (void __user *)regs->nip;
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
"at %08lx nip %08lx lr %08lx code %x\n";
const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
"at %016lx nip %016lx lr %016lx code %x\n";
if (!user_mode(regs)) {
die("Exception in kernel mode", regs, signr);
if (show_unhandled_signals && unhandled_signal(current, signr)) {
printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
current->comm, current->pid, signr,
addr, regs->nip, regs->link, code);
if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
current->thread.trap_nr = code;
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_addr = (void __user *) addr;
force_sig_info(signr, &info, current);
void system_reset_exception(struct pt_regs *regs)
/* See if any machine dependent calls */
if (ppc_md.system_reset_exception) {
if (ppc_md.system_reset_exception(regs))
die("System Reset", regs, SIGABRT);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
/* What should we do here? We could issue a shutdown or hard reset. */
 * This function is called in real mode. Strictly no printk's please.
 * regs->nip and regs->msr contain srr0 and srr1.
long machine_check_early(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
long hmi_exception_realmode(struct pt_regs *regs)
__this_cpu_inc(irq_stat.hmi_exceptions);
wait_for_subcore_guest_exit();
if (ppc_md.hmi_exception_early)
ppc_md.hmi_exception_early(regs);
wait_for_tb_resync();
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
static inline int check_io_access(struct pt_regs *regs)
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
&& (entry = search_exception_tables(regs->nip)) != NULL) {
 * Check that it's a sync instruction, or somewhere
 * in the twi; isync; nop sequence that inb/inw/inl uses.
 * As the address is in the exception table
 * we should be able to read the instr there.
 * For the debug message, we look at the preceding
if (*nip == 0x60000000) /* nop */
else if (*nip == 0x4c00012c) /* isync */
if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
rb = (*nip >> 11) & 0x1f;
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
regs->nip = entry->fixup;
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
#define get_reason(regs) ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs) ((regs)->dsisr)
#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR
/* single-step stuff */
#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs) ((regs)->msr)
#define get_mc_reason(regs) ((regs)->msr)
#define REASON_TM 0x200000
#define REASON_FP 0x100000
#define REASON_ILLEGAL 0x80000
#define REASON_PRIVILEGED 0x40000
#define REASON_TRAP 0x20000
#define single_stepping(regs) ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
if (reason & ESR_IMCP) {
printk("Instruction");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
printk(" machine check in kernel mode.\n");
int machine_check_440A(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
printk("Machine check in kernel mode.\n");
if (reason & ESR_IMCP) {
printk("Instruction Synchronous Machine Check exception\n");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
u32 mcsr = mfspr(SPRN_MCSR);
printk("Instruction Read PLB Error\n");
printk("Data Read PLB Error\n");
printk("Data Write PLB Error\n");
if (mcsr & MCSR_TLBP)
printk("TLB Parity Error\n");
if (mcsr & MCSR_ICP) {
flush_instruction_cache();
printk("I-Cache Parity Error\n");
if (mcsr & MCSR_DCSP)
printk("D-Cache Search Parity Error\n");
if (mcsr & MCSR_DCFP)
printk("D-Cache Flush Parity Error\n");
if (mcsr & MCSR_IMPE)
printk("Machine Check exception is imprecise\n");
mtspr(SPRN_MCSR, mcsr);
int machine_check_47x(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
printk(KERN_ERR "Machine check in kernel mode.\n");
if (reason & ESR_IMCP) {
"Instruction Synchronous Machine Check exception\n");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
mcsr = mfspr(SPRN_MCSR);
printk(KERN_ERR "Instruction Read PLB Error\n");
printk(KERN_ERR "Data Read PLB Error\n");
printk(KERN_ERR "Data Write PLB Error\n");
if (mcsr & MCSR_TLBP)
printk(KERN_ERR "TLB Parity Error\n");
if (mcsr & MCSR_ICP) {
flush_instruction_cache();
printk(KERN_ERR "I-Cache Parity Error\n");
if (mcsr & MCSR_DCSP)
printk(KERN_ERR "D-Cache Search Parity Error\n");
if (mcsr & PPC47x_MCSR_GPR)
printk(KERN_ERR "GPR Parity Error\n");
if (mcsr & PPC47x_MCSR_FPR)
printk(KERN_ERR "FPR Parity Error\n");
if (mcsr & PPC47x_MCSR_IPR)
printk(KERN_ERR "Machine Check exception is imprecise\n");
mtspr(SPRN_MCSR, mcsr);
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
unsigned long mcsr = mfspr(SPRN_MCSR);
unsigned long reason = mcsr;
if (reason & MCSR_LD) {
recoverable = fsl_rio_mcheck_exception(regs);
if (recoverable == 1)
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
printk("Machine Check Signal\n");
if (reason & MCSR_ICPERR) {
printk("Instruction Cache Parity Error\n");
 * This is recoverable by invalidating the i-cache.
mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
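/*
 * L1CSR1[ICFI] requests an i-cache flash invalidate and is cleared by
 * hardware once the invalidate completes, so poll until it drops.
 */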
while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
 * This will generally be accompanied by an instruction
 * fetch error report -- only treat MCSR_IF as fatal
 * if it wasn't due to an L1 parity error.
if (reason & MCSR_DCPERR_MC) {
printk("Data Cache Parity Error\n");
 * In write shadow mode we auto-recover from the error, but it
 * may still get logged and cause a machine check. We should
 * only treat the non-write shadow case as non-recoverable.
if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
if (reason & MCSR_L2MMU_MHIT) {
printk("Hit on multiple TLB entries\n");
if (reason & MCSR_NMI)
printk("Non-maskable interrupt\n");
if (reason & MCSR_IF) {
printk("Instruction Fetch Error Report\n");
if (reason & MCSR_LD) {
printk("Load Error Report\n");
if (reason & MCSR_ST) {
printk("Store Error Report\n");
if (reason & MCSR_LDG) {
printk("Guarded Load Error Report\n");
if (reason & MCSR_TLBSYNC)
printk("Simultaneous tlbsync operations\n");
if (reason & MCSR_BSL2_ERR) {
printk("Level 2 Cache Error\n");
if (reason & MCSR_MAV) {
addr = mfspr(SPRN_MCAR);
addr |= (u64)mfspr(SPRN_MCARU) << 32;
printk("Machine Check %s Address: %#llx\n",
reason & MCSR_MEA ? "Effective" : "Physical", addr);
mtspr(SPRN_MCSR, mcsr);
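/*
 * MCSR bits are write-one-to-clear; if it does not read back as zero,
 * a new error was logged while we were here, so only report recovery
 * when the register is clean and nothing above marked us unrecoverable.
 */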
return mfspr(SPRN_MCSR) == 0 && recoverable;
int machine_check_e500(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
if (reason & MCSR_BUS_RBERR) {
if (fsl_rio_mcheck_exception(regs))
if (fsl_pci_mcheck_exception(regs))
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
printk("Machine Check Signal\n");
if (reason & MCSR_ICPERR)
printk("Instruction Cache Parity Error\n");
if (reason & MCSR_DCP_PERR)
printk("Data Cache Push Parity Error\n");
if (reason & MCSR_DCPERR)
printk("Data Cache Parity Error\n");
if (reason & MCSR_BUS_IAERR)
printk("Bus - Instruction Address Error\n");
if (reason & MCSR_BUS_RAERR)
printk("Bus - Read Address Error\n");
if (reason & MCSR_BUS_WAERR)
printk("Bus - Write Address Error\n");
if (reason & MCSR_BUS_IBERR)
printk("Bus - Instruction Data Error\n");
if (reason & MCSR_BUS_RBERR)
printk("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_WBERR)
printk("Bus - Write Data Bus Error\n");
if (reason & MCSR_BUS_IPERR)
printk("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
printk("Bus - Read Parity Error\n");
int machine_check_generic(struct pt_regs *regs)
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
printk("Machine Check Signal\n");
if (reason & MCSR_CP_PERR)
printk("Cache Push Parity Error\n");
if (reason & MCSR_CPERR)
printk("Cache Parity Error\n");
if (reason & MCSR_EXCP_ERR)
printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
if (reason & MCSR_BUS_IRERR)
printk("Bus - Read Bus Error on instruction fetch\n");
if (reason & MCSR_BUS_DRERR)
printk("Bus - Read Bus Error on data load\n");
if (reason & MCSR_BUS_WRERR)
printk("Bus - Write Bus Error on buffered store or cache line push\n");
int machine_check_generic(struct pt_regs *regs)
unsigned long reason = get_mc_reason(regs);
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", reason);
switch (reason & 0x601F0000) {
printk("Machine check signal\n");
case 0: /* for 601 */
case 0x140000: /* 7450 MSS error and TEA */
printk("Transfer error ack signal\n");
printk("Data parity error signal\n");
printk("Address parity error signal\n");
printk("L1 Data Cache error\n");
printk("L1 Instruction Cache error\n");
printk("L2 data cache parity error\n");
printk("Unknown values in msr\n");
#endif /* everything else */
void machine_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
__this_cpu_inc(irq_stat.mce_exceptions);
/* See if any machine dependent calls. In theory, we would want
 * to call the CPU first, and call the ppc_md. one if the CPU
 * one returns a positive number. However there is existing code
 * that assumes the board gets a first chance, so let's keep it
 * that way for now and fix things later. --BenH.
if (ppc_md.machine_check_exception)
recover = ppc_md.machine_check_exception(regs);
else if (cur_cpu_spec->machine_check)
recover = cur_cpu_spec->machine_check(regs);
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort
 * yuck !!! that totally needs to go away ! There are better ways
 * to deal with that than having a wart in the mcheck handler.
bad_page_fault(regs, regs->dar, SIGBUS);
if (debugger_fault_handler(regs))
if (check_io_access(regs))
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable Machine check");
exception_exit(prev_state);
void SMIException(struct pt_regs *regs)
die("System Management Interrupt", regs, SIGABRT);
void handle_hmi_exception(struct pt_regs *regs)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
if (ppc_md.handle_hmi_exception)
ppc_md.handle_hmi_exception(regs);
set_irq_regs(old_regs);
void unknown_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, 0, 0);
exception_exit(prev_state);
void instruction_breakpoint_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
if (debugger_iabr_match(regs))
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
exception_exit(prev_state);
void RunModeException(struct pt_regs *regs)
_exception(SIGTRAP, regs, 0, 0);
void __kprobes single_step_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
clear_single_step(regs);
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
if (debugger_sstep(regs))
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
exception_exit(prev_state);
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
static void emulate_single_step(struct pt_regs *regs)
if (single_stepping(regs))
single_step_exception(regs);
static inline int __parse_fpscr(unsigned long fpscr)
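/*
 * Map the FPSCR exception enable/status bit pairs onto FPE_FLT* si_code
 * values: an IEEE exception is reported only when both its enable bit
 * (VE/OE/UE/ZE/XE) and its corresponding status bit (VX/OX/UX/ZX/XX) are set.
 */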
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
static void parse_fpe(struct pt_regs *regs)
flush_fp_to_thread(current);
code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
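 *
 * As a concrete example of the bit-matching approach (see
 * emulate_instruction() below): "mfspr rD, PVR" is recognised by testing
 * (instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR and then
 * extracting rD from bits 21-25 of the instruction word, with no
 * general-purpose decoder needed.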
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
u8 rT = (instword >> 21) & 0x1f;
u8 rA = (instword >> 16) & 0x1f;
u8 NB_RB = (instword >> 11) & 0x1f;
/* Early out if we are an invalid form of lswx */
if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
if ((rT == rA) || (rT == NB_RB))
EA = (rA == 0) ? 0 : regs->gpr[rA];
switch (instword & PPC_INST_STRING_MASK) {
num_bytes = regs->xer & 0x7f;
num_bytes = (NB_RB == 0) ? 32 : NB_RB;
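/*
 * The loop below moves one byte per iteration between memory at EA and
 * the byte lanes of the register(s) starting at rT; "shift" selects the
 * lane within the current 32-bit register word, most significant byte
 * first.
 */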
while (num_bytes != 0)
u32 shift = 8 * (3 - (pos & 0x3));
/* if process is 32-bit, clear upper 32 bits of EA */
if ((regs->msr & MSR_64BIT) == 0)
switch ((instword & PPC_INST_STRING_MASK)) {
if (get_user(val, (u8 __user *)EA))
/* first time updating this reg,
regs->gpr[rT] |= val << shift;
val = regs->gpr[rT] >> shift;
if (put_user(val, (u8 __user *)EA))
/* move EA to next address */
/* manage our position within the register */
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
ra = (instword >> 16) & 0x1f;
rs = (instword >> 21) & 0x1f;
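/*
 * The masked add/shift steps below are the classic SWAR byte-wise
 * population count: afterwards each byte of tmp holds the number of set
 * bits that were in the corresponding byte of the source register,
 * which is exactly what popcntb is defined to place in rA.
 */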
tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
static int emulate_isel(struct pt_regs *regs, u32 instword)
u8 rT = (instword >> 21) & 0x1f;
u8 rA = (instword >> 16) & 0x1f;
u8 rB = (instword >> 11) & 0x1f;
u8 BC = (instword >> 6) & 0x1f;
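/* isel: rT = (CR bit BC set) ? (rA == 0 ? 0 : rA) : rB */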
tmp = (rA == 0) ? 0 : regs->gpr[rA];
bit = (regs->ccr >> (31 - BC)) & 0x1;
regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
/* If we're emulating a load/store in an active transaction, we cannot
 * emulate it as the kernel operates in transaction suspended context.
 * We need to abort the transaction. This creates a persistent TM
 * abort so tell the user what caused it with a new code.
if (MSR_TM_TRANSACTIONAL(regs->msr)) {
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
static int emulate_instruction(struct pt_regs *regs)
if (!user_mode(regs))
CHECK_FULL_REGS(regs);
if (get_user(instword, (u32 __user *)(regs->nip)))
/* Emulate the mfspr rD, PVR. */
if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
PPC_WARN_EMULATED(mfpvr, regs);
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(SPRN_PVR);
/* Emulating the dcba insn is just a no-op. */
if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
PPC_WARN_EMULATED(dcba, regs);
/* Emulate the mcrxr insn. */
if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
int shift = (instword >> 21) & 0x1c;
unsigned long msk = 0xf0000000UL >> shift;
PPC_WARN_EMULATED(mcrxr, regs);
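/*
 * mcrxr copies the SO/OV/CA bits from the top nibble of XER into CR
 * field crfD (selected here by "shift"/"msk") and then clears those
 * bits in XER.
 */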
regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
regs->xer &= ~0xf0000000UL;
/* Emulate load/store string insn. */
if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
if (tm_abort_check(regs,
TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
PPC_WARN_EMULATED(string, regs);
return emulate_string_inst(regs, instword);
/* Emulate the popcntb (Population Count Bytes) instruction. */
if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
PPC_WARN_EMULATED(popcntb, regs);
return emulate_popcntb_inst(regs, instword);
/* Emulate isel (Integer Select) instruction */
if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
PPC_WARN_EMULATED(isel, regs);
return emulate_isel(regs, instword);
/* Emulate sync instruction variants */
if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
PPC_WARN_EMULATED(sync, regs);
asm volatile("sync");
/* Emulate the mfspr rD, DSCR. */
if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
PPC_INST_MFSPR_DSCR_USER) ||
((instword & PPC_INST_MFSPR_DSCR_MASK) ==
PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(SPRN_DSCR);
/* Emulate the mtspr DSCR, rD. */
if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
PPC_INST_MTSPR_DSCR_USER) ||
((instword & PPC_INST_MTSPR_DSCR_MASK) ==
PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
mtspr(SPRN_DSCR, current->thread.dscr);
int is_valid_bugaddr(unsigned long addr)
return is_kernel_addr(addr);
#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
extern int do_mathemu(struct pt_regs *regs);
ret = do_mathemu(regs);
PPC_WARN_EMULATED(math, regs);
emulate_single_step(regs);
code = __parse_fpscr(current->thread.fp_state.fpscr);
_exception(SIGFPE, regs, code, regs->nip);
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
static inline int emulate_math(struct pt_regs *regs) { return -1; }
void __kprobes program_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
/* We can now get here via a FP Unavailable exception if the core
 * has no FPU, in that case the reason flags will be 0 */
if (reason & REASON_FP) {
/* IEEE FP exception */
if (reason & REASON_TRAP) {
unsigned long bugaddr;
/* Debugger is first in line to stop recursive faults in
 * rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
bugaddr = regs->nip;
 * Fixup bugaddr for BUG_ON() in real mode
if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
bugaddr += PAGE_OFFSET;
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
/* This is a TM "Bad Thing Exception" program check.
 * - An rfid/hrfid/mtmsrd attempts to cause an illegal
 * transition in TM states.
 * - A trechkpt is attempted when transactional.
 * - A treclaim is attempted when non-transactional.
 * - A tend is illegally attempted.
 * - writing a TM SPR when transactional.
if (!user_mode(regs) &&
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
/* If usermode caused this, it's done something illegal and
 * gets a SIGILL slap on the wrist. We call it an illegal
 * operand to distinguish from the instruction just being bad
 * (e.g. executing a 'tend' on a CPU without TM!); it's an
 * illegal /placement/ of a valid instruction.
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
"at %lx (msr 0x%x)\n", regs->nip, reason);
die("Unrecoverable exception", regs, SIGABRT);
 * If we took the program check in the kernel skip down to sending a
 * SIGILL. The subsequent cases all relate to emulating instructions
 * which we should only do for userspace. We also do not want to enable
 * interrupts for kernel faults because that might lead to further
 * faults, and lose the context of the original exception.
if (!user_mode(regs))
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
 * but there seems to be a hardware bug on the 405GP (RevD)
 * that means ESR is sometimes set incorrectly - either to
 * ESR_DST (!?) or 0. In the process of chasing this with the
 * hardware people - not sure if it can happen on any illegal
 * instruction or only on FP instructions, whether there is a
 * pattern to occurrences etc. -dgibson 31/Mar/2003
if (!emulate_math(regs))
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
emulate_single_step(regs);
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
if (reason & REASON_PRIVILEGED)
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
exception_exit(prev_state);
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
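/*
 * Flag the cause as an illegal instruction in the saved MSR so that
 * program_check_exception(), which reads the reason from the MSR on
 * non-4xx parts (see get_reason() above), takes its illegal-op path.
 */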
regs->msr |= REASON_ILLEGAL;
program_check_exception(regs);
void alignment_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
regs->nip += 4; /* skip over emulated instruction */
emulate_single_step(regs);
/* Operand address was bad */
if (fixed == -EFAULT) {
if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
bad_page_fault(regs, regs->dar, sig);
exception_exit(prev_state);
void StackOverflow(struct pt_regs *regs)
printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
current, regs->gpr[1]);
panic("kernel stack overflow");
void nonrecoverable_exception(struct pt_regs *regs)
printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
regs->nip, regs->msr);
die("nonrecoverable exception", regs, SIGKILL);
void kernel_fp_unavailable_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
exception_exit(prev_state);
void altivec_unavailable_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
exception_exit(prev_state);
void vsx_unavailable_exception(struct pt_regs *regs)
if (user_mode(regs)) {
/* A user program has executed a vsx instruction,
but this kernel doesn't support vsx. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
void facility_unavailable_exception(struct pt_regs *regs)
static char *facility_strings[] = {
[FSCR_FP_LG] = "FPU",
[FSCR_VECVSX_LG] = "VMX/VSX",
[FSCR_DSCR_LG] = "DSCR",
[FSCR_PM_LG] = "PMU SPRs",
[FSCR_BHRB_LG] = "BHRB",
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
[FSCR_LM_LG] = "LM",
char *facility = "unknown";
hv = (regs->trap == 0xf80);
value = mfspr(SPRN_HFSCR);
value = mfspr(SPRN_FSCR);
status = value >> 56;
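/* The top byte of the (H)FSCR holds the interruption cause (IC) code
 * identifying which facility triggered this exception. */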
if (status == FSCR_DSCR_LG) {
 * User is accessing the DSCR register using the problem
 * state only SPR number (0x03) either through a mfspr or
 * a mtspr instruction. If it is a write attempt through
 * a mtspr, then we set the inherit bit. This also allows
 * the user to write or read the register directly in the
 * future by setting via the FSCR DSCR bit. But in case it
 * is a read DSCR attempt through a mfspr instruction, we
 * just emulate the instruction instead. This code path will
 * always emulate all the mfspr instructions till the user
 * has attempted at least one mtspr instruction. This way it
 * preserves the same behaviour when the user is accessing
 * the DSCR through privilege level only SPR number (0x11)
 * which is emulated through illegal instruction exception.
 * We always leave HFSCR DSCR set.
if (get_user(instword, (u32 __user *)(regs->nip))) {
pr_err("Failed to fetch the user instruction\n");
/* Write into DSCR (mtspr 0x03, RS) */
if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
== PPC_INST_MTSPR_DSCR_USER) {
rd = (instword >> 21) & 0x1f;
current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
current->thread.fscr |= FSCR_DSCR;
mtspr(SPRN_FSCR, current->thread.fscr);
/* Read from DSCR (mfspr RT, 0x03) */
if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
== PPC_INST_MFSPR_DSCR_USER) {
if (emulate_instruction(regs)) {
pr_err("DSCR based mfspr emulation failed\n");
emulate_single_step(regs);
} else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
 * This process has touched LM, so turn it on forever
current->thread.fscr |= FSCR_LM;
mtspr(SPRN_FSCR, current->thread.fscr);
if ((status < ARRAY_SIZE(facility_strings)) &&
facility_strings[status])
facility = facility_strings[status];
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
die("Unexpected facility unavailable exception", regs, SIGABRT);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void fp_unavailable_tm(struct pt_regs *regs)
/* Note: This does not handle any kind of FP laziness. */
TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
regs->nip, regs->msr);
/* We can only have got here if the task started using FP after
 * beginning the transaction. So, the transactional regs are just a
 * copy of the checkpointed ones. But, we still need to recheckpoint
 * as we're enabling FP for the process; it will return, abort the
 * transaction, and probably retry but now with FP enabled. So the
 * checkpointed FP registers need to be loaded.
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
/* Reclaim didn't save out any FPRs to transact_fprs. */
/* Enable FP for the task: */
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
/* This loads and recheckpoints the FP registers from
 * thread.fpr[]. They will remain in registers after the
 * checkpoint so we don't need to reload them after.
 * If VMX is in use, the VRs now hold checkpointed values,
 * so we don't want to load the VRs from the thread_struct.
tm_recheckpoint(&current->thread, MSR_FP);
/* If VMX is in use, get the transactional values back */
if (regs->msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
/* At this point all the VSX state is loaded, so enable it */
regs->msr |= MSR_VSX;
void altivec_unavailable_tm(struct pt_regs *regs)
/* See the comments in fp_unavailable_tm(). This function operates
TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
regs->nip, regs->msr);
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC;
tm_recheckpoint(&current->thread, MSR_VEC);
current->thread.used_vr = 1;
if (regs->msr & MSR_FP) {
do_load_up_transact_fpu(&current->thread);
regs->msr |= MSR_VSX;
void vsx_unavailable_tm(struct pt_regs *regs)
unsigned long orig_msr = regs->msr;
/* See the comments in fp_unavailable_tm(). This works similarly,
 * though we're loading both FP and VEC registers in here.
 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
 * regs. Either way, set MSR_VSX.
TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
regs->nip, regs->msr);
current->thread.used_vsr = 1;
/* If FP and VMX are already loaded, we have all the state we need */
if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
regs->msr |= MSR_VSX;
/* This reclaims FP and/or VR regs if they're already enabled */
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
/* This loads & recheckpoints FP and VRs; but we have
 * to be sure not to overwrite previously-valid state.
tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
if (orig_msr & MSR_FP)
do_load_up_transact_fpu(&current->thread);
if (orig_msr & MSR_VEC)
do_load_up_transact_altivec(&current->thread);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
void performance_monitor_exception(struct pt_regs *regs)
__this_cpu_inc(irq_stat.pmu_irqs);
void SoftwareEmulation(struct pt_regs *regs)
CHECK_FULL_REGS(regs);
if (!user_mode(regs)) {
die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
if (!emulate_math(regs))
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif /* CONFIG_8xx */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
 * Determine the cause of the debug event, clear the
 * event flags and send a trap to the handler. Torez
if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
} else if (debug_status & DBSR_IAC1) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
} else if (debug_status & DBSR_IAC2) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
} else if (debug_status & DBSR_IAC3) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
} else if (debug_status & DBSR_IAC4) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
 * At the point this routine was called, the MSR(DE) was turned off.
 * Check all other debug flags and see if that bit needs to be turned
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
regs->msr |= MSR_DE;
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
 * on server, it stops on the target of the branch. In order to simulate
 * the server behaviour, we thus restart right away with a single step
 * instead of stopping here when hitting a BT
if (debug_status & DBSR_BT) {
regs->msr &= ~MSR_DE;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
/* Clear the BT event */
mtspr(SPRN_DBSR, DBSR_BT);
/* Do the single step trick only when coming from userspace */
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
if (notify_die(DIE_SSTEP, "block_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
if (debugger_sstep(regs))
} else if (debug_status & DBSR_IC) { /* Instruction complete */
regs->msr &= ~MSR_DE;
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
/* Clear the instruction completion event */
mtspr(SPRN_DBSR, DBSR_IC);
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
if (debugger_sstep(regs))
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
regs->msr |= MSR_DE;
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
handle_debug(regs, debug_status);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
regs->nip, regs->msr, regs->trap, print_tainted());
#endif /* CONFIG_TAU_INT */
#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
if (!user_mode(regs)) {
printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
" at %lx\n", regs->nip);
die("Kernel VMX/Altivec assist exception", regs, SIGILL);
flush_altivec_to_thread(current);
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
regs->nip += 4; /* skip emulated instruction */
emulate_single_step(regs);
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
/* didn't recognize the instruction */
/* XXX quick hack for now: set the non-Java bit in the VSCR */
printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
"in %s at %lx\n", current->comm, regs->nip);
current->thread.vr_state.vscr.u[3] |= 0x10000;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
/* We treat cache locking instructions from the user
 * as priv ops, in the future we could try to do
if (error_code & (ESR_DLK|ESR_ILK))
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
#endif /* CONFIG_FSL_BOOKE */
void SPEFloatingPointException(struct pt_regs *regs)
extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
err = do_spe_mathemu(regs);
regs->nip += 4; /* skip emulated instruction */
emulate_single_step(regs);
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
} else if (err == -EINVAL) {
/* didn't recognize the instruction */
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
_exception(SIGFPE, regs, code, regs->nip);
void SPEFloatingPointRoundException(struct pt_regs *regs)
extern int speround_handler(struct pt_regs *regs);
if (regs->msr & MSR_SPE)
giveup_spe(current);
err = speround_handler(regs);
regs->nip += 4; /* skip emulated instruction */
emulate_single_step(regs);
if (err == -EFAULT) {
/* got an error reading the instruction */
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
} else if (err == -EINVAL) {
/* didn't recognize the instruction */
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
_exception(SIGFPE, regs, 0, regs->nip);
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0. This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
void unrecoverable_exception(struct pt_regs *regs)
printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
regs->trap, regs->nip);
die("Unrecoverable exception", regs, SIGABRT);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
/* Generic WatchdogHandler, implement your own */
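/* Mask further watchdog interrupts by clearing TCR[WIE] before spinning. */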
mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
void WatchdogException(struct pt_regs *regs)
printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
WatchdogHandler(regs);
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
void kernel_bad_stack(struct pt_regs *regs)
printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
regs->gpr[1], regs->nip);
die("Bad kernel stack pointer", regs, SIGABRT);
void __init trap_init(void)
#ifdef CONFIG_PPC_EMULATED_STATS
#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
WARN_EMULATED_SETUP(altivec),
WARN_EMULATED_SETUP(dcba),
WARN_EMULATED_SETUP(dcbz),
WARN_EMULATED_SETUP(fp_pair),
WARN_EMULATED_SETUP(isel),
WARN_EMULATED_SETUP(mcrxr),
WARN_EMULATED_SETUP(mfpvr),
WARN_EMULATED_SETUP(multiple),
WARN_EMULATED_SETUP(popcntb),
WARN_EMULATED_SETUP(spe),
WARN_EMULATED_SETUP(string),
WARN_EMULATED_SETUP(sync),
WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
WARN_EMULATED_SETUP(math),
WARN_EMULATED_SETUP(vsx),
WARN_EMULATED_SETUP(mfdscr),
WARN_EMULATED_SETUP(mtdscr),
WARN_EMULATED_SETUP(lq_stq),
u32 ppc_warn_emulated;
void ppc_warn_emulated_print(const char *type)
pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
static int __init ppc_warn_emulated_init(void)
struct dentry *dir, *d;
struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
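/*
 * ppc_emulated is a struct made up of identical ppc_emulated_entry
 * fields, so it can be walked as a flat array below to create one
 * debugfs counter per emulated instruction type.
 */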
if (!powerpc_debugfs_root)
dir = debugfs_create_dir("emulated_instructions",
powerpc_debugfs_root);
d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
&ppc_warn_emulated);
for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
(u32 *)&entries[i].val.counter);
debugfs_remove_recursive(dir);
device_initcall(ppc_warn_emulated_init);
#endif /* CONFIG_PPC_EMULATED_STATS */