sh: Wire up division and address error exceptions on SH-2A.
[cascardo/linux.git] arch/sh/kernel/traps.c
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if (kgdb_debug_hook && !user_mode(regs))\
		(*kgdb_debug_hook)(regs);	\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_DIVZERO_ERROR	17
#  define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

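/*
 * Dump the words between 'bottom' and 'top' (rounding the start down
 * to a 32-byte boundary), eight per line, giving up at the first
 * faulting access.
 */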
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

	for (p = bottom & ~31; p < top; ) {
		printk("%04lx: ", p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				printk("         ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					printk("\n");
					return;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}
}

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	CHK_REMOTE_DEBUG(regs);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n",
	       current->comm, current->pid, task_stack_page(current) + 1);

	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

static int handle_unaligned_notify_count = 10;

/*
 * try to fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 * - return 0 if fixed up, -EFAULT if the fault was non-fatal (to the kernel)
 */
static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return 0;
		}
		die(str, regs, err);
	}
	return -EFAULT;
}
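
/*
 * The fixup entries searched above are emitted next to each faultable
 * kernel access by the uaccess macros. Schematically, an entry pairs
 * the address of the potentially-faulting access with the address of
 * its recovery code (the fixup stub at 3: is elided here):
 *
 *	1:	mov.l	@r4, r0		! access that may fault
 *	.section __ex_table, "a"
 *	.long	1b, 3b			! fault at 1b => resume at 3b
 *	.previous
 */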

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT if the referenced address is
 *   non-existent
 */
static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	count = 1<<(instruction&3);
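	/*
	 * Example: instruction 0x6502 (mov.l @r0,r5) decodes to
	 * rn = &regs->regs[5], rm = &regs->regs[0] and count = 4.
	 */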

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			src = (unsigned char*) *rm;
			src += regs->regs[0];
			dst = (unsigned char*) rn;
			*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
			if (copy_from_user(dst, src, count))
				goto fetch_fault;

			if ((count == 2) && dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			dst += 4-count;

			if (__copy_user(dst, src, count))
				goto fetch_fault;

			if ((count == 2) && dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
		} else {
			/* to memory */
			src = (unsigned char*) rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dst = (unsigned char*) *rn;
			dst += regs->regs[0];

			if (copy_to_user(dst, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
		dst += (instruction&0x000F)<<2;

		if (copy_to_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dst = (unsigned char*) *rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (copy_to_user(dst, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		src = (unsigned char*) *rm;
		src += (instruction&0x000F)<<2;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

		if (copy_from_user(dst,src,4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
		src = (unsigned char*) *rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#ifdef __LITTLE_ENDIAN__
		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[1] & 0x80) {
			dst[2] = 0xff;
			dst[3] = 0xff;
		}
#else
		dst += 4-count;

		if (copy_from_user(dst, src, count))
			goto fetch_fault;

		if ((count == 2) && dst[2] & 0x80) {
			dst[0] = 0xff;
			dst[1] = 0xff;
		}
#endif
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char*) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dst = (unsigned char*) *rm; /* called Rn in the spec */
			dst += (instruction&0x000F)<<1;

			if (copy_to_user(dst, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			src = (unsigned char*) *rm;
			src += (instruction&0x000F)<<1;
			dst = (unsigned char*) &regs->regs[0];
			*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif

			if (copy_from_user(dst, src, 2))
				goto fetch_fault;

#ifdef __LITTLE_ENDIAN__
			if (dst[1] & 0x80) {
				dst[2] = 0xff;
				dst[3] = 0xff;
			}
#else
			if (dst[2] & 0x80) {
				dst[0] = 0xff;
				dst[1] = 0xff;
			}
#endif
			ret = 0;
			break;
		}
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
	u16 instruction;

	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction,regs);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)((instr)<<4))>>3) + 4)
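
/*
 * Worked examples: for "bt" encoded as 0x8905 the 8-bit displacement
 * is 5, so SH_PC_8BIT_OFFSET gives 5*2 + 4 = 14. For "bra" encoded as
 * 0xAFFE, shifting up by 4 and arithmetically back down by 3 both
 * sign-extends the 12-bit displacement (-2) and doubles it, giving
 * -2*2 + 4 = 0.
 */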

static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot */
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}

#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
#else
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
#endif
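
/*
 * entry.S stashes the exception event before dispatching in here: CPUs
 * with register banks (SR.RB) leave it in r2_bank, while bankless CPUs
 * hand it over as the first argument in r4, which is what the two
 * variants above read back.
 */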

/*
 * Handle various address error exceptions
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	mm_segment_t oldfs;
	u16 instruction;
	int tmp;

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	lookup_exception_vector(error_code);
#endif

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
#ifdef CONFIG_CPU_SH2
		/*
		 * On the SH-2, we only have a single vector for address
		 * errors, there's no differentiating between a load error
		 * and a store error.
		 */
		current->thread.trap_no = 9;
#else
		current->thread.trap_no = (writeaccess) ? 8 : 7;
#endif

		/* bad PC is not something we can fix */
		if (regs->pc & 1)
			goto uspace_segv;

#ifndef CONFIG_CPU_SH2A
		set_fs(USER_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/*
			 * Argh. Fault on the instruction itself.
			 * This should never happen on non-SMP systems.
			 */
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */
#endif

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

#ifndef CONFIG_CPU_SH2A
		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/*
			 * Argh. Fault on the instruction itself.
			 * This should never happen on non-SMP systems.
			 */
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		handle_unaligned_access(instruction, regs);
		set_fs(oldfs);
#else
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
#endif
	}
}

#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst;

	/*
	 * Safeguard in case DSP mode is already enabled or the CPU
	 * lacks a DSP altogether.
	 */
	if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	siginfo_t info;

	current->thread.trap_no = r4;
	current->thread.error_code = 0;

	/* fill in the whole siginfo so no stack garbage reaches userspace */
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs.pc;	/* faulting instruction */

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		info.si_code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		info.si_code = FPE_INTOVF;
		break;
	}

	force_sig_info(SIGFPE, &info, current);
}
#endif

/* arch/sh/kernel/cpu/sh4/fpu.c */
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7, struct pt_regs regs);

asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short*)regs.pc);

	err = do_fpu_inst(inst, &regs);
	if (!err) {
		regs.pc += 2;
		return;
	}
	/* not an FPU instruction */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(&regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs.sr |= SR_DSP;
		return;
	}
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("reserved instruction", &regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs* regs)
{
	/*
	 * bf/s: 8fxx: PC+=d*2+4;
	 * bt/s: 8dxx: PC+=d*2+4;
	 * bra:  axxx: PC+=D*2+4;
	 * bsr:  bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf: 0x23: PC+=Rn+4;
	 * bsrf: 0x03: PC+=Rn+4   after PR=PC+4;
	 * jmp:  4x2b: PC=Rn;
	 * jsr:  4x0b: PC=Rn      after PR=PC+4;
	 * rts:  000b: PC=PR;
	 */
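	/*
	 * Each mask below matches an instruction pair: 0xfd00/0x8d00
	 * catches both bt/s (0x8dxx) and bf/s (0x8fxx), 0xf0df/0x0003
	 * catches bsrf (0x0n03) and braf (0x0n23), and 0xf0df/0x400b
	 * catches jsr (0x4n0b) and jmp (0x4n2b).
	 */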
686         if ((inst & 0xfd00) == 0x8d00) {
687                 regs->pc += SH_PC_8BIT_OFFSET(inst);
688                 return 0;
689         }
690
691         if ((inst & 0xe000) == 0xa000) {
692                 regs->pc += SH_PC_12BIT_OFFSET(inst);
693                 return 0;
694         }
695
696         if ((inst & 0xf0df) == 0x0003) {
697                 regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
698                 return 0;
699         }
700
701         if ((inst & 0xf0df) == 0x400b) {
702                 regs->pc = regs->regs[(inst & 0x0f00) >> 8];
703                 return 0;
704         }
705
706         if ((inst & 0xffff) == 0x000b) {
707                 regs->pc = regs->pr;
708                 return 0;
709         }
710
711         return 1;
712 }
713 #endif
714
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;

	get_user(inst, (unsigned short *)regs.pc + 1);
	if (!do_fpu_inst(inst, &regs)) {
		get_user(inst, (unsigned short *)regs.pc);
		if (!emulate_branch(inst, &regs))
			return;
		/* fault in branch. */
	}
	/* not an FPU instruction */
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("illegal slot instruction", &regs, error_code);
}

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs regs)
{
	long ex;

	lookup_exception_vector(ex);
	die_if_kernel("exception", &regs, ex);
}

#if defined(CONFIG_SH_STANDARD_BIOS)
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif

void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();
#endif

	/*
	 * NOTE: The VBR value should be in P1 (or P2, the virtual
	 * "fixed" address space). It should definitely not be a
	 * physical address.
	 */
	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}

void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}

extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
					     unsigned long r6, unsigned long r7,
					     struct pt_regs regs);

void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if (defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU)) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
	set_exception_table_evt(0x800, do_fpu_state_restore);
	set_exception_table_evt(0x820, do_fpu_state_restore);
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#endif

	/* Set up the VBR for the boot CPU */
	per_cpu_trap_init();
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}

	printk("\n");
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long stack;

	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.sp;

	stack = (unsigned long)sp;
	dump_mem("Stack: ", stack, THREAD_SIZE +
		 (unsigned long)task_stack_page(tsk));
	show_trace(tsk, sp, NULL);
}

void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);