1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <asm/uaccess.h>
38 #include <asm/page.h>
39 #include <asm/pgtable.h>
40 #include <asm/switch_to.h>
41 #include <asm/tm.h>
42
43 #define CREATE_TRACE_POINTS
44 #include <trace/events/syscalls.h>
45
46 /*
47  * The parameter save area on the stack is used to store arguments being passed
48  * to the callee function and is located at a fixed offset from the stack pointer.
49  */
50 #ifdef CONFIG_PPC32
51 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
52 #else /* CONFIG_PPC32 */
53 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
54 #endif
55
56 struct pt_regs_offset {
57         const char *name;
58         int offset;
59 };
60
61 #define STR(s)  #s                      /* convert to string */
62 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
63 #define GPR_OFFSET_NAME(num)    \
64         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
65         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
66 #define REG_OFFSET_END {.name = NULL, .offset = 0}
67
68 #define TVSO(f) (offsetof(struct thread_vr_state, f))
69 #define TFSO(f) (offsetof(struct thread_fp_state, f))
70 #define TSO(f)  (offsetof(struct thread_struct, f))
71
72 static const struct pt_regs_offset regoffset_table[] = {
73         GPR_OFFSET_NAME(0),
74         GPR_OFFSET_NAME(1),
75         GPR_OFFSET_NAME(2),
76         GPR_OFFSET_NAME(3),
77         GPR_OFFSET_NAME(4),
78         GPR_OFFSET_NAME(5),
79         GPR_OFFSET_NAME(6),
80         GPR_OFFSET_NAME(7),
81         GPR_OFFSET_NAME(8),
82         GPR_OFFSET_NAME(9),
83         GPR_OFFSET_NAME(10),
84         GPR_OFFSET_NAME(11),
85         GPR_OFFSET_NAME(12),
86         GPR_OFFSET_NAME(13),
87         GPR_OFFSET_NAME(14),
88         GPR_OFFSET_NAME(15),
89         GPR_OFFSET_NAME(16),
90         GPR_OFFSET_NAME(17),
91         GPR_OFFSET_NAME(18),
92         GPR_OFFSET_NAME(19),
93         GPR_OFFSET_NAME(20),
94         GPR_OFFSET_NAME(21),
95         GPR_OFFSET_NAME(22),
96         GPR_OFFSET_NAME(23),
97         GPR_OFFSET_NAME(24),
98         GPR_OFFSET_NAME(25),
99         GPR_OFFSET_NAME(26),
100         GPR_OFFSET_NAME(27),
101         GPR_OFFSET_NAME(28),
102         GPR_OFFSET_NAME(29),
103         GPR_OFFSET_NAME(30),
104         GPR_OFFSET_NAME(31),
105         REG_OFFSET_NAME(nip),
106         REG_OFFSET_NAME(msr),
107         REG_OFFSET_NAME(ctr),
108         REG_OFFSET_NAME(link),
109         REG_OFFSET_NAME(xer),
110         REG_OFFSET_NAME(ccr),
111 #ifdef CONFIG_PPC64
112         REG_OFFSET_NAME(softe),
113 #else
114         REG_OFFSET_NAME(mq),
115 #endif
116         REG_OFFSET_NAME(trap),
117         REG_OFFSET_NAME(dar),
118         REG_OFFSET_NAME(dsisr),
119         REG_OFFSET_END,
120 };
121
122 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
123 static void flush_tmregs_to_thread(struct task_struct *tsk)
124 {
125         /*
126          * If task is not current, it will have been flushed already to
127          * its thread_struct during __switch_to().
128          *
129          * A reclaim flushes ALL the state.
130          */
131
132         if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
133                 tm_reclaim_current(TM_CAUSE_SIGNAL);
134
135 }
136 #else
137 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
138 #endif
139
140 /**
141  * regs_query_register_offset() - query register offset from its name
142  * @name:       the name of a register
143  *
144  * regs_query_register_offset() returns the offset of a register in struct
145  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
146  */
147 int regs_query_register_offset(const char *name)
148 {
149         const struct pt_regs_offset *roff;
150         for (roff = regoffset_table; roff->name != NULL; roff++)
151                 if (!strcmp(roff->name, name))
152                         return roff->offset;
153         return -EINVAL;
154 }
155
156 /**
157  * regs_query_register_name() - query register name from its offset
158  * @offset:     the offset of a register in struct pt_regs.
159  *
160  * regs_query_register_name() returns the name of a register from its
161  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
162  */
163 const char *regs_query_register_name(unsigned int offset)
164 {
165         const struct pt_regs_offset *roff;
166         for (roff = regoffset_table; roff->name != NULL; roff++)
167                 if (roff->offset == offset)
168                         return roff->name;
169         return NULL;
170 }
171
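/*
 * Illustrative sketch only (kept under "#if 0", not built): how the two
 * lookup helpers above are typically combined.  A register name, e.g. one
 * taken from a kprobe event definition, is translated into a byte offset
 * within struct pt_regs, and the offset can be translated back for
 * reporting.  The helper name below is made up for this example.
 */
#if 0
static unsigned long example_read_reg_by_name(struct pt_regs *regs,
					      const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)
		return 0;	/* unknown register name */

	pr_debug("reading %s\n", regs_query_register_name(offset));
	return *(unsigned long *)((unsigned long)regs + offset);
}
#endif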
172 /*
173  * This does not yet catch signals sent when the child dies; that is
174  * handled in exit.c or in signal.c.
175  */
176
177 /*
178  * Set of msr bits that gdb can change on behalf of a process.
179  */
180 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
181 #define MSR_DEBUGCHANGE 0
182 #else
183 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
184 #endif
185
186 /*
187  * Max register writeable via put_reg
188  */
189 #ifdef CONFIG_PPC32
190 #define PT_MAX_PUT_REG  PT_MQ
191 #else
192 #define PT_MAX_PUT_REG  PT_CCR
193 #endif
194
195 static unsigned long get_user_msr(struct task_struct *task)
196 {
197         return task->thread.regs->msr | task->thread.fpexc_mode;
198 }
199
200 static int set_user_msr(struct task_struct *task, unsigned long msr)
201 {
202         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
203         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
204         return 0;
205 }
206
207 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
208 static unsigned long get_user_ckpt_msr(struct task_struct *task)
209 {
210         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
211 }
212
213 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
214 {
215         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
216         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
217         return 0;
218 }
219
220 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
221 {
222         task->thread.ckpt_regs.trap = trap & 0xfff0;
223         return 0;
224 }
225 #endif
226
227 #ifdef CONFIG_PPC64
228 static int get_user_dscr(struct task_struct *task, unsigned long *data)
229 {
230         *data = task->thread.dscr;
231         return 0;
232 }
233
234 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
235 {
236         task->thread.dscr = dscr;
237         task->thread.dscr_inherit = 1;
238         return 0;
239 }
240 #else
241 static int get_user_dscr(struct task_struct *task, unsigned long *data)
242 {
243         return -EIO;
244 }
245
246 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
247 {
248         return -EIO;
249 }
250 #endif
251
252 /*
253  * We prevent mucking around with the reserved area of trap
254  * which is used internally by the kernel.
255  */
256 static int set_user_trap(struct task_struct *task, unsigned long trap)
257 {
258         task->thread.regs->trap = trap & 0xfff0;
259         return 0;
260 }
261
262 /*
263  * Get contents of register REGNO in task TASK.
264  */
265 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
266 {
267         if ((task->thread.regs == NULL) || !data)
268                 return -EIO;
269
270         if (regno == PT_MSR) {
271                 *data = get_user_msr(task);
272                 return 0;
273         }
274
275         if (regno == PT_DSCR)
276                 return get_user_dscr(task, data);
277
278         if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
279                 *data = ((unsigned long *)task->thread.regs)[regno];
280                 return 0;
281         }
282
283         return -EIO;
284 }
285
286 /*
287  * Write contents of register REGNO in task TASK.
288  */
289 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
290 {
291         if (task->thread.regs == NULL)
292                 return -EIO;
293
294         if (regno == PT_MSR)
295                 return set_user_msr(task, data);
296         if (regno == PT_TRAP)
297                 return set_user_trap(task, data);
298         if (regno == PT_DSCR)
299                 return set_user_dscr(task, data);
300
301         if (regno <= PT_MAX_PUT_REG) {
302                 ((unsigned long *)task->thread.regs)[regno] = data;
303                 return 0;
304         }
305         return -EIO;
306 }
307
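/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): how the PTRACE_PEEKUSER path reaches ptrace_get_reg() above.
 * arch_ptrace(), later in this file, divides the byte address by
 * sizeof(long) to recover the PT_* index, so a debugger passes
 * PT_NIP * sizeof(unsigned long) to read the NIP.  Attaching and stopping
 * the tracee is omitted here.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* PT_NIP, PT_MSR, ... */

static long peek_nip(pid_t pid)
{
	/* The tracee must already be ptrace-stopped. */
	return ptrace(PTRACE_PEEKUSER, pid,
		      (void *)(PT_NIP * sizeof(unsigned long)), NULL);
}
#endif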
308 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
309                    unsigned int pos, unsigned int count,
310                    void *kbuf, void __user *ubuf)
311 {
312         int i, ret;
313
314         if (target->thread.regs == NULL)
315                 return -EIO;
316
317         if (!FULL_REGS(target->thread.regs)) {
318                 /* We have a partial register set.  Fill 14-31 with bogus values */
319                 for (i = 14; i < 32; i++)
320                         target->thread.regs->gpr[i] = NV_REG_POISON;
321         }
322
323         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
324                                   target->thread.regs,
325                                   0, offsetof(struct pt_regs, msr));
326         if (!ret) {
327                 unsigned long msr = get_user_msr(target);
328                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
329                                           offsetof(struct pt_regs, msr),
330                                           offsetof(struct pt_regs, msr) +
331                                           sizeof(msr));
332         }
333
334         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
335                      offsetof(struct pt_regs, msr) + sizeof(long));
336
337         if (!ret)
338                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
339                                           &target->thread.regs->orig_gpr3,
340                                           offsetof(struct pt_regs, orig_gpr3),
341                                           sizeof(struct pt_regs));
342         if (!ret)
343                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
344                                                sizeof(struct pt_regs), -1);
345
346         return ret;
347 }
348
349 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
350                    unsigned int pos, unsigned int count,
351                    const void *kbuf, const void __user *ubuf)
352 {
353         unsigned long reg;
354         int ret;
355
356         if (target->thread.regs == NULL)
357                 return -EIO;
358
359         CHECK_FULL_REGS(target->thread.regs);
360
361         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
362                                  target->thread.regs,
363                                  0, PT_MSR * sizeof(reg));
364
365         if (!ret && count > 0) {
366                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
367                                          PT_MSR * sizeof(reg),
368                                          (PT_MSR + 1) * sizeof(reg));
369                 if (!ret)
370                         ret = set_user_msr(target, reg);
371         }
372
373         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
374                      offsetof(struct pt_regs, msr) + sizeof(long));
375
376         if (!ret)
377                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
378                                          &target->thread.regs->orig_gpr3,
379                                          PT_ORIG_R3 * sizeof(reg),
380                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
381
382         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
383                 ret = user_regset_copyin_ignore(
384                         &pos, &count, &kbuf, &ubuf,
385                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
386                         PT_TRAP * sizeof(reg));
387
388         if (!ret && count > 0) {
389                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
390                                          PT_TRAP * sizeof(reg),
391                                          (PT_TRAP + 1) * sizeof(reg));
392                 if (!ret)
393                         ret = set_user_trap(target, reg);
394         }
395
396         if (!ret)
397                 ret = user_regset_copyin_ignore(
398                         &pos, &count, &kbuf, &ubuf,
399                         (PT_TRAP + 1) * sizeof(reg), -1);
400
401         return ret;
402 }
403
404 /*
405  * When the transaction is active, 'transact_fp' holds the current running
406  * value of all FPR registers and 'fp_state' holds the last checkpointed
407  * value of all FPR registers for the current transaction. When the transaction
408  * is not active, 'fp_state' holds the current running state of all the FPR
409  * registers. So this function, which returns the current running values of
410  * all the FPR registers, needs to know whether any transaction is active
411  * or not.
412  *
413  * Userspace interface buffer layout:
414  *
415  * struct data {
416  *      u64     fpr[32];
417  *      u64     fpscr;
418  * };
419  *
420  * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
421  * which determine the final code in this function. All the combinations of
422  * these two config options are possible except the one below as transactional
423  * memory config pulls in CONFIG_VSX automatically.
424  *
425  *      !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
426  */
427 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
428                    unsigned int pos, unsigned int count,
429                    void *kbuf, void __user *ubuf)
430 {
431 #ifdef CONFIG_VSX
432         u64 buf[33];
433         int i;
434 #endif
435         flush_fp_to_thread(target);
436
437 #if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
438         /* copy to local buffer then write that out */
439         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
440                 flush_altivec_to_thread(target);
441                 flush_tmregs_to_thread(target);
442                 for (i = 0; i < 32 ; i++)
443                         buf[i] = target->thread.TS_TRANS_FPR(i);
444                 buf[32] = target->thread.transact_fp.fpscr;
445         } else {
446                 for (i = 0; i < 32 ; i++)
447                         buf[i] = target->thread.TS_FPR(i);
448                 buf[32] = target->thread.fp_state.fpscr;
449         }
450         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
451 #endif
452
453 #if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
454         /* copy to local buffer then write that out */
455         for (i = 0; i < 32 ; i++)
456                 buf[i] = target->thread.TS_FPR(i);
457         buf[32] = target->thread.fp_state.fpscr;
458         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
459 #endif
460
461 #if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
462         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
463                      offsetof(struct thread_fp_state, fpr[32]));
464
465         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
466                                    &target->thread.fp_state, 0, -1);
467 #endif
468 }
469
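/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): fetching the layout documented above with PTRACE_GETREGSET and
 * NT_PRFPREG.  The local struct mirrors the "struct data" in the comment;
 * the tracee is assumed to be ptrace-stopped already.
 */
#if 0
#include <stdint.h>
#include <elf.h>		/* NT_PRFPREG */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_fp_data {
	uint64_t fpr[32];
	uint64_t fpscr;
};

static int example_get_fpregs(pid_t pid, struct ppc_fp_data *fp)
{
	struct iovec iov = { .iov_base = fp, .iov_len = sizeof(*fp) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
}
#endif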
470 /*
471  * When the transaction is active, 'transact_fp' holds the current running
472  * value of all FPR registers and 'fp_state' holds the last checkpointed
473  * value of all FPR registers for the current transaction. When the transaction
474  * is not active, 'fp_state' holds the current running state of all the FPR
475  * registers. So this function, which sets the current running values of
476  * all the FPR registers, needs to know whether any transaction is active
477  * or not.
478  *
479  * Userspace interface buffer layout:
480  *
481  * struct data {
482  *      u64     fpr[32];
483  *      u64     fpscr;
484  * };
485  *
486  * There are two config options CONFIG_VSX and CONFIG_PPC_TRANSACTIONAL_MEM
487  * which determine the final code in this function. All the combinations of
488  * these two config options are possible except the one below as transactional
489  * memory config pulls in CONFIG_VSX automatically.
490  *
491  *      !defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
492  */
493 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
494                    unsigned int pos, unsigned int count,
495                    const void *kbuf, const void __user *ubuf)
496 {
497 #ifdef CONFIG_VSX
498         u64 buf[33];
499         int i;
500 #endif
501         flush_fp_to_thread(target);
502
503 #if defined(CONFIG_VSX) && defined(CONFIG_PPC_TRANSACTIONAL_MEM)
504         /* copy to local buffer then write that out */
505         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
506         if (i)
507                 return i;
508
509         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
510                 flush_altivec_to_thread(target);
511                 flush_tmregs_to_thread(target);
512                 for (i = 0; i < 32 ; i++)
513                         target->thread.TS_TRANS_FPR(i) = buf[i];
514                 target->thread.transact_fp.fpscr = buf[32];
515         } else {
516                 for (i = 0; i < 32 ; i++)
517                         target->thread.TS_FPR(i) = buf[i];
518                 target->thread.fp_state.fpscr = buf[32];
519         }
520         return 0;
521 #endif
522
523 #if defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
524         /* copy to local buffer then write that out */
525         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
526         if (i)
527                 return i;
528         for (i = 0; i < 32 ; i++)
529                 target->thread.TS_FPR(i) = buf[i];
530         target->thread.fp_state.fpscr = buf[32];
531         return 0;
532 #endif
533
534 #if !defined(CONFIG_VSX) && !defined(CONFIG_PPC_TRANSACTIONAL_MEM)
535         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
536                      offsetof(struct thread_fp_state, fpr[32]));
537
538         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
539                                   &target->thread.fp_state, 0, -1);
540 #endif
541 }
542
543 #ifdef CONFIG_ALTIVEC
544 /*
545  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
546  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
547  * corresponding vector registers.  Quadword 32 contains the vscr as the
548  * last word (offset 12) within that quadword.  Quadword 33 contains the
549  * vrsave as the first word (offset 0) within the quadword.
550  *
551  * This definition of the VMX state is compatible with the current PPC32
552  * ptrace interface.  This allows signal handling and ptrace to use the
553  * same structures.  This also simplifies the implementation of a bi-arch
554  * (combined 32- and 64-bit) gdb.
555  */
556
557 static int vr_active(struct task_struct *target,
558                      const struct user_regset *regset)
559 {
560         flush_altivec_to_thread(target);
561         return target->thread.used_vr ? regset->n : 0;
562 }
563
564 /*
565  * When the transaction is active, 'transact_vr' holds the current running
566  * value of all the VMX registers and 'vr_state' holds the last checkpointed
567  * value of all the VMX registers for the current transaction to fall back
568  * on in case it aborts. When the transaction is not active, 'vr_state' holds
569  * the current running state of all the VMX registers. So this function, which
570  * gets the current running values of all the VMX registers, needs to know
571  * whether any transaction is active or not.
572  *
573  * Userspace interface buffer layout:
574  *
575  * struct data {
576  *      vector128       vr[32];
577  *      vector128       vscr;
578  *      vector128       vrsave;
579  * };
580  */
581 static int vr_get(struct task_struct *target, const struct user_regset *regset,
582                   unsigned int pos, unsigned int count,
583                   void *kbuf, void __user *ubuf)
584 {
585         struct thread_vr_state *addr;
586         int ret;
587
588         flush_altivec_to_thread(target);
589
590         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
591                      offsetof(struct thread_vr_state, vr[32]));
592
593 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
594         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
595                 flush_fp_to_thread(target);
596                 flush_tmregs_to_thread(target);
597                 addr = &target->thread.transact_vr;
598         } else {
599                 addr = &target->thread.vr_state;
600         }
601 #else
602         addr = &target->thread.vr_state;
603 #endif
604         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
605                                   addr, 0,
606                                   33 * sizeof(vector128));
607         if (!ret) {
608                 /*
609                  * Copy out only the low-order word of vrsave.
610                  */
611                 union {
612                         elf_vrreg_t reg;
613                         u32 word;
614                 } vrsave;
615                 memset(&vrsave, 0, sizeof(vrsave));
616
617 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
618                 if (MSR_TM_ACTIVE(target->thread.regs->msr))
619                         vrsave.word = target->thread.transact_vrsave;
620                 else
621                         vrsave.word = target->thread.vrsave;
622 #else
623                 vrsave.word = target->thread.vrsave;
624 #endif
625
626                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
627                                           33 * sizeof(vector128), -1);
628         }
629
630         return ret;
631 }
632
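/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): reading the 34-quadword layout described before vr_active()
 * with PTRACE_GETREGSET and NT_PPC_VMX, then picking out vscr (last word
 * of quadword 32) and vrsave (first word of quadword 33).  The tracee is
 * assumed to be ptrace-stopped already.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <elf.h>		/* NT_PPC_VMX */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int example_get_vscr_vrsave(pid_t pid, uint32_t *vscr, uint32_t *vrsave)
{
	uint8_t buf[34 * 16];	/* vr0-31, vscr quadword, vrsave quadword */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov) < 0)
		return -1;

	memcpy(vscr, buf + 32 * 16 + 12, sizeof(*vscr));	/* word at offset 12 of quadword 32 */
	memcpy(vrsave, buf + 33 * 16, sizeof(*vrsave));		/* first word of quadword 33 */
	return 0;
}
#endif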
633 /*
634  * When the transaction is active, 'transact_vr' holds the current running
635  * value of all the VMX registers and 'vr_state' holds the last checkpointed
636  * value of all the VMX registers for the current transaction to fall back
637  * on in case it aborts. When the transaction is not active, 'vr_state' holds
638  * the current running state of all the VMX registers. So this function, which
639  * sets the current running values of all the VMX registers, needs to know
640  * whether any transaction is active or not.
641  *
642  * Userspace interface buffer layout:
643  *
644  * struct data {
645  *      vector128       vr[32];
646  *      vector128       vscr;
647  *      vector128       vrsave;
648  * };
649  */
650 static int vr_set(struct task_struct *target, const struct user_regset *regset,
651                   unsigned int pos, unsigned int count,
652                   const void *kbuf, const void __user *ubuf)
653 {
654         struct thread_vr_state *addr;
655         int ret;
656
657         flush_altivec_to_thread(target);
658
659         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
660                      offsetof(struct thread_vr_state, vr[32]));
661
662 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
663         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
664                 flush_fp_to_thread(target);
665                 flush_tmregs_to_thread(target);
666                 addr = &target->thread.transact_vr;
667         } else {
668                 addr = &target->thread.vr_state;
669         }
670 #else
671         addr = &target->thread.vr_state;
672 #endif
673         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
674                                  addr, 0,
675                                  33 * sizeof(vector128));
676         if (!ret && count > 0) {
677                 /*
678                  * We use only the first word of vrsave.
679                  */
680                 union {
681                         elf_vrreg_t reg;
682                         u32 word;
683                 } vrsave;
684                 memset(&vrsave, 0, sizeof(vrsave));
685
686 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
687                 if (MSR_TM_ACTIVE(target->thread.regs->msr))
688                         vrsave.word = target->thread.transact_vrsave;
689                 else
690                         vrsave.word = target->thread.vrsave;
691 #else
692                 vrsave.word = target->thread.vrsave;
693 #endif
694                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
695                                          33 * sizeof(vector128), -1);
696                 if (!ret) {
697
698 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
699                         if (MSR_TM_ACTIVE(target->thread.regs->msr))
700                                 target->thread.transact_vrsave = vrsave.word;
701                         else
702                                 target->thread.vrsave = vrsave.word;
703 #else
704                         target->thread.vrsave = vrsave.word;
705 #endif
706                 }
707         }
708
709         return ret;
710 }
711 #endif /* CONFIG_ALTIVEC */
712
713 #ifdef CONFIG_VSX
714 /*
715  * Currently, to set and get all the VSX state, you need to call
716  * the FP and VMX calls as well.  This only gets/sets the lower 32
717  * 128-bit VSX registers.
718  */
719
720 static int vsr_active(struct task_struct *target,
721                       const struct user_regset *regset)
722 {
723         flush_vsx_to_thread(target);
724         return target->thread.used_vsr ? regset->n : 0;
725 }
726
727 /*
728  * When the transaction is active, 'transact_fp' holds the current running
729  * value of all FPR registers and 'fp_state' holds the last checkpointed
730  * value of all FPR registers for the current transaction. When the transaction
731  * is not active, 'fp_state' holds the current running state of all the FPR
732  * registers. So this function, which returns the current running values of
733  * all the FPR registers, needs to know whether any transaction is active
734  * or not.
735  *
736  * Userspace interface buffer layout:
737  *
738  * struct data {
739  *      u64     vsx[32];
740  * };
741  */
742 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
743                    unsigned int pos, unsigned int count,
744                    void *kbuf, void __user *ubuf)
745 {
746         u64 buf[32];
747         int ret, i;
748
749 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
750         flush_fp_to_thread(target);
751         flush_altivec_to_thread(target);
752         flush_tmregs_to_thread(target);
753 #endif
754         flush_vsx_to_thread(target);
755
756 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
757         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
758                 for (i = 0; i < 32 ; i++)
759                         buf[i] = target->thread.
760                                 transact_fp.fpr[i][TS_VSRLOWOFFSET];
761         } else {
762                 for (i = 0; i < 32 ; i++)
763                         buf[i] = target->thread.
764                                 fp_state.fpr[i][TS_VSRLOWOFFSET];
765         }
766 #else
767         for (i = 0; i < 32 ; i++)
768                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
769 #endif
770         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
771                                   buf, 0, 32 * sizeof(double));
772
773         return ret;
774 }
775
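/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): reading the u64 vsx[32] layout described above with
 * PTRACE_GETREGSET and NT_PPC_VSX.  As vsr_get() shows, this regset
 * carries only one doubleword per register (fpr[i][TS_VSRLOWOFFSET]),
 * so a debugger combines it with the FP and VMX regsets to reconstruct
 * the full VSX state.
 */
#if 0
#include <stdint.h>
#include <elf.h>		/* NT_PPC_VSX */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int example_get_vsx_low_halves(pid_t pid, uint64_t vsx[32])
{
	struct iovec iov = { .iov_base = vsx, .iov_len = 32 * sizeof(uint64_t) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &iov);
}
#endif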
776 /*
777  * When the transaction is active, 'transact_fp' holds the current running
778  * value of all FPR registers for the current transaction. When the transaction
779  * is not active, 'fp_state' holds the current running state of all the FPR
780  * registers. So this function, which sets the current running values of all
781  * registers. So this function which sets the current running values of all
782  * the FPR registers, needs to know whether any transaction is active or not.
783  *
784  * Userspace interface buffer layout:
785  *
786  * struct data {
787  *      u64     vsx[32];
788  * };
789  */
790 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
791                    unsigned int pos, unsigned int count,
792                    const void *kbuf, const void __user *ubuf)
793 {
794         u64 buf[32];
795         int ret, i;
796
797 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
798         flush_fp_to_thread(target);
799         flush_altivec_to_thread(target);
800         flush_tmregs_to_thread(target);
801 #endif
802         flush_vsx_to_thread(target);
803
804         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
805                                  buf, 0, 32 * sizeof(double));
806
807 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
808         if (MSR_TM_ACTIVE(target->thread.regs->msr)) {
809                 for (i = 0; i < 32 ; i++)
810                         target->thread.transact_fp.
811                                 fpr[i][TS_VSRLOWOFFSET] = buf[i];
812         } else {
813                 for (i = 0; i < 32 ; i++)
814                         target->thread.fp_state.
815                                 fpr[i][TS_VSRLOWOFFSET] = buf[i];
816         }
817 #else
818         for (i = 0; i < 32 ; i++)
819                 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
820 #endif
821
822
823         return ret;
824 }
825 #endif /* CONFIG_VSX */
826
827 #ifdef CONFIG_SPE
828
829 /*
830  * For get_evrregs/set_evrregs functions 'data' has the following layout:
831  *
832  * struct {
833  *   u32 evr[32];
834  *   u64 acc;
835  *   u32 spefscr;
836  * }
837  */
838
839 static int evr_active(struct task_struct *target,
840                       const struct user_regset *regset)
841 {
842         flush_spe_to_thread(target);
843         return target->thread.used_spe ? regset->n : 0;
844 }
845
846 static int evr_get(struct task_struct *target, const struct user_regset *regset,
847                    unsigned int pos, unsigned int count,
848                    void *kbuf, void __user *ubuf)
849 {
850         int ret;
851
852         flush_spe_to_thread(target);
853
854         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
855                                   &target->thread.evr,
856                                   0, sizeof(target->thread.evr));
857
858         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
859                      offsetof(struct thread_struct, spefscr));
860
861         if (!ret)
862                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
863                                           &target->thread.acc,
864                                           sizeof(target->thread.evr), -1);
865
866         return ret;
867 }
868
869 static int evr_set(struct task_struct *target, const struct user_regset *regset,
870                    unsigned int pos, unsigned int count,
871                    const void *kbuf, const void __user *ubuf)
872 {
873         int ret;
874
875         flush_spe_to_thread(target);
876
877         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
878                                  &target->thread.evr,
879                                  0, sizeof(target->thread.evr));
880
881         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
882                      offsetof(struct thread_struct, spefscr));
883
884         if (!ret)
885                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
886                                          &target->thread.acc,
887                                          sizeof(target->thread.evr), -1);
888
889         return ret;
890 }
891 #endif /* CONFIG_SPE */
892
893 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
894 /**
895  * tm_cgpr_active - get active number of registers in CGPR
896  * @target:     The target task.
897  * @regset:     The user regset structure.
898  *
899  * This function checks for the active number of available
900  * registers in the transaction checkpointed GPR category.
901  */
902 static int tm_cgpr_active(struct task_struct *target,
903                           const struct user_regset *regset)
904 {
905         if (!cpu_has_feature(CPU_FTR_TM))
906                 return -ENODEV;
907
908         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
909                 return 0;
910
911         return regset->n;
912 }
913
914 /**
915  * tm_cgpr_get - get CGPR registers
916  * @target:     The target task.
917  * @regset:     The user regset structure.
918  * @pos:        The buffer position.
919  * @count:      Number of bytes to copy.
920  * @kbuf:       Kernel buffer to copy from.
921  * @ubuf:       User buffer to copy into.
922  *
923  * This function gets transaction checkpointed GPR registers.
924  *
925  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
926  * GPR register values for the current transaction to fall back on if it
927  * aborts in between. This function gets those checkpointed GPR registers.
928  * The userspace interface buffer layout is as follows.
929  *
930  * struct data {
931  *      struct pt_regs ckpt_regs;
932  * };
933  */
934 static int tm_cgpr_get(struct task_struct *target,
935                         const struct user_regset *regset,
936                         unsigned int pos, unsigned int count,
937                         void *kbuf, void __user *ubuf)
938 {
939         int ret;
940
941         if (!cpu_has_feature(CPU_FTR_TM))
942                 return -ENODEV;
943
944         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
945                 return -ENODATA;
946
947         flush_fp_to_thread(target);
948         flush_altivec_to_thread(target);
949         flush_tmregs_to_thread(target);
950
951         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
952                                   &target->thread.ckpt_regs,
953                                   0, offsetof(struct pt_regs, msr));
954         if (!ret) {
955                 unsigned long msr = get_user_ckpt_msr(target);
956
957                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
958                                           offsetof(struct pt_regs, msr),
959                                           offsetof(struct pt_regs, msr) +
960                                           sizeof(msr));
961         }
962
963         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
964                      offsetof(struct pt_regs, msr) + sizeof(long));
965
966         if (!ret)
967                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
968                                           &target->thread.ckpt_regs.orig_gpr3,
969                                           offsetof(struct pt_regs, orig_gpr3),
970                                           sizeof(struct pt_regs));
971         if (!ret)
972                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
973                                                sizeof(struct pt_regs), -1);
974
975         return ret;
976 }
977
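/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): the checkpointed-GPR layout documented above is a full
 * struct pt_regs fetched with PTRACE_GETREGSET.  NT_PPC_TM_CGPR is assumed
 * to be exported by linux/elf.h in a tree carrying these regsets; per
 * tm_cgpr_get(), the call fails with -ENODATA unless the tracee is stopped
 * inside an active transaction.
 */
#if 0
#include <linux/elf.h>		/* NT_PPC_TM_CGPR (assumed) */
#include <asm/ptrace.h>		/* struct pt_regs */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int example_get_ckpt_gprs(pid_t pid, struct pt_regs *ckpt)
{
	struct iovec iov = { .iov_base = ckpt, .iov_len = sizeof(*ckpt) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CGPR, &iov);
}
#endif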
978 /**
979  * tm_cgpr_set - set the CGPR registers
980  * @target:     The target task.
981  * @regset:     The user regset structure.
982  * @pos:        The buffer position.
983  * @count:      Number of bytes to copy.
984  * @kbuf:       Kernel buffer to copy into.
985  * @ubuf:       User buffer to copy from.
986  *
987  * This function sets in transaction checkpointed GPR registers.
988  *
989  * When the transaction is active, 'ckpt_regs' holds the checkpointed
990  * GPR register values for the current transaction to fall back on if it
991  * aborts in between. This function sets those checkpointed GPR registers.
992  * The userspace interface buffer layout is as follows.
993  *
994  * struct data {
995  *      struct pt_regs ckpt_regs;
996  * };
997  */
998 static int tm_cgpr_set(struct task_struct *target,
999                         const struct user_regset *regset,
1000                         unsigned int pos, unsigned int count,
1001                         const void *kbuf, const void __user *ubuf)
1002 {
1003         unsigned long reg;
1004         int ret;
1005
1006         if (!cpu_has_feature(CPU_FTR_TM))
1007                 return -ENODEV;
1008
1009         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1010                 return -ENODATA;
1011
1012         flush_fp_to_thread(target);
1013         flush_altivec_to_thread(target);
1014         flush_tmregs_to_thread(target);
1015
1016         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1017                                  &target->thread.ckpt_regs,
1018                                  0, PT_MSR * sizeof(reg));
1019
1020         if (!ret && count > 0) {
1021                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1022                                          PT_MSR * sizeof(reg),
1023                                          (PT_MSR + 1) * sizeof(reg));
1024                 if (!ret)
1025                         ret = set_user_ckpt_msr(target, reg);
1026         }
1027
1028         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
1029                      offsetof(struct pt_regs, msr) + sizeof(long));
1030
1031         if (!ret)
1032                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1033                                          &target->thread.ckpt_regs.orig_gpr3,
1034                                          PT_ORIG_R3 * sizeof(reg),
1035                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
1036
1037         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
1038                 ret = user_regset_copyin_ignore(
1039                         &pos, &count, &kbuf, &ubuf,
1040                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
1041                         PT_TRAP * sizeof(reg));
1042
1043         if (!ret && count > 0) {
1044                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
1045                                          PT_TRAP * sizeof(reg),
1046                                          (PT_TRAP + 1) * sizeof(reg));
1047                 if (!ret)
1048                         ret = set_user_ckpt_trap(target, reg);
1049         }
1050
1051         if (!ret)
1052                 ret = user_regset_copyin_ignore(
1053                         &pos, &count, &kbuf, &ubuf,
1054                         (PT_TRAP + 1) * sizeof(reg), -1);
1055
1056         return ret;
1057 }
1058
1059 /**
1060  * tm_cfpr_active - get active number of registers in CFPR
1061  * @target:     The target task.
1062  * @regset:     The user regset structure.
1063  *
1064  * This function checks for the active number of available
1065  * registers in the transaction checkpointed FPR category.
1066  */
1067 static int tm_cfpr_active(struct task_struct *target,
1068                                 const struct user_regset *regset)
1069 {
1070         if (!cpu_has_feature(CPU_FTR_TM))
1071                 return -ENODEV;
1072
1073         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1074                 return 0;
1075
1076         return regset->n;
1077 }
1078
1079 /**
1080  * tm_cfpr_get - get CFPR registers
1081  * @target:     The target task.
1082  * @regset:     The user regset structure.
1083  * @pos:        The buffer position.
1084  * @count:      Number of bytes to copy.
1085  * @kbuf:       Kernel buffer to copy from.
1086  * @ubuf:       User buffer to copy into.
1087  *
1088  * This function gets in transaction checkpointed FPR registers.
1089  *
1090  * When the transaction is active 'fp_state' holds the checkpointed
1091  * values for the current transaction to fall back on if it aborts
1092  * in between. This function gets those checkpointed FPR registers.
1093  * The userspace interface buffer layout is as follows.
1094  *
1095  * struct data {
1096  *      u64     fpr[32];
1097  *      u64     fpscr;
1098  * };
1099  */
1100 static int tm_cfpr_get(struct task_struct *target,
1101                         const struct user_regset *regset,
1102                         unsigned int pos, unsigned int count,
1103                         void *kbuf, void __user *ubuf)
1104 {
1105         u64 buf[33];
1106         int i;
1107
1108         if (!cpu_has_feature(CPU_FTR_TM))
1109                 return -ENODEV;
1110
1111         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1112                 return -ENODATA;
1113
1114         flush_fp_to_thread(target);
1115         flush_altivec_to_thread(target);
1116         flush_tmregs_to_thread(target);
1117
1118         /* copy to local buffer then write that out */
1119         for (i = 0; i < 32 ; i++)
1120                 buf[i] = target->thread.TS_FPR(i);
1121         buf[32] = target->thread.fp_state.fpscr;
1122         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1123 }
1124
1125 /**
1126  * tm_cfpr_set - set CFPR registers
1127  * @target:     The target task.
1128  * @regset:     The user regset structure.
1129  * @pos:        The buffer position.
1130  * @count:      Number of bytes to copy.
1131  * @kbuf:       Kernel buffer to copy into.
1132  * @ubuf:       User buffer to copy from.
1133  *
1134  * This function sets in transaction checkpointed FPR registers.
1135  *
1136  * When the transaction is active 'fp_state' holds the checkpointed
1137  * FPR register values for the current transaction to fall back on
1138  * if it aborts in between. This function sets these checkpointed
1139  * FPR registers. The userspace interface buffer layout is as follows.
1140  *
1141  * struct data {
1142  *      u64     fpr[32];
1143  *      u64     fpscr;
1144  * };
1145  */
1146 static int tm_cfpr_set(struct task_struct *target,
1147                         const struct user_regset *regset,
1148                         unsigned int pos, unsigned int count,
1149                         const void *kbuf, const void __user *ubuf)
1150 {
1151         u64 buf[33];
1152         int i;
1153
1154         if (!cpu_has_feature(CPU_FTR_TM))
1155                 return -ENODEV;
1156
1157         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1158                 return -ENODATA;
1159
1160         flush_fp_to_thread(target);
1161         flush_altivec_to_thread(target);
1162         flush_tmregs_to_thread(target);
1163
1164         /* copy to local buffer then write that out */
1165         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1166         if (i)
1167                 return i;
1168         for (i = 0; i < 32 ; i++)
1169                 target->thread.TS_FPR(i) = buf[i];
1170         target->thread.fp_state.fpscr = buf[32];
1171         return 0;
1172 }
1173
1174 /**
1175  * tm_cvmx_active - get active number of registers in CVMX
1176  * @target:     The target task.
1177  * @regset:     The user regset structure.
1178  *
1179  * This function checks for the active number of available
1180  * registers in the checkpointed VMX category.
1181  */
1182 static int tm_cvmx_active(struct task_struct *target,
1183                                 const struct user_regset *regset)
1184 {
1185         if (!cpu_has_feature(CPU_FTR_TM))
1186                 return -ENODEV;
1187
1188         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1189                 return 0;
1190
1191         return regset->n;
1192 }
1193
1194 /**
1195  * tm_cvmx_get - get CVMX registers
1196  * @target:     The target task.
1197  * @regset:     The user regset structure.
1198  * @pos:        The buffer position.
1199  * @count:      Number of bytes to copy.
1200  * @kbuf:       Kernel buffer to copy from.
1201  * @ubuf:       User buffer to copy into.
1202  *
1203  * This function gets in transaction checkpointed VMX registers.
1204  *
1205  * When the transaction is active 'vr_state' and 'vrsave' hold
1206  * the checkpointed values for the current transaction to fall
1207  * back on if it aborts in between. The userspace interface buffer
1208  * layout is as follows.
1209  *
1210  * struct data {
1211  *      vector128       vr[32];
1212  *      vector128       vscr;
1213  *      vector128       vrsave;
1214  * };
1215  */
1216 static int tm_cvmx_get(struct task_struct *target,
1217                         const struct user_regset *regset,
1218                         unsigned int pos, unsigned int count,
1219                         void *kbuf, void __user *ubuf)
1220 {
1221         int ret;
1222
1223         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1224
1225         if (!cpu_has_feature(CPU_FTR_TM))
1226                 return -ENODEV;
1227
1228         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1229                 return -ENODATA;
1230
1231         /* Flush the state */
1232         flush_fp_to_thread(target);
1233         flush_altivec_to_thread(target);
1234         flush_tmregs_to_thread(target);
1235
1236         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1237                                         &target->thread.vr_state, 0,
1238                                         33 * sizeof(vector128));
1239         if (!ret) {
1240                 /*
1241                  * Copy out only the low-order word of vrsave.
1242                  */
1243                 union {
1244                         elf_vrreg_t reg;
1245                         u32 word;
1246                 } vrsave;
1247                 memset(&vrsave, 0, sizeof(vrsave));
1248                 vrsave.word = target->thread.vrsave;
1249                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1250                                                 33 * sizeof(vector128), -1);
1251         }
1252
1253         return ret;
1254 }
1255
1256 /**
1257  * tm_cvmx_set - set CVMX registers
1258  * @target:     The target task.
1259  * @regset:     The user regset structure.
1260  * @pos:        The buffer position.
1261  * @count:      Number of bytes to copy.
1262  * @kbuf:       Kernel buffer to copy into.
1263  * @ubuf:       User buffer to copy from.
1264  *
1265  * This function sets in transaction checkpointed VMX registers.
1266  *
1267  * When the transaction is active 'vr_state' and 'vrsave' hold
1268  * the checkpointed values for the current transaction to fall
1269  * back on if it aborts in between. The userspace interface buffer
1270  * layout is as follows.
1271  *
1272  * struct data {
1273  *      vector128       vr[32];
1274  *      vector128       vscr;
1275  *      vector128       vrsave;
1276  * };
1277  */
1278 static int tm_cvmx_set(struct task_struct *target,
1279                         const struct user_regset *regset,
1280                         unsigned int pos, unsigned int count,
1281                         const void *kbuf, const void __user *ubuf)
1282 {
1283         int ret;
1284
1285         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1286
1287         if (!cpu_has_feature(CPU_FTR_TM))
1288                 return -ENODEV;
1289
1290         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1291                 return -ENODATA;
1292
1293         flush_fp_to_thread(target);
1294         flush_altivec_to_thread(target);
1295         flush_tmregs_to_thread(target);
1296
1297         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1298                                         &target->thread.vr_state, 0,
1299                                         33 * sizeof(vector128));
1300         if (!ret && count > 0) {
1301                 /*
1302                  * We use only the low-order word of vrsave.
1303                  */
1304                 union {
1305                         elf_vrreg_t reg;
1306                         u32 word;
1307                 } vrsave;
1308                 memset(&vrsave, 0, sizeof(vrsave));
1309                 vrsave.word = target->thread.vrsave;
1310                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1311                                                 33 * sizeof(vector128), -1);
1312                 if (!ret)
1313                         target->thread.vrsave = vrsave.word;
1314         }
1315
1316         return ret;
1317 }
1318
1319 /**
1320  * tm_cvsx_active - get active number of registers in CVSX
1321  * @target:     The target task.
1322  * @regset:     The user regset structure.
1323  *
1324  * This function checks for the active number of available
1325  * registers in the transaction checkpointed VSX category.
1326  */
1327 static int tm_cvsx_active(struct task_struct *target,
1328                                 const struct user_regset *regset)
1329 {
1330         if (!cpu_has_feature(CPU_FTR_TM))
1331                 return -ENODEV;
1332
1333         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1334                 return 0;
1335
1336         flush_vsx_to_thread(target);
1337         return target->thread.used_vsr ? regset->n : 0;
1338 }
1339
1340 /**
1341  * tm_cvsx_get - get CVSX registers
1342  * @target:     The target task.
1343  * @regset:     The user regset structure.
1344  * @pos:        The buffer position.
1345  * @count:      Number of bytes to copy.
1346  * @kbuf:       Kernel buffer to copy from.
1347  * @ubuf:       User buffer to copy into.
1348  *
1349  * This function gets in transaction checkpointed VSX registers.
1350  *
1351  * When the transaction is active 'fp_state' holds the checkpointed
1352  * values for the current transaction to fall back on if it aborts
1353  * in between. This function gets those checkpointed VSX registers.
1354  * The userspace interface buffer layout is as follows.
1355  *
1356  * struct data {
1357  *      u64     vsx[32];
1358  * };
1359  */
1360 static int tm_cvsx_get(struct task_struct *target,
1361                         const struct user_regset *regset,
1362                         unsigned int pos, unsigned int count,
1363                         void *kbuf, void __user *ubuf)
1364 {
1365         u64 buf[32];
1366         int ret, i;
1367
1368         if (!cpu_has_feature(CPU_FTR_TM))
1369                 return -ENODEV;
1370
1371         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1372                 return -ENODATA;
1373
1374         /* Flush the state */
1375         flush_fp_to_thread(target);
1376         flush_altivec_to_thread(target);
1377         flush_tmregs_to_thread(target);
1378         flush_vsx_to_thread(target);
1379
1380         for (i = 0; i < 32 ; i++)
1381                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
1382         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1383                                   buf, 0, 32 * sizeof(double));
1384
1385         return ret;
1386 }
1387
1388 /**
1389  * tm_cvsx_set - set CVSX registers
1390  * @target:     The target task.
1391  * @regset:     The user regset structure.
1392  * @pos:        The buffer position.
1393  * @count:      Number of bytes to copy.
1394  * @kbuf:       Kernel buffer to copy into.
1395  * @ubuf:       User buffer to copy from.
1396  *
1397  * This function sets in transaction checkpointed VSX registers.
1398  *
1399  * When the transaction is active 'fp_state' holds the checkpointed
1400  * VSX register values for the current transaction to fall back on
1401  * if it aborts in between. This function sets these checkpointed
1402  * VSX registers. The userspace interface buffer layout is as follows.
1403  *
1404  * struct data {
1405  *      u64     vsx[32];
1406  * };
1407  */
1408 static int tm_cvsx_set(struct task_struct *target,
1409                         const struct user_regset *regset,
1410                         unsigned int pos, unsigned int count,
1411                         const void *kbuf, const void __user *ubuf)
1412 {
1413         u64 buf[32];
1414         int ret, i;
1415
1416         if (!cpu_has_feature(CPU_FTR_TM))
1417                 return -ENODEV;
1418
1419         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1420                 return -ENODATA;
1421
1422         /* Flush the state */
1423         flush_fp_to_thread(target);
1424         flush_altivec_to_thread(target);
1425         flush_tmregs_to_thread(target);
1426         flush_vsx_to_thread(target);
1427
1428         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1429                                  buf, 0, 32 * sizeof(double));
1430         for (i = 0; i < 32 ; i++)
1431                 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1432
1433         return ret;
1434 }
1435
1436 /**
1437  * tm_spr_active - get active number of registers in TM SPR
1438  * @target:     The target task.
1439  * @regset:     The user regset structure.
1440  *
1441  * This function checks the active number of available
1442  * registers in the transactional memory SPR category.
1443  */
1444 static int tm_spr_active(struct task_struct *target,
1445                          const struct user_regset *regset)
1446 {
1447         if (!cpu_has_feature(CPU_FTR_TM))
1448                 return -ENODEV;
1449
1450         return regset->n;
1451 }
1452
1453 /**
1454  * tm_spr_get - get the TM related SPR registers
1455  * @target:     The target task.
1456  * @regset:     The user regset structure.
1457  * @pos:        The buffer position.
1458  * @count:      Number of bytes to copy.
1459  * @kbuf:       Kernel buffer to copy from.
1460  * @ubuf:       User buffer to copy into.
1461  *
1462  * This function gets transactional memory related SPR registers.
1463  * The userspace interface buffer layout is as follows.
1464  *
1465  * struct {
1466  *      u64             tm_tfhar;
1467  *      u64             tm_texasr;
1468  *      u64             tm_tfiar;
1469  * };
1470  */
1471 static int tm_spr_get(struct task_struct *target,
1472                       const struct user_regset *regset,
1473                       unsigned int pos, unsigned int count,
1474                       void *kbuf, void __user *ubuf)
1475 {
1476         int ret;
1477
1478         /* Build tests */
1479         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1480         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1481         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1482
1483         if (!cpu_has_feature(CPU_FTR_TM))
1484                 return -ENODEV;
1485
1486         /* Flush the states */
1487         flush_fp_to_thread(target);
1488         flush_altivec_to_thread(target);
1489         flush_tmregs_to_thread(target);
1490
1491         /* TFHAR register */
1492         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1493                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1494
1495         /* TEXASR register */
1496         if (!ret)
1497                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1498                                 &target->thread.tm_texasr, sizeof(u64),
1499                                 2 * sizeof(u64));
1500
1501         /* TFIAR register */
1502         if (!ret)
1503                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1504                                 &target->thread.tm_tfiar,
1505                                 2 * sizeof(u64), 3 * sizeof(u64));
1506         return ret;
1507 }
1508
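/*
 * Illustrative userspace sketch (kept under "#if 0", not built with the
 * kernel): reading the three-u64 layout documented above with
 * PTRACE_GETREGSET.  NT_PPC_TM_SPR is assumed to be exported by
 * linux/elf.h in a tree carrying these regsets; the struct below simply
 * mirrors the documented layout.
 */
#if 0
#include <stdint.h>
#include <linux/elf.h>		/* NT_PPC_TM_SPR (assumed) */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_tm_sprs {
	uint64_t tm_tfhar;
	uint64_t tm_texasr;
	uint64_t tm_tfiar;
};

static int example_get_tm_sprs(pid_t pid, struct ppc_tm_sprs *sprs)
{
	struct iovec iov = { .iov_base = sprs, .iov_len = sizeof(*sprs) };

	return ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
}
#endif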
1509 /**
1510  * tm_spr_set - set the TM related SPR registers
1511  * @target:     The target task.
1512  * @regset:     The user regset structure.
1513  * @pos:        The buffer position.
1514  * @count:      Number of bytes to copy.
1515  * @kbuf:       Kernel buffer to copy into.
1516  * @ubuf:       User buffer to copy from.
1517  *
1518  * This function sets transactional memory related SPR registers.
1519  * The userspace interface buffer layout is as follows.
1520  *
1521  * struct {
1522  *      u64             tm_tfhar;
1523  *      u64             tm_texasr;
1524  *      u64             tm_tfiar;
1525  * };
1526  */
1527 static int tm_spr_set(struct task_struct *target,
1528                       const struct user_regset *regset,
1529                       unsigned int pos, unsigned int count,
1530                       const void *kbuf, const void __user *ubuf)
1531 {
1532         int ret;
1533
1534         /* Build tests */
1535         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1536         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1537         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1538
1539         if (!cpu_has_feature(CPU_FTR_TM))
1540                 return -ENODEV;
1541
1542         /* Flush the states */
1543         flush_fp_to_thread(target);
1544         flush_altivec_to_thread(target);
1545         flush_tmregs_to_thread(target);
1546
1547         /* TFHAR register */
1548         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1549                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1550
1551         /* TEXASR register */
1552         if (!ret)
1553                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1554                                 &target->thread.tm_texasr, sizeof(u64),
1555                                 2 * sizeof(u64));
1556
1557         /* TFIAR register */
1558         if (!ret)
1559                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1560                                 &target->thread.tm_tfiar,
1561                                  2 * sizeof(u64), 3 * sizeof(u64));
1562         return ret;
1563 }
1564
1565 static int tm_tar_active(struct task_struct *target,
1566                          const struct user_regset *regset)
1567 {
1568         if (!cpu_has_feature(CPU_FTR_TM))
1569                 return -ENODEV;
1570
1571         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1572                 return regset->n;
1573
1574         return 0;
1575 }
1576
1577 static int tm_tar_get(struct task_struct *target,
1578                       const struct user_regset *regset,
1579                       unsigned int pos, unsigned int count,
1580                       void *kbuf, void __user *ubuf)
1581 {
1582         int ret;
1583
1584         if (!cpu_has_feature(CPU_FTR_TM))
1585                 return -ENODEV;
1586
1587         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1588                 return -ENODATA;
1589
1590         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1591                                 &target->thread.tm_tar, 0, sizeof(u64));
1592         return ret;
1593 }
1594
1595 static int tm_tar_set(struct task_struct *target,
1596                       const struct user_regset *regset,
1597                       unsigned int pos, unsigned int count,
1598                       const void *kbuf, const void __user *ubuf)
1599 {
1600         int ret;
1601
1602         if (!cpu_has_feature(CPU_FTR_TM))
1603                 return -ENODEV;
1604
1605         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1606                 return -ENODATA;
1607
1608         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1609                                 &target->thread.tm_tar, 0, sizeof(u64));
1610         return ret;
1611 }
1612
1613 static int tm_ppr_active(struct task_struct *target,
1614                          const struct user_regset *regset)
1615 {
1616         if (!cpu_has_feature(CPU_FTR_TM))
1617                 return -ENODEV;
1618
1619         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1620                 return regset->n;
1621
1622         return 0;
1623 }
1624
1625
1626 static int tm_ppr_get(struct task_struct *target,
1627                       const struct user_regset *regset,
1628                       unsigned int pos, unsigned int count,
1629                       void *kbuf, void __user *ubuf)
1630 {
1631         int ret;
1632
1633         if (!cpu_has_feature(CPU_FTR_TM))
1634                 return -ENODEV;
1635
1636         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1637                 return -ENODATA;
1638
1639         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1640                                 &target->thread.tm_ppr, 0, sizeof(u64));
1641         return ret;
1642 }
1643
1644 static int tm_ppr_set(struct task_struct *target,
1645                       const struct user_regset *regset,
1646                       unsigned int pos, unsigned int count,
1647                       const void *kbuf, const void __user *ubuf)
1648 {
1649         int ret;
1650
1651         if (!cpu_has_feature(CPU_FTR_TM))
1652                 return -ENODEV;
1653
1654         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1655                 return -ENODATA;
1656
1657         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1658                                 &target->thread.tm_ppr, 0, sizeof(u64));
1659         return ret;
1660 }
1661
1662 static int tm_dscr_active(struct task_struct *target,
1663                          const struct user_regset *regset)
1664 {
1665         if (!cpu_has_feature(CPU_FTR_TM))
1666                 return -ENODEV;
1667
1668         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1669                 return regset->n;
1670
1671         return 0;
1672 }
1673
1674 static int tm_dscr_get(struct task_struct *target,
1675                       const struct user_regset *regset,
1676                       unsigned int pos, unsigned int count,
1677                       void *kbuf, void __user *ubuf)
1678 {
1679         int ret;
1680
1681         if (!cpu_has_feature(CPU_FTR_TM))
1682                 return -ENODEV;
1683
1684         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1685                 return -ENODATA;
1686
1687         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1688                                 &target->thread.tm_dscr, 0, sizeof(u64));
1689         return ret;
1690 }
1691
1692 static int tm_dscr_set(struct task_struct *target,
1693                       const struct user_regset *regset,
1694                       unsigned int pos, unsigned int count,
1695                       const void *kbuf, const void __user *ubuf)
1696 {
1697         int ret;
1698
1699         if (!cpu_has_feature(CPU_FTR_TM))
1700                 return -ENODEV;
1701
1702         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1703                 return -ENODATA;
1704
1705         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1706                                 &target->thread.tm_dscr, 0, sizeof(u64));
1707         return ret;
1708 }
1709 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1710
1711 #ifdef CONFIG_PPC64
1712 static int ppr_get(struct task_struct *target,
1713                       const struct user_regset *regset,
1714                       unsigned int pos, unsigned int count,
1715                       void *kbuf, void __user *ubuf)
1716 {
1717         int ret;
1718
1719         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1720                                 &target->thread.ppr, 0, sizeof(u64));
1721         return ret;
1722 }
1723
1724 static int ppr_set(struct task_struct *target,
1725                       const struct user_regset *regset,
1726                       unsigned int pos, unsigned int count,
1727                       const void *kbuf, const void __user *ubuf)
1728 {
1729         int ret;
1730
1731         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1732                                 &target->thread.ppr, 0, sizeof(u64));
1733         return ret;
1734 }
1735
1736 static int dscr_get(struct task_struct *target,
1737                       const struct user_regset *regset,
1738                       unsigned int pos, unsigned int count,
1739                       void *kbuf, void __user *ubuf)
1740 {
1741         int ret;
1742
1743         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1744                                 &target->thread.dscr, 0, sizeof(u64));
1745         return ret;
1746 }
1747 static int dscr_set(struct task_struct *target,
1748                       const struct user_regset *regset,
1749                       unsigned int pos, unsigned int count,
1750                       const void *kbuf, const void __user *ubuf)
1751 {
1752         int ret;
1753
1754         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1755                                 &target->thread.dscr, 0, sizeof(u64));
1756         return ret;
1757 }
1758 #endif
1759 #ifdef CONFIG_PPC_BOOK3S_64
1760 static int tar_get(struct task_struct *target,
1761                       const struct user_regset *regset,
1762                       unsigned int pos, unsigned int count,
1763                       void *kbuf, void __user *ubuf)
1764 {
1765         int ret;
1766
1767         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1768                                 &target->thread.tar, 0, sizeof(u64));
1769         return ret;
1770 }
1771 static int tar_set(struct task_struct *target,
1772                       const struct user_regset *regset,
1773                       unsigned int pos, unsigned int count,
1774                       const void *kbuf, const void __user *ubuf)
1775 {
1776         int ret;
1777
1778         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1779                                 &target->thread.tar, 0, sizeof(u64));
1780         return ret;
1781 }
1782
1783 static int ebb_active(struct task_struct *target,
1784                          const struct user_regset *regset)
1785 {
1786         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1787                 return -ENODEV;
1788
1789         if (target->thread.used_ebb)
1790                 return regset->n;
1791
1792         return 0;
1793 }
1794
1795 static int ebb_get(struct task_struct *target,
1796                       const struct user_regset *regset,
1797                       unsigned int pos, unsigned int count,
1798                       void *kbuf, void __user *ubuf)
1799 {
1800         /* Build tests */
1801         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1802         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1803
1804         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1805                 return -ENODEV;
1806
1807         if (!target->thread.used_ebb)
1808                 return -ENODATA;
1809
1810         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1811                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1812 }
1813
1814 static int ebb_set(struct task_struct *target,
1815                       const struct user_regset *regset,
1816                       unsigned int pos, unsigned int count,
1817                       const void *kbuf, const void __user *ubuf)
1818 {
1819         int ret = 0;
1820
1821         /* Build tests */
1822         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1823         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1824
1825         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1826                 return -ENODEV;
1827
1828         if (target->thread.used_ebb)
1829                 return -ENODATA;
1830
1831         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1832                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1833
1834         if (!ret)
1835                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1836                         &target->thread.ebbhr, sizeof(unsigned long),
1837                         2 * sizeof(unsigned long));
1838
1839         if (!ret)
1840                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1841                         &target->thread.bescr,
1842                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1843
1844         return ret;
1845 }
1846 static int pmu_active(struct task_struct *target,
1847                          const struct user_regset *regset)
1848 {
1849         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1850                 return -ENODEV;
1851
1852         return regset->n;
1853 }
1854
1855 static int pmu_get(struct task_struct *target,
1856                       const struct user_regset *regset,
1857                       unsigned int pos, unsigned int count,
1858                       void *kbuf, void __user *ubuf)
1859 {
1860         /* Build tests */
1861         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1862         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1863         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1864         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1865
1866         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1867                 return -ENODEV;
1868
1869         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1870                         &target->thread.siar, 0,
1871                         5 * sizeof(unsigned long));
1872 }
1873
1874 static int pmu_set(struct task_struct *target,
1875                       const struct user_regset *regset,
1876                       unsigned int pos, unsigned int count,
1877                       const void *kbuf, const void __user *ubuf)
1878 {
1879         int ret = 0;
1880
1881         /* Build tests */
1882         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1883         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1884         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1885         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1886
1887         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1888                 return -ENODEV;
1889
1890         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1891                         &target->thread.siar, 0,
1892                         sizeof(unsigned long));
1893
1894         if (!ret)
1895                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1896                         &target->thread.sdar, sizeof(unsigned long),
1897                         2 * sizeof(unsigned long));
1898
1899         if (!ret)
1900                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1901                         &target->thread.sier, 2 * sizeof(unsigned long),
1902                         3 * sizeof(unsigned long));
1903
1904         if (!ret)
1905                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1906                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1907                         4 * sizeof(unsigned long));
1908
1909         if (!ret)
1910                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1911                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1912                         5 * sizeof(unsigned long));
1913         return ret;
1914 }
1915 #endif
1916 /*
1917  * These are our native regset flavors.
1918  */
1919 enum powerpc_regset {
1920         REGSET_GPR,
1921         REGSET_FPR,
1922 #ifdef CONFIG_ALTIVEC
1923         REGSET_VMX,
1924 #endif
1925 #ifdef CONFIG_VSX
1926         REGSET_VSX,
1927 #endif
1928 #ifdef CONFIG_SPE
1929         REGSET_SPE,
1930 #endif
1931 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1932         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1933         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1934         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1935         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1936         REGSET_TM_SPR,          /* TM specific SPR registers */
1937         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1938         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1939         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1940 #endif
1941 #ifdef CONFIG_PPC64
1942         REGSET_PPR,             /* PPR register */
1943         REGSET_DSCR,            /* DSCR register */
1944 #endif
1945 #ifdef CONFIG_PPC_BOOK3S_64
1946         REGSET_TAR,             /* TAR register */
1947         REGSET_EBB,             /* EBB registers */
1948         REGSET_PMR,             /* Performance Monitor Registers */
1949 #endif
1950 };
1951
1952 static const struct user_regset native_regsets[] = {
1953         [REGSET_GPR] = {
1954                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1955                 .size = sizeof(long), .align = sizeof(long),
1956                 .get = gpr_get, .set = gpr_set
1957         },
1958         [REGSET_FPR] = {
1959                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1960                 .size = sizeof(double), .align = sizeof(double),
1961                 .get = fpr_get, .set = fpr_set
1962         },
1963 #ifdef CONFIG_ALTIVEC
1964         [REGSET_VMX] = {
1965                 .core_note_type = NT_PPC_VMX, .n = 34,
1966                 .size = sizeof(vector128), .align = sizeof(vector128),
1967                 .active = vr_active, .get = vr_get, .set = vr_set
1968         },
1969 #endif
1970 #ifdef CONFIG_VSX
1971         [REGSET_VSX] = {
1972                 .core_note_type = NT_PPC_VSX, .n = 32,
1973                 .size = sizeof(double), .align = sizeof(double),
1974                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1975         },
1976 #endif
1977 #ifdef CONFIG_SPE
1978         [REGSET_SPE] = {
1979                 .core_note_type = NT_PPC_SPE, .n = 35,
1980                 .size = sizeof(u32), .align = sizeof(u32),
1981                 .active = evr_active, .get = evr_get, .set = evr_set
1982         },
1983 #endif
1984 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1985         [REGSET_TM_CGPR] = {
1986                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1987                 .size = sizeof(long), .align = sizeof(long),
1988                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1989         },
1990         [REGSET_TM_CFPR] = {
1991                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1992                 .size = sizeof(double), .align = sizeof(double),
1993                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1994         },
1995         [REGSET_TM_CVMX] = {
1996                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1997                 .size = sizeof(vector128), .align = sizeof(vector128),
1998                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1999         },
2000         [REGSET_TM_CVSX] = {
2001                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2002                 .size = sizeof(double), .align = sizeof(double),
2003                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2004         },
2005         [REGSET_TM_SPR] = {
2006                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2007                 .size = sizeof(u64), .align = sizeof(u64),
2008                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2009         },
2010         [REGSET_TM_CTAR] = {
2011                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2012                 .size = sizeof(u64), .align = sizeof(u64),
2013                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2014         },
2015         [REGSET_TM_CPPR] = {
2016                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2017                 .size = sizeof(u64), .align = sizeof(u64),
2018                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2019         },
2020         [REGSET_TM_CDSCR] = {
2021                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2022                 .size = sizeof(u64), .align = sizeof(u64),
2023                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2024         },
2025 #endif
2026 #ifdef CONFIG_PPC64
2027         [REGSET_PPR] = {
2028                 .core_note_type = NT_PPC_PPR, .n = 1,
2029                 .size = sizeof(u64), .align = sizeof(u64),
2030                 .get = ppr_get, .set = ppr_set
2031         },
2032         [REGSET_DSCR] = {
2033                 .core_note_type = NT_PPC_DSCR, .n = 1,
2034                 .size = sizeof(u64), .align = sizeof(u64),
2035                 .get = dscr_get, .set = dscr_set
2036         },
2037 #endif
2038 #ifdef CONFIG_PPC_BOOK3S_64
2039         [REGSET_TAR] = {
2040                 .core_note_type = NT_PPC_TAR, .n = 1,
2041                 .size = sizeof(u64), .align = sizeof(u64),
2042                 .get = tar_get, .set = tar_set
2043         },
2044         [REGSET_EBB] = {
2045                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2046                 .size = sizeof(u64), .align = sizeof(u64),
2047                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2048         },
2049         [REGSET_PMR] = {
2050                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
2051                 .size = sizeof(u64), .align = sizeof(u64),
2052                 .active = pmu_active, .get = pmu_get, .set = pmu_set
2053         },
2054 #endif
2055 };
2056
2057 static const struct user_regset_view user_ppc_native_view = {
2058         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2059         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2060 };
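
/*
 * The table above backs PTRACE_GETREGSET/PTRACE_SETREGSET and the matching
 * ELF core-dump notes, keyed by .core_note_type.  A minimal userspace
 * sketch: REGSET_DSCR is a single u64 published as NT_PPC_DSCR, so a
 * debugger could update a stopped tracee's DSCR roughly as follows
 * (assumes NT_PPC_DSCR is visible from <elf.h>, omits error handling,
 * helper name illustrative):
 *
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	static long write_dscr(pid_t pid, uint64_t dscr)
 *	{
 *		struct iovec iov = {
 *			.iov_base = &dscr,
 *			.iov_len  = sizeof(dscr),
 *		};
 *
 *		return ptrace(PTRACE_SETREGSET, pid, NT_PPC_DSCR, &iov);
 *	}
 */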
2061
2062 #ifdef CONFIG_PPC64
2063 #include <linux/compat.h>
2064
2065 static int gpr32_get_common(struct task_struct *target,
2066                      const struct user_regset *regset,
2067                      unsigned int pos, unsigned int count,
2068                             void *kbuf, void __user *ubuf, bool tm_active)
2069 {
2070         const unsigned long *regs = &target->thread.regs->gpr[0];
2071         const unsigned long *ckpt_regs;
2072         compat_ulong_t *k = kbuf;
2073         compat_ulong_t __user *u = ubuf;
2074         compat_ulong_t reg;
2075         int i;
2076
2077 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2078         ckpt_regs = &target->thread.ckpt_regs.gpr[0];
2079 #endif
2080         if (tm_active) {
2081                 regs = ckpt_regs;
2082         } else {
2083                 if (target->thread.regs == NULL)
2084                         return -EIO;
2085
2086                 if (!FULL_REGS(target->thread.regs)) {
2087                         /*
2088                          * We have a partial register set.
2089                          * Fill 14-31 with bogus values.
2090                          */
2091                         for (i = 14; i < 32; i++)
2092                                 target->thread.regs->gpr[i] = NV_REG_POISON;
2093                 }
2094         }
2095
2096         pos /= sizeof(reg);
2097         count /= sizeof(reg);
2098
2099         if (kbuf)
2100                 for (; count > 0 && pos < PT_MSR; --count)
2101                         *k++ = regs[pos++];
2102         else
2103                 for (; count > 0 && pos < PT_MSR; --count)
2104                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2105                                 return -EFAULT;
2106
2107         if (count > 0 && pos == PT_MSR) {
2108                 reg = get_user_msr(target);
2109                 if (kbuf)
2110                         *k++ = reg;
2111                 else if (__put_user(reg, u++))
2112                         return -EFAULT;
2113                 ++pos;
2114                 --count;
2115         }
2116
2117         if (kbuf)
2118                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2119                         *k++ = regs[pos++];
2120         else
2121                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2122                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2123                                 return -EFAULT;
2124
2125         kbuf = k;
2126         ubuf = u;
2127         pos *= sizeof(reg);
2128         count *= sizeof(reg);
2129         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2130                                         PT_REGS_COUNT * sizeof(reg), -1);
2131 }
2132
2133 static int gpr32_set_common(struct task_struct *target,
2134                      const struct user_regset *regset,
2135                      unsigned int pos, unsigned int count,
2136                      const void *kbuf, const void __user *ubuf, bool tm_active)
2137 {
2138         unsigned long *regs = &target->thread.regs->gpr[0];
2139         unsigned long *ckpt_regs;
2140         const compat_ulong_t *k = kbuf;
2141         const compat_ulong_t __user *u = ubuf;
2142         compat_ulong_t reg;
2143
2144 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2145         ckpt_regs = &target->thread.ckpt_regs.gpr[0];
2146 #endif
2147
2148         if (tm_active) {
2149                 regs = ckpt_regs;
2150         } else {
2151                 regs = &target->thread.regs->gpr[0];
2152
2153                 if (target->thread.regs == NULL)
2154                         return -EIO;
2155
2156                 CHECK_FULL_REGS(target->thread.regs);
2157         }
2158
2159         pos /= sizeof(reg);
2160         count /= sizeof(reg);
2161
2162         if (kbuf)
2163                 for (; count > 0 && pos < PT_MSR; --count)
2164                         regs[pos++] = *k++;
2165         else
2166                 for (; count > 0 && pos < PT_MSR; --count) {
2167                         if (__get_user(reg, u++))
2168                                 return -EFAULT;
2169                         regs[pos++] = reg;
2170                 }
2171
2172
2173         if (count > 0 && pos == PT_MSR) {
2174                 if (kbuf)
2175                         reg = *k++;
2176                 else if (__get_user(reg, u++))
2177                         return -EFAULT;
2178                 set_user_msr(target, reg);
2179                 ++pos;
2180                 --count;
2181         }
2182
2183         if (kbuf) {
2184                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2185                         regs[pos++] = *k++;
2186                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2187                         ++k;
2188         } else {
2189                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2190                         if (__get_user(reg, u++))
2191                                 return -EFAULT;
2192                         regs[pos++] = reg;
2193                 }
2194                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2195                         if (__get_user(reg, u++))
2196                                 return -EFAULT;
2197         }
2198
2199         if (count > 0 && pos == PT_TRAP) {
2200                 if (kbuf)
2201                         reg = *k++;
2202                 else if (__get_user(reg, u++))
2203                         return -EFAULT;
2204                 set_user_trap(target, reg);
2205                 ++pos;
2206                 --count;
2207         }
2208
2209         kbuf = k;
2210         ubuf = u;
2211         pos *= sizeof(reg);
2212         count *= sizeof(reg);
2213         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2214                                          (PT_TRAP + 1) * sizeof(reg), -1);
2215 }
2216
2217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2218 static int tm_cgpr32_get(struct task_struct *target,
2219                      const struct user_regset *regset,
2220                      unsigned int pos, unsigned int count,
2221                      void *kbuf, void __user *ubuf)
2222 {
2223         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 1);
2224 }
2225
2226 static int tm_cgpr32_set(struct task_struct *target,
2227                      const struct user_regset *regset,
2228                      unsigned int pos, unsigned int count,
2229                      const void *kbuf, const void __user *ubuf)
2230 {
2231         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 1);
2232 }
2233 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2234
2235 static int gpr32_get(struct task_struct *target,
2236                      const struct user_regset *regset,
2237                      unsigned int pos, unsigned int count,
2238                      void *kbuf, void __user *ubuf)
2239 {
2240         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf, 0);
2241 }
2242
2243 static int gpr32_set(struct task_struct *target,
2244                      const struct user_regset *regset,
2245                      unsigned int pos, unsigned int count,
2246                      const void *kbuf, const void __user *ubuf)
2247 {
2248         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf, 0);
2249 }
2250
2251 /*
2252  * These are the regset flavors matching the CONFIG_PPC32 native set.
2253  */
2254 static const struct user_regset compat_regsets[] = {
2255         [REGSET_GPR] = {
2256                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2257                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2258                 .get = gpr32_get, .set = gpr32_set
2259         },
2260         [REGSET_FPR] = {
2261                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2262                 .size = sizeof(double), .align = sizeof(double),
2263                 .get = fpr_get, .set = fpr_set
2264         },
2265 #ifdef CONFIG_ALTIVEC
2266         [REGSET_VMX] = {
2267                 .core_note_type = NT_PPC_VMX, .n = 34,
2268                 .size = sizeof(vector128), .align = sizeof(vector128),
2269                 .active = vr_active, .get = vr_get, .set = vr_set
2270         },
2271 #endif
2272 #ifdef CONFIG_SPE
2273         [REGSET_SPE] = {
2274                 .core_note_type = NT_PPC_SPE, .n = 35,
2275                 .size = sizeof(u32), .align = sizeof(u32),
2276                 .active = evr_active, .get = evr_get, .set = evr_set
2277         },
2278 #endif
2279 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2280         [REGSET_TM_CGPR] = {
2281                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2282                 .size = sizeof(long), .align = sizeof(long),
2283                 .active = tm_cgpr_active,
2284                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2285         },
2286         [REGSET_TM_CFPR] = {
2287                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2288                 .size = sizeof(double), .align = sizeof(double),
2289                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2290         },
2291         [REGSET_TM_CVMX] = {
2292                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2293                 .size = sizeof(vector128), .align = sizeof(vector128),
2294                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2295         },
2296         [REGSET_TM_CVSX] = {
2297                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2298                 .size = sizeof(double), .align = sizeof(double),
2299                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2300         },
2301         [REGSET_TM_SPR] = {
2302                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2303                 .size = sizeof(u64), .align = sizeof(u64),
2304                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2305         },
2306         [REGSET_TM_CTAR] = {
2307                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2308                 .size = sizeof(u64), .align = sizeof(u64),
2309                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2310         },
2311         [REGSET_TM_CPPR] = {
2312                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2313                 .size = sizeof(u64), .align = sizeof(u64),
2314                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2315         },
2316         [REGSET_TM_CDSCR] = {
2317                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2318                 .size = sizeof(u64), .align = sizeof(u64),
2319                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2320         },
2321 #endif
2322 #ifdef CONFIG_PPC64
2323         [REGSET_PPR] = {
2324                 .core_note_type = NT_PPC_PPR, .n = 1,
2325                 .size = sizeof(u64), .align = sizeof(u64),
2326                 .get = ppr_get, .set = ppr_set
2327         },
2328         [REGSET_DSCR] = {
2329                 .core_note_type = NT_PPC_DSCR, .n = 1,
2330                 .size = sizeof(u64), .align = sizeof(u64),
2331                 .get = dscr_get, .set = dscr_set
2332         },
2333 #endif
2334 #ifdef CONFIG_PPC_BOOK3S_64
2335         [REGSET_TAR] = {
2336                 .core_note_type = NT_PPC_TAR, .n = 1,
2337                 .size = sizeof(u64), .align = sizeof(u64),
2338                 .get = tar_get, .set = tar_set
2339         },
2340         [REGSET_EBB] = {
2341                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2342                 .size = sizeof(u64), .align = sizeof(u64),
2343                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2344         },
2345 #endif
2346 };
2347
2348 static const struct user_regset_view user_ppc_compat_view = {
2349         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2350         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2351 };
2352 #endif  /* CONFIG_PPC64 */
2353
2354 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2355 {
2356 #ifdef CONFIG_PPC64
2357         if (test_tsk_thread_flag(task, TIF_32BIT))
2358                 return &user_ppc_compat_view;
2359 #endif
2360         return &user_ppc_native_view;
2361 }
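
/*
 * Because the view is chosen per task above, a 64-bit debugger tracing a
 * 32-bit (TIF_32BIT) child is served the compat_regsets layout, i.e.
 * 32-bit GPR slots via gpr32_get()/gpr32_set().  A minimal sketch of how a
 * tracer can observe this: PTRACE_GETREGSET rewrites iov_len to the number
 * of bytes actually returned (assumes <elf.h> provides NT_PRSTATUS, omits
 * error handling, helper name illustrative):
 *
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	static void show_gpr_note_size(pid_t pid)
 *	{
 *		unsigned char buf[1024];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *		if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *			printf("GPR note is %zu bytes\n", iov.iov_len);
 *	}
 */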
2362
2363
2364 void user_enable_single_step(struct task_struct *task)
2365 {
2366         struct pt_regs *regs = task->thread.regs;
2367
2368         if (regs != NULL) {
2369 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2370                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2371                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2372                 regs->msr |= MSR_DE;
2373 #else
2374                 regs->msr &= ~MSR_BE;
2375                 regs->msr |= MSR_SE;
2376 #endif
2377         }
2378         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2379 }
2380
2381 void user_enable_block_step(struct task_struct *task)
2382 {
2383         struct pt_regs *regs = task->thread.regs;
2384
2385         if (regs != NULL) {
2386 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2387                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2388                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2389                 regs->msr |= MSR_DE;
2390 #else
2391                 regs->msr &= ~MSR_SE;
2392                 regs->msr |= MSR_BE;
2393 #endif
2394         }
2395         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2396 }
2397
2398 void user_disable_single_step(struct task_struct *task)
2399 {
2400         struct pt_regs *regs = task->thread.regs;
2401
2402         if (regs != NULL) {
2403 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2404                 /*
2405                  * The logic to disable single stepping should be as
2406                  * simple as turning off the Instruction Complete flag.
2407                  * And, after doing so, if all debug flags are off, turn
2408                  * off DBCR0(IDM) and MSR(DE) .... Torez
2409                  */
2410                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2411                 /*
2412                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2413                  */
2414                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2415                                         task->thread.debug.dbcr1)) {
2416                         /*
2417                          * All debug events were off.....
2418                          */
2419                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2420                         regs->msr &= ~MSR_DE;
2421                 }
2422 #else
2423                 regs->msr &= ~(MSR_SE | MSR_BE);
2424 #endif
2425         }
2426         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2427 }
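
/*
 * The helpers above are what the generic ptrace code reaches for
 * PTRACE_SINGLESTEP, so a tracer can step a stopped child one instruction
 * at a time roughly as in this sketch (omits error handling, helper name
 * illustrative).  user_enable_block_step() backs the branch-taken variant
 * (PTRACE_SINGLEBLOCK, where the architecture exposes it).
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	static void step_n(pid_t pid, int n)
 *	{
 *		int status;
 *
 *		while (n-- > 0) {
 *			if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0)
 *				break;
 *			if (waitpid(pid, &status, 0) < 0 || !WIFSTOPPED(status))
 *				break;
 *		}
 *	}
 */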
2428
2429 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2430 void ptrace_triggered(struct perf_event *bp,
2431                       struct perf_sample_data *data, struct pt_regs *regs)
2432 {
2433         struct perf_event_attr attr;
2434
2435         /*
2436          * Disable the breakpoint request here since ptrace has defined a
2437          * one-shot behaviour for breakpoint exceptions in PPC64.
2438          * The SIGTRAP signal is generated automatically for us in do_dabr().
2439  * We don't have to do anything about that here.
2440          */
2441         attr = bp->attr;
2442         attr.disabled = true;
2443         modify_user_hw_breakpoint(bp, &attr);
2444 }
2445 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2446
2447 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2448                                unsigned long data)
2449 {
2450 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2451         int ret;
2452         struct thread_struct *thread = &(task->thread);
2453         struct perf_event *bp;
2454         struct perf_event_attr attr;
2455 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2456 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2457         struct arch_hw_breakpoint hw_brk;
2458 #endif
2459
2460         /* For ppc64 we support one DABR and no IABRs at the moment.
2461          *  For embedded processors we support one DAC and no IACs at the
2462          *  moment.
2463          */
2464         if (addr > 0)
2465                 return -EINVAL;
2466
2467         /* The bottom 3 bits in dabr are flags */
2468         if ((data & ~0x7UL) >= TASK_SIZE)
2469                 return -EIO;
2470
2471 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2472         /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2473          *  It was assumed, on previous implementations, that 3 bits were
2474          *  passed together with the data address, fitting the design of the
2475          *  DABR register, as follows:
2476          *
2477          *  bit 0: Read flag
2478          *  bit 1: Write flag
2479          *  bit 2: Breakpoint translation
2480          *
2481          *  Thus, we use them here accordingly.
2482          */
2483
2484         /* Ensure breakpoint translation bit is set */
2485         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2486                 return -EIO;
2487         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2488         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2489         hw_brk.len = 8;
2490 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2491         bp = thread->ptrace_bps[0];
2492         if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
2493                 if (bp) {
2494                         unregister_hw_breakpoint(bp);
2495                         thread->ptrace_bps[0] = NULL;
2496                 }
2497                 return 0;
2498         }
2499         if (bp) {
2500                 attr = bp->attr;
2501                 attr.bp_addr = hw_brk.address;
2502                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2503
2504                 /* Enable breakpoint */
2505                 attr.disabled = false;
2506
2507                 ret =  modify_user_hw_breakpoint(bp, &attr);
2508                 if (ret) {
2509                         return ret;
2510                 }
2511                 thread->ptrace_bps[0] = bp;
2512                 thread->hw_brk = hw_brk;
2513                 return 0;
2514         }
2515
2516         /* Create a new breakpoint request if one doesn't exist already */
2517         hw_breakpoint_init(&attr);
2518         attr.bp_addr = hw_brk.address;
2519         arch_bp_generic_fields(hw_brk.type,
2520                                &attr.bp_type);
2521
2522         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2523                                                ptrace_triggered, NULL, task);
2524         if (IS_ERR(bp)) {
2525                 thread->ptrace_bps[0] = NULL;
2526                 return PTR_ERR(bp);
2527         }
2528
2529 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2530         task->thread.hw_brk = hw_brk;
2531 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2532         /* As described above, it was assumed 3 bits were passed with the data
2533          *  address, but we will assume only the mode bits will be passed
2534          *  so as not to cause alignment restrictions for DAC-based processors.
2535          */
2536
2537         /* DAC's hold the whole address without any mode flags */
2538         task->thread.debug.dac1 = data & ~0x3UL;
2539
2540         if (task->thread.debug.dac1 == 0) {
2541                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2542                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2543                                         task->thread.debug.dbcr1)) {
2544                         task->thread.regs->msr &= ~MSR_DE;
2545                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2546                 }
2547                 return 0;
2548         }
2549
2550         /* Read or Write bits must be set */
2551
2552         if (!(data & 0x3UL))
2553                 return -EINVAL;
2554
2555         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2556            register */
2557         task->thread.debug.dbcr0 |= DBCR0_IDM;
2558
2559         /* Check for write and read flags and set DBCR0
2560            accordingly */
2561         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2562         if (data & 0x1UL)
2563                 dbcr_dac(task) |= DBCR_DAC1R;
2564         if (data & 0x2UL)
2565                 dbcr_dac(task) |= DBCR_DAC1W;
2566         task->thread.regs->msr |= MSR_DE;
2567 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2568         return 0;
2569 }
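
/*
 * For the classic DABR case above, a minimal userspace sketch: the data
 * word carries the watch address plus the flag bits described in the
 * comment (bit 0 read, bit 1 write, bit 2 translation, which must be set).
 * It assumes PTRACE_SET_DEBUGREG is visible (it is a powerpc-specific
 * request from the <asm/ptrace.h> UAPI header), uses literal bit values
 * per that comment, omits error handling, and the helper names are
 * illustrative.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	static long watch_write(pid_t pid, unsigned long addr)
 *	{
 *		unsigned long dabr = (addr & ~7UL) | 0x4UL | 0x2UL;
 *
 *		return ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
 *	}
 *
 *	static long clear_watch(pid_t pid)
 *	{
 *		return ptrace(PTRACE_SET_DEBUGREG, pid, 0, 0UL);
 *	}
 */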
2570
2571 /*
2572  * Called by kernel/ptrace.c when detaching..
2573  *
2574  * Make sure single step bits etc are not set.
2575  */
2576 void ptrace_disable(struct task_struct *child)
2577 {
2578         /* make sure the single step bit is not set. */
2579         user_disable_single_step(child);
2580 }
2581
2582 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2583 static long set_instruction_bp(struct task_struct *child,
2584                               struct ppc_hw_breakpoint *bp_info)
2585 {
2586         int slot;
2587         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2588         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2589         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2590         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2591
2592         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2593                 slot2_in_use = 1;
2594         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2595                 slot4_in_use = 1;
2596
2597         if (bp_info->addr >= TASK_SIZE)
2598                 return -EIO;
2599
2600         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2601
2602                 /* Make sure range is valid. */
2603                 if (bp_info->addr2 >= TASK_SIZE)
2604                         return -EIO;
2605
2606                 /* We need a pair of IAC registers */
2607                 if ((!slot1_in_use) && (!slot2_in_use)) {
2608                         slot = 1;
2609                         child->thread.debug.iac1 = bp_info->addr;
2610                         child->thread.debug.iac2 = bp_info->addr2;
2611                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2612                         if (bp_info->addr_mode ==
2613                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2614                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2615                         else
2616                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2617 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2618                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2619                         slot = 3;
2620                         child->thread.debug.iac3 = bp_info->addr;
2621                         child->thread.debug.iac4 = bp_info->addr2;
2622                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2623                         if (bp_info->addr_mode ==
2624                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2625                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2626                         else
2627                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2628 #endif
2629                 } else
2630                         return -ENOSPC;
2631         } else {
2632                 /* We only need one.  If possible leave a pair free in
2633                  * case a range is needed later
2634                  */
2635                 if (!slot1_in_use) {
2636                         /*
2637                          * Don't use iac1 if iac1-iac2 are free and either
2638                          * iac3 or iac4 (but not both) are free
2639                          */
2640                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2641                                 slot = 1;
2642                                 child->thread.debug.iac1 = bp_info->addr;
2643                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2644                                 goto out;
2645                         }
2646                 }
2647                 if (!slot2_in_use) {
2648                         slot = 2;
2649                         child->thread.debug.iac2 = bp_info->addr;
2650                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2651 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2652                 } else if (!slot3_in_use) {
2653                         slot = 3;
2654                         child->thread.debug.iac3 = bp_info->addr;
2655                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2656                 } else if (!slot4_in_use) {
2657                         slot = 4;
2658                         child->thread.debug.iac4 = bp_info->addr;
2659                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2660 #endif
2661                 } else
2662                         return -ENOSPC;
2663         }
2664 out:
2665         child->thread.debug.dbcr0 |= DBCR0_IDM;
2666         child->thread.regs->msr |= MSR_DE;
2667
2668         return slot;
2669 }
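
/*
 * The IAC pairing above is driven from userspace through
 * PPC_PTRACE_SETHWDEBUG.  A minimal sketch requesting an inclusive
 * instruction address range, which consumes an IAC pair as described
 * (struct ppc_hw_breakpoint and the PPC_BREAKPOINT_* constants come from
 * the powerpc <asm/ptrace.h> UAPI header; error handling omitted; helper
 * name illustrative).  The positive return value is the slot handle later
 * accepted by PPC_PTRACE_DELHWDEBUG.
 *
 *	#include <string.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	static long set_exec_range(pid_t pid, unsigned long start,
 *				   unsigned long end)
 *	{
 *		struct ppc_hw_breakpoint bp;
 *
 *		memset(&bp, 0, sizeof(bp));
 *		bp.version        = 1;
 *		bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_EXECUTE;
 *		bp.addr_mode      = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
 *		bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
 *		bp.addr           = start;
 *		bp.addr2          = end;
 *
 *		return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	}
 */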
2670
2671 static int del_instruction_bp(struct task_struct *child, int slot)
2672 {
2673         switch (slot) {
2674         case 1:
2675                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2676                         return -ENOENT;
2677
2678                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2679                         /* address range - clear slots 1 & 2 */
2680                         child->thread.debug.iac2 = 0;
2681                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2682                 }
2683                 child->thread.debug.iac1 = 0;
2684                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2685                 break;
2686         case 2:
2687                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2688                         return -ENOENT;
2689
2690                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2691                         /* used in a range */
2692                         return -EINVAL;
2693                 child->thread.debug.iac2 = 0;
2694                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2695                 break;
2696 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2697         case 3:
2698                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2699                         return -ENOENT;
2700
2701                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2702                         /* address range - clear slots 3 & 4 */
2703                         child->thread.debug.iac4 = 0;
2704                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2705                 }
2706                 child->thread.debug.iac3 = 0;
2707                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2708                 break;
2709         case 4:
2710                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2711                         return -ENOENT;
2712
2713                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2714                         /* Used in a range */
2715                         return -EINVAL;
2716                 child->thread.debug.iac4 = 0;
2717                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2718                 break;
2719 #endif
2720         default:
2721                 return -EINVAL;
2722         }
2723         return 0;
2724 }
2725
2726 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2727 {
2728         int byte_enable =
2729                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2730                 & 0xf;
2731         int condition_mode =
2732                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2733         int slot;
2734
2735         if (byte_enable && (condition_mode == 0))
2736                 return -EINVAL;
2737
2738         if (bp_info->addr >= TASK_SIZE)
2739                 return -EIO;
2740
2741         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2742                 slot = 1;
2743                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2744                         dbcr_dac(child) |= DBCR_DAC1R;
2745                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2746                         dbcr_dac(child) |= DBCR_DAC1W;
2747                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2748 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2749                 if (byte_enable) {
2750                         child->thread.debug.dvc1 =
2751                                 (unsigned long)bp_info->condition_value;
2752                         child->thread.debug.dbcr2 |=
2753                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2754                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2755                 }
2756 #endif
2757 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2758         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2759                 /* Both dac1 and dac2 are part of a range */
2760                 return -ENOSPC;
2761 #endif
2762         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2763                 slot = 2;
2764                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2765                         dbcr_dac(child) |= DBCR_DAC2R;
2766                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2767                         dbcr_dac(child) |= DBCR_DAC2W;
2768                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2769 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2770                 if (byte_enable) {
2771                         child->thread.debug.dvc2 =
2772                                 (unsigned long)bp_info->condition_value;
2773                         child->thread.debug.dbcr2 |=
2774                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2775                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2776                 }
2777 #endif
2778         } else
2779                 return -ENOSPC;
2780         child->thread.debug.dbcr0 |= DBCR0_IDM;
2781         child->thread.regs->msr |= MSR_DE;
2782
2783         return slot + 4;
2784 }
2785
2786 static int del_dac(struct task_struct *child, int slot)
2787 {
2788         if (slot == 1) {
2789                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2790                         return -ENOENT;
2791
2792                 child->thread.debug.dac1 = 0;
2793                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2794 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2795                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2796                         child->thread.debug.dac2 = 0;
2797                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2798                 }
2799                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2800 #endif
2801 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2802                 child->thread.debug.dvc1 = 0;
2803 #endif
2804         } else if (slot == 2) {
2805                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2806                         return -ENOENT;
2807
2808 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2809                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2810                         /* Part of a range */
2811                         return -EINVAL;
2812                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2813 #endif
2814 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2815                 child->thread.debug.dvc2 = 0;
2816 #endif
2817                 child->thread.debug.dac2 = 0;
2818                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2819         } else
2820                 return -EINVAL;
2821
2822         return 0;
2823 }
2824 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2825
2826 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2827 static int set_dac_range(struct task_struct *child,
2828                          struct ppc_hw_breakpoint *bp_info)
2829 {
2830         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2831
2832         /* We don't allow range watchpoints to be used with DVC */
2833         if (bp_info->condition_mode)
2834                 return -EINVAL;
2835
2836         /*
2837          * Best effort to verify the address range.  The user/supervisor bits
2838          * prevent trapping in kernel space, but let's fail on an obvious bad
2839          * range.  The simple test on the mask is not fool-proof, and any
2840          * exclusive range will spill over into kernel space.
2841          */
2842         if (bp_info->addr >= TASK_SIZE)
2843                 return -EIO;
2844         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2845                 /*
2846                  * dac2 is a bitmask.  Don't allow a mask that makes a
2847                  * kernel space address from a valid dac1 value
2848                  */
2849                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2850                         return -EIO;
2851         } else {
2852                 /*
2853                  * For range breakpoints, addr2 must also be a valid address
2854                  */
2855                 if (bp_info->addr2 >= TASK_SIZE)
2856                         return -EIO;
2857         }
2858
2859         if (child->thread.debug.dbcr0 &
2860             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2861                 return -ENOSPC;
2862
2863         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2864                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2865         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2866                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2867         child->thread.debug.dac1 = bp_info->addr;
2868         child->thread.debug.dac2 = bp_info->addr2;
2869         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2870                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2871         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2872                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2873         else    /* PPC_BREAKPOINT_MODE_MASK */
2874                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2875         child->thread.regs->msr |= MSR_DE;
2876
2877         return 5;
2878 }
2879 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2880
2881 static long ppc_set_hwdebug(struct task_struct *child,
2882                      struct ppc_hw_breakpoint *bp_info)
2883 {
2884 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2885         int len = 0;
2886         struct thread_struct *thread = &(child->thread);
2887         struct perf_event *bp;
2888         struct perf_event_attr attr;
2889 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2890 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2891         struct arch_hw_breakpoint brk;
2892 #endif
2893
2894         if (bp_info->version != 1)
2895                 return -ENOTSUPP;
2896 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2897         /*
2898          * Check for invalid flags and combinations
2899          */
2900         if ((bp_info->trigger_type == 0) ||
2901             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2902                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2903             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2904             (bp_info->condition_mode &
2905              ~(PPC_BREAKPOINT_CONDITION_MODE |
2906                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2907                 return -EINVAL;
2908 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2909         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2910                 return -EINVAL;
2911 #endif
2912
2913         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2914                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2915                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2916                         return -EINVAL;
2917                 return set_instruction_bp(child, bp_info);
2918         }
2919         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2920                 return set_dac(child, bp_info);
2921
2922 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2923         return set_dac_range(child, bp_info);
2924 #else
2925         return -EINVAL;
2926 #endif
2927 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2928         /*
2929          * We only support one data breakpoint
2930          */
2931         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2932             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2933             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2934                 return -EINVAL;
2935
2936         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2937                 return -EIO;
2938
2939         brk.address = bp_info->addr & ~7UL;
2940         brk.type = HW_BRK_TYPE_TRANSLATE;
2941         brk.len = 8;
2942         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2943                 brk.type |= HW_BRK_TYPE_READ;
2944         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2945                 brk.type |= HW_BRK_TYPE_WRITE;
2946 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2947         /*
2948          * Check if the request is for 'range' breakpoints. We can
2949          * support it if range < 8 bytes.
2950          */
2951         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2952                 len = bp_info->addr2 - bp_info->addr;
2953         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2954                 len = 1;
2955         else
2956                 return -EINVAL;
2957         bp = thread->ptrace_bps[0];
2958         if (bp)
2959                 return -ENOSPC;
2960
2961         /* Create a new breakpoint request if one doesn't exist already */
2962         hw_breakpoint_init(&attr);
2963         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2964         attr.bp_len = len;
2965         arch_bp_generic_fields(brk.type, &attr.bp_type);
2966
2967         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2968                                                ptrace_triggered, NULL, child);
2969         if (IS_ERR(bp)) {
2970                 thread->ptrace_bps[0] = NULL;
2971                 return PTR_ERR(bp);
2972         }
2973
2974         return 1;
2975 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2976
2977         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2978                 return -EINVAL;
2979
2980         if (child->thread.hw_brk.address)
2981                 return -ENOSPC;
2982
2983         child->thread.hw_brk = brk;
2984
2985         return 1;
2986 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2987 }
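/*
 * For the generic (non-CONFIG_PPC_ADV_DEBUG_REGS) path above, a hedged
 * userspace sketch of requesting the single supported data write watchpoint
 * through PPC_PTRACE_SETHWDEBUG.  Names come from the powerpc uapi
 * <asm/ptrace.h>; pid and addr are placeholders.
 *
 *	static long watch_writes(pid_t pid, unsigned long addr)
 *	{
 *		struct ppc_hw_breakpoint bp = {
 *			.version	= 1,
 *			.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
 *			.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
 *			.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
 *			.addr		= addr,
 *		};
 *
 *		// Success returns handle 1 (only one data breakpoint here);
 *		// failure returns -1 with errno ENOSPC (slot busy) or EIO
 *		// (kernel-space address).
 *		return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	}
 */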
2988
2989 static long ppc_del_hwdebug(struct task_struct *child, long data)
2990 {
2991 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2992         int ret = 0;
2993         struct thread_struct *thread = &(child->thread);
2994         struct perf_event *bp;
2995 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2996 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2997         int rc;
2998
2999         if (data <= 4)
3000                 rc = del_instruction_bp(child, (int)data);
3001         else
3002                 rc = del_dac(child, (int)data - 4);
3003
3004         if (!rc) {
3005                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
3006                                         child->thread.debug.dbcr1)) {
3007                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
3008                         child->thread.regs->msr &= ~MSR_DE;
3009                 }
3010         }
3011         return rc;
3012 #else
3013         if (data != 1)
3014                 return -EINVAL;
3015
3016 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3017         bp = thread->ptrace_bps[0];
3018         if (bp) {
3019                 unregister_hw_breakpoint(bp);
3020                 thread->ptrace_bps[0] = NULL;
3021         } else
3022                 ret = -ENOENT;
3023         return ret;
3024 #else /* CONFIG_HAVE_HW_BREAKPOINT */
3025         if (child->thread.hw_brk.address == 0)
3026                 return -ENOENT;
3027
3028         child->thread.hw_brk.address = 0;
3029         child->thread.hw_brk.type = 0;
3030 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3031
3032         return 0;
3033 #endif
3034 }
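/*
 * Continuing the sketch above: tearing the breakpoint down again.  The
 * handle returned by PPC_PTRACE_SETHWDEBUG is passed back in the ptrace
 * data argument; on CONFIG_PPC_ADV_DEBUG_REGS hardware handles 1-4 select
 * instruction breakpoints and higher values the DACs, as decoded above.
 * pid and handle are placeholders.
 *
 *	if (ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle) == -1)
 *		perror("PPC_PTRACE_DELHWDEBUG");  // e.g. ENOENT if not set,
 *						  // EINVAL for a bad handle
 */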
3035
3036 long arch_ptrace(struct task_struct *child, long request,
3037                  unsigned long addr, unsigned long data)
3038 {
3039         int ret = -EPERM;
3040         void __user *datavp = (void __user *) data;
3041         unsigned long __user *datalp = datavp;
3042
3043         switch (request) {
3044         /* read the word at location addr in the USER area. */
3045         case PTRACE_PEEKUSR: {
3046                 unsigned long index, tmp;
3047
3048                 ret = -EIO;
3049                 /* convert to index and check */
3050 #ifdef CONFIG_PPC32
3051                 index = addr >> 2;
3052                 if ((addr & 3) || (index > PT_FPSCR)
3053                     || (child->thread.regs == NULL))
3054 #else
3055                 index = addr >> 3;
3056                 if ((addr & 7) || (index > PT_FPSCR))
3057 #endif
3058                         break;
3059
3060                 CHECK_FULL_REGS(child->thread.regs);
3061                 if (index < PT_FPR0) {
3062                         ret = ptrace_get_reg(child, (int) index, &tmp);
3063                         if (ret)
3064                                 break;
3065                 } else {
3066                         unsigned int fpidx = index - PT_FPR0;
3067
3068                         flush_fp_to_thread(child);
3069                         if (fpidx < (PT_FPSCR - PT_FPR0))
3070                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3071                                        sizeof(long));
3072                         else
3073                                 tmp = child->thread.fp_state.fpscr;
3074                 }
3075                 ret = put_user(tmp, datalp);
3076                 break;
3077         }
3078
3079         /* write the word at location addr in the USER area */
3080         case PTRACE_POKEUSR: {
3081                 unsigned long index;
3082
3083                 ret = -EIO;
3084                 /* convert to index and check */
3085 #ifdef CONFIG_PPC32
3086                 index = addr >> 2;
3087                 if ((addr & 3) || (index > PT_FPSCR)
3088                     || (child->thread.regs == NULL))
3089 #else
3090                 index = addr >> 3;
3091                 if ((addr & 7) || (index > PT_FPSCR))
3092 #endif
3093                         break;
3094
3095                 CHECK_FULL_REGS(child->thread.regs);
3096                 if (index < PT_FPR0) {
3097                         ret = ptrace_put_reg(child, index, data);
3098                 } else {
3099                         unsigned int fpidx = index - PT_FPR0;
3100
3101                         flush_fp_to_thread(child);
3102                         if (fpidx < (PT_FPSCR - PT_FPR0))
3103                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
3104                                        sizeof(long));
3105                         else
3106                                 child->thread.fp_state.fpscr = data;
3107                         ret = 0;
3108                 }
3109                 break;
3110         }
3111
3112         case PPC_PTRACE_GETHWDBGINFO: {
3113                 struct ppc_debug_info dbginfo;
3114
3115                 dbginfo.version = 1;
3116 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3117                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3118                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3119                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3120                 dbginfo.data_bp_alignment = 4;
3121                 dbginfo.sizeof_condition = 4;
3122                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3123                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3124 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3125                 dbginfo.features |=
3126                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3127                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3128 #endif
3129 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3130                 dbginfo.num_instruction_bps = 0;
3131                 dbginfo.num_data_bps = 1;
3132                 dbginfo.num_condition_regs = 0;
3133 #ifdef CONFIG_PPC64
3134                 dbginfo.data_bp_alignment = 8;
3135 #else
3136                 dbginfo.data_bp_alignment = 4;
3137 #endif
3138                 dbginfo.sizeof_condition = 0;
3139 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3140                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3141                 if (cpu_has_feature(CPU_FTR_DAWR))
3142                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3143 #else
3144                 dbginfo.features = 0;
3145 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3146 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3147
3148                 if (!access_ok(VERIFY_WRITE, datavp,
3149                                sizeof(struct ppc_debug_info)))
3150                         return -EFAULT;
3151                 ret = __copy_to_user(datavp, &dbginfo,
3152                                      sizeof(struct ppc_debug_info)) ?
3153                       -EFAULT : 0;
3154                 break;
3155         }
3156
3157         case PPC_PTRACE_SETHWDEBUG: {
3158                 struct ppc_hw_breakpoint bp_info;
3159
3160                 if (!access_ok(VERIFY_READ, datavp,
3161                                sizeof(struct ppc_hw_breakpoint)))
3162                         return -EFAULT;
3163                 ret = __copy_from_user(&bp_info, datavp,
3164                                        sizeof(struct ppc_hw_breakpoint)) ?
3165                       -EFAULT : 0;
3166                 if (!ret)
3167                         ret = ppc_set_hwdebug(child, &bp_info);
3168                 break;
3169         }
3170
3171         case PPC_PTRACE_DELHWDEBUG: {
3172                 ret = ppc_del_hwdebug(child, data);
3173                 break;
3174         }
3175
3176         case PTRACE_GET_DEBUGREG: {
3177 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3178                 unsigned long dabr_fake;
3179 #endif
3180                 ret = -EINVAL;
3181                 /* We only support one DABR and no IABRs at the moment */
3182                 if (addr > 0)
3183                         break;
3184 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3185                 ret = put_user(child->thread.debug.dac1, datalp);
3186 #else
3187                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3188                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3189                 ret = put_user(dabr_fake, datalp);
3190 #endif
3191                 break;
3192         }
3193
3194         case PTRACE_SET_DEBUGREG:
3195                 ret = ptrace_set_debugreg(child, addr, data);
3196                 break;
3197
3198 #ifdef CONFIG_PPC64
3199         case PTRACE_GETREGS64:
3200 #endif
3201         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3202                 return copy_regset_to_user(child, &user_ppc_native_view,
3203                                            REGSET_GPR,
3204                                            0, sizeof(struct pt_regs),
3205                                            datavp);
3206
3207 #ifdef CONFIG_PPC64
3208         case PTRACE_SETREGS64:
3209 #endif
3210         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3211                 return copy_regset_from_user(child, &user_ppc_native_view,
3212                                              REGSET_GPR,
3213                                              0, sizeof(struct pt_regs),
3214                                              datavp);
3215
3216         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3217                 return copy_regset_to_user(child, &user_ppc_native_view,
3218                                            REGSET_FPR,
3219                                            0, sizeof(elf_fpregset_t),
3220                                            datavp);
3221
3222         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3223                 return copy_regset_from_user(child, &user_ppc_native_view,
3224                                              REGSET_FPR,
3225                                              0, sizeof(elf_fpregset_t),
3226                                              datavp);
3227
3228 #ifdef CONFIG_ALTIVEC
3229         case PTRACE_GETVRREGS:
3230                 return copy_regset_to_user(child, &user_ppc_native_view,
3231                                            REGSET_VMX,
3232                                            0, (33 * sizeof(vector128) +
3233                                                sizeof(u32)),
3234                                            datavp);
3235
3236         case PTRACE_SETVRREGS:
3237                 return copy_regset_from_user(child, &user_ppc_native_view,
3238                                              REGSET_VMX,
3239                                              0, (33 * sizeof(vector128) +
3240                                                  sizeof(u32)),
3241                                              datavp);
3242 #endif
3243 #ifdef CONFIG_VSX
3244         case PTRACE_GETVSRREGS:
3245                 return copy_regset_to_user(child, &user_ppc_native_view,
3246                                            REGSET_VSX,
3247                                            0, 32 * sizeof(double),
3248                                            datavp);
3249
3250         case PTRACE_SETVSRREGS:
3251                 return copy_regset_from_user(child, &user_ppc_native_view,
3252                                              REGSET_VSX,
3253                                              0, 32 * sizeof(double),
3254                                              datavp);
3255 #endif
3256 #ifdef CONFIG_SPE
3257         case PTRACE_GETEVRREGS:
3258                 /* Get the child spe register state. */
3259                 return copy_regset_to_user(child, &user_ppc_native_view,
3260                                            REGSET_SPE, 0, 35 * sizeof(u32),
3261                                            datavp);
3262
3263         case PTRACE_SETEVRREGS:
3264                 /* Set the child spe register state. */
3265                 return copy_regset_from_user(child, &user_ppc_native_view,
3266                                              REGSET_SPE, 0, 35 * sizeof(u32),
3267                                              datavp);
3268 #endif
3269
3270         default:
3271                 ret = ptrace_request(child, request, addr, data);
3272                 break;
3273         }
3274         return ret;
3275 }
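/*
 * A small userspace illustration of the PTRACE_PEEKUSR index math above:
 * the addr argument is a register index scaled by the word size, indices
 * below PT_FPR0 go through ptrace_get_reg(), and FP registers are read
 * from the flushed FP state.  PT_R3 is a uapi constant, pid is a
 * placeholder, and glibc spells the request PTRACE_PEEKUSER.
 *
 *	errno = 0;
 *	long r3 = ptrace(PTRACE_PEEKUSER, pid,
 *			 (void *)(PT_R3 * sizeof(unsigned long)), NULL);
 *	if (r3 == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */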
3276
3277 #ifdef CONFIG_SECCOMP
3278 static int do_seccomp(struct pt_regs *regs)
3279 {
3280         if (!test_thread_flag(TIF_SECCOMP))
3281                 return 0;
3282
3283         /*
3284          * The ABI we present to seccomp tracers is that r3 contains
3285          * the syscall return value and orig_gpr3 contains the first
3286          * syscall parameter. This is different to the ptrace ABI where
3287          * both r3 and orig_gpr3 contain the first syscall parameter.
3288          */
3289         regs->gpr[3] = -ENOSYS;
3290
3291         /*
3292          * We use the __ version here because we have already checked
3293          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3294          * have already loaded -ENOSYS into r3, or seccomp has put
3295          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3296          */
3297         if (__secure_computing(NULL))
3298                 return -1;
3299
3300         /*
3301          * The syscall was allowed by seccomp, restore the register
3302          * state to what audit expects.
3303          * Note that we use orig_gpr3, which means a seccomp tracer can
3304          * modify the first syscall parameter (in orig_gpr3) and also
3305          * allow the syscall to proceed.
3306          */
3307         regs->gpr[3] = regs->orig_gpr3;
3308
3309         return 0;
3310 }
3311 #else
3312 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3313 #endif /* CONFIG_SECCOMP */
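/*
 * The r3/orig_gpr3 distinction described above matters to tracers: at a
 * seccomp or syscall stop, r3 may already hold a return value, while
 * orig_gpr3 still holds the first syscall argument.  A hedged sketch of a
 * tracer reading both, assuming an attached, stopped powerpc tracee, the
 * uapi struct pt_regs from <asm/ptrace.h>, and the usual <sys/ptrace.h>
 * and <stdio.h> includes (pid is a placeholder):
 *
 *	struct pt_regs uregs;
 *
 *	ptrace(PTRACE_GETREGS, pid, 0, &uregs);
 *	printf("syscall %lu, first arg %#lx\n",
 *	       uregs.gpr[0], uregs.orig_gpr3);
 */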
3314
3315 /**
3316  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3317  * @regs: the pt_regs of the task to trace (current)
3318  *
3319  * Performs various types of tracing on syscall entry. This includes seccomp,
3320  * ptrace, syscall tracepoints and audit.
3321  *
3322  * The pt_regs are potentially visible to userspace via ptrace, so their
3323  * contents are ABI.
3324  *
3325  * One or more of the tracers may modify the contents of pt_regs, in particular
3326  * to modify arguments or even the syscall number itself.
3327  *
3328  * It's also possible that a tracer can choose to reject the system call. In
3329  * that case this function will return an illegal syscall number, and will put
3330  * an appropriate return value in regs->gpr[3].
3331  *
3332  * Return: the (possibly changed) syscall number.
3333  */
3334 long do_syscall_trace_enter(struct pt_regs *regs)
3335 {
3336         user_exit();
3337
3338         /*
3339          * The tracer may decide to abort the syscall; if so, tracehook
3340          * will return non-zero. Note that the tracer may also just change
3341          * regs->gpr[0] to an invalid syscall number, which is handled by
3342          * the invalid-syscall check below.
3343          */
3344         if (test_thread_flag(TIF_SYSCALL_TRACE) &&
3345             tracehook_report_syscall_entry(regs))
3346                 goto skip;
3347
3348         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3349         if (do_seccomp(regs))
3350                 return -1;
3351
3352         /* Avoid trace and audit when syscall is invalid. */
3353         if (regs->gpr[0] >= NR_syscalls)
3354                 goto skip;
3355
3356         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3357                 trace_sys_enter(regs, regs->gpr[0]);
3358
3359 #ifdef CONFIG_PPC64
3360         if (!is_32bit_task())
3361                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3362                                     regs->gpr[5], regs->gpr[6]);
3363         else
3364 #endif
3365                 audit_syscall_entry(regs->gpr[0],
3366                                     regs->gpr[3] & 0xffffffff,
3367                                     regs->gpr[4] & 0xffffffff,
3368                                     regs->gpr[5] & 0xffffffff,
3369                                     regs->gpr[6] & 0xffffffff);
3370
3371         /* Return the possibly modified but valid syscall number */
3372         return regs->gpr[0];
3373
3374 skip:
3375         /*
3376          * If we are aborting explicitly, or if the syscall number is
3377          * now invalid, set the return value to -ENOSYS.
3378          */
3379         regs->gpr[3] = -ENOSYS;
3380         return -1;
3381 }
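/*
 * As the comment in do_syscall_trace_enter() notes, a tracer can reject a
 * system call simply by rewriting gpr[0] at the entry stop; the invalid
 * number is then caught above and the tracee sees -ENOSYS.  A minimal
 * sketch, assuming a PTRACE_SYSCALL entry stop and the uapi pt_regs layout
 * (pid is a placeholder):
 *
 *	struct pt_regs uregs;
 *
 *	ptrace(PTRACE_GETREGS, pid, 0, &uregs);
 *	uregs.gpr[0] = (unsigned long)-1;	// no such syscall number
 *	ptrace(PTRACE_SETREGS, pid, 0, &uregs);
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);	// syscall now returns -ENOSYS
 */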
3382
3383 void do_syscall_trace_leave(struct pt_regs *regs)
3384 {
3385         int step;
3386
3387         audit_syscall_exit(regs);
3388
3389         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3390                 trace_sys_exit(regs, regs->result);
3391
3392         step = test_thread_flag(TIF_SINGLESTEP);
3393         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3394                 tracehook_report_syscall_exit(regs, step);
3395
3396         user_enter();
3397 }