arch/powerpc/kernel/ptrace.c
1 /*
2  *  PowerPC version
3  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4  *
5  *  Derived from "arch/m68k/kernel/ptrace.c"
6  *  Copyright (C) 1994 by Hamish Macdonald
7  *  Taken from linux/kernel/ptrace.c and modified for M680x0.
8  *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9  *
10  * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11  * and Paul Mackerras (paulus@samba.org).
12  *
13  * This file is subject to the terms and conditions of the GNU General
14  * Public License.  See the file README.legal in the main directory of
15  * this archive for more details.
16  */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36 #include <linux/nospec.h>
37
38 #include <linux/uaccess.h>
39 #include <linux/pkeys.h>
40 #include <asm/page.h>
41 #include <asm/pgtable.h>
42 #include <asm/switch_to.h>
43 #include <asm/tm.h>
44 #include <asm/asm-prototypes.h>
45 #include <asm/debug.h>
46
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/syscalls.h>
49
50 /*
51  * The parameter save area on the stack is used to store arguments being passed
52  * to the callee function and is located at a fixed offset from the stack pointer.
53  */
54 #ifdef CONFIG_PPC32
55 #define PARAMETER_SAVE_AREA_OFFSET      24  /* bytes */
56 #else /* CONFIG_PPC32 */
57 #define PARAMETER_SAVE_AREA_OFFSET      48  /* bytes */
58 #endif
59
60 struct pt_regs_offset {
61         const char *name;
62         int offset;
63 };
64
65 #define STR(s)  #s                      /* convert to string */
66 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
67 #define GPR_OFFSET_NAME(num)    \
68         {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
69         {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
70 #define REG_OFFSET_END {.name = NULL, .offset = 0}
71
72 #define TVSO(f) (offsetof(struct thread_vr_state, f))
73 #define TFSO(f) (offsetof(struct thread_fp_state, f))
74 #define TSO(f)  (offsetof(struct thread_struct, f))
75
76 static const struct pt_regs_offset regoffset_table[] = {
77         GPR_OFFSET_NAME(0),
78         GPR_OFFSET_NAME(1),
79         GPR_OFFSET_NAME(2),
80         GPR_OFFSET_NAME(3),
81         GPR_OFFSET_NAME(4),
82         GPR_OFFSET_NAME(5),
83         GPR_OFFSET_NAME(6),
84         GPR_OFFSET_NAME(7),
85         GPR_OFFSET_NAME(8),
86         GPR_OFFSET_NAME(9),
87         GPR_OFFSET_NAME(10),
88         GPR_OFFSET_NAME(11),
89         GPR_OFFSET_NAME(12),
90         GPR_OFFSET_NAME(13),
91         GPR_OFFSET_NAME(14),
92         GPR_OFFSET_NAME(15),
93         GPR_OFFSET_NAME(16),
94         GPR_OFFSET_NAME(17),
95         GPR_OFFSET_NAME(18),
96         GPR_OFFSET_NAME(19),
97         GPR_OFFSET_NAME(20),
98         GPR_OFFSET_NAME(21),
99         GPR_OFFSET_NAME(22),
100         GPR_OFFSET_NAME(23),
101         GPR_OFFSET_NAME(24),
102         GPR_OFFSET_NAME(25),
103         GPR_OFFSET_NAME(26),
104         GPR_OFFSET_NAME(27),
105         GPR_OFFSET_NAME(28),
106         GPR_OFFSET_NAME(29),
107         GPR_OFFSET_NAME(30),
108         GPR_OFFSET_NAME(31),
109         REG_OFFSET_NAME(nip),
110         REG_OFFSET_NAME(msr),
111         REG_OFFSET_NAME(ctr),
112         REG_OFFSET_NAME(link),
113         REG_OFFSET_NAME(xer),
114         REG_OFFSET_NAME(ccr),
115 #ifdef CONFIG_PPC64
116         REG_OFFSET_NAME(softe),
117 #else
118         REG_OFFSET_NAME(mq),
119 #endif
120         REG_OFFSET_NAME(trap),
121         REG_OFFSET_NAME(dar),
122         REG_OFFSET_NAME(dsisr),
123         REG_OFFSET_END,
124 };
125
126 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
127 static void flush_tmregs_to_thread(struct task_struct *tsk)
128 {
129         /*
130          * If the task is not current, it will already have been flushed to
131          * its thread_struct during __switch_to().
132          *
133          * A reclaim flushes ALL of the state; otherwise, if not in a
134          * transaction, just save the TM SPRs from live into the thread_struct.
135          */
136
137         if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
138                 return;
139
140         if (MSR_TM_SUSPENDED(mfmsr())) {
141                 tm_reclaim_current(TM_CAUSE_SIGNAL);
142         } else {
143                 tm_enable();
144                 tm_save_sprs(&(tsk->thread));
145         }
146 }
147 #else
148 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
149 #endif
150
151 /**
152  * regs_query_register_offset() - query register offset from its name
153  * @name:       the name of a register
154  *
155  * regs_query_register_offset() returns the offset of a register in struct
156  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
157  */
158 int regs_query_register_offset(const char *name)
159 {
160         const struct pt_regs_offset *roff;
161         for (roff = regoffset_table; roff->name != NULL; roff++)
162                 if (!strcmp(roff->name, name))
163                         return roff->offset;
164         return -EINVAL;
165 }
166
167 /**
168  * regs_query_register_name() - query register name from its offset
169  * @offset:     the offset of a register in struct pt_regs.
170  *
171  * regs_query_register_name() returns the name of a register from its
172  * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
173  */
174 const char *regs_query_register_name(unsigned int offset)
175 {
176         const struct pt_regs_offset *roff;
177         for (roff = regoffset_table; roff->name != NULL; roff++)
178                 if (roff->offset == offset)
179                         return roff->name;
180         return NULL;
181 }
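/*
 * Illustrative only: a kernel-side user of the two lookup helpers above
 * (for example a kprobes or trace event handler) could translate a
 * register name into its pt_regs offset and read the live value back,
 * assuming the generic regs_get_register() helper from <asm/ptrace.h>:
 *
 *	int off = regs_query_register_offset("gpr3");
 *
 *	if (off >= 0)
 *		pr_info("%s = 0x%lx\n", regs_query_register_name(off),
 *			regs_get_register(regs, off));
 */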
182
183 /*
184  * This does not yet catch signals sent when the child dies; that is
185  * handled in exit.c or in signal.c.
186  */
187
188 /*
189  * Set of msr bits that gdb can change on behalf of a process.
190  */
191 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
192 #define MSR_DEBUGCHANGE 0
193 #else
194 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
195 #endif
196
197 /*
198  * Max register writeable via put_reg
199  */
200 #ifdef CONFIG_PPC32
201 #define PT_MAX_PUT_REG  PT_MQ
202 #else
203 #define PT_MAX_PUT_REG  PT_CCR
204 #endif
205
206 static unsigned long get_user_msr(struct task_struct *task)
207 {
208         return task->thread.regs->msr | task->thread.fpexc_mode;
209 }
210
211 static int set_user_msr(struct task_struct *task, unsigned long msr)
212 {
213         task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
214         task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
215         return 0;
216 }
217
218 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
219 static unsigned long get_user_ckpt_msr(struct task_struct *task)
220 {
221         return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
222 }
223
224 static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
225 {
226         task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
227         task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
228         return 0;
229 }
230
231 static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
232 {
233         task->thread.ckpt_regs.trap = trap & 0xfff0;
234         return 0;
235 }
236 #endif
237
238 #ifdef CONFIG_PPC64
239 static int get_user_dscr(struct task_struct *task, unsigned long *data)
240 {
241         *data = task->thread.dscr;
242         return 0;
243 }
244
245 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
246 {
247         task->thread.dscr = dscr;
248         task->thread.dscr_inherit = 1;
249         return 0;
250 }
251 #else
252 static int get_user_dscr(struct task_struct *task, unsigned long *data)
253 {
254         return -EIO;
255 }
256
257 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
258 {
259         return -EIO;
260 }
261 #endif
262
263 /*
264  * We prevent mucking around with the reserved bits of trap,
265  * which are used internally by the kernel.
266  */
267 static int set_user_trap(struct task_struct *task, unsigned long trap)
268 {
269         task->thread.regs->trap = trap & 0xfff0;
270         return 0;
271 }
272
273 /*
274  * Get contents of register REGNO in task TASK.
275  */
276 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
277 {
278         unsigned int regs_max;
279
280         if ((task->thread.regs == NULL) || !data)
281                 return -EIO;
282
283         if (regno == PT_MSR) {
284                 *data = get_user_msr(task);
285                 return 0;
286         }
287
288         if (regno == PT_DSCR)
289                 return get_user_dscr(task, data);
290
291 #ifdef CONFIG_PPC64
292         /*
293          * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
294          * is no longer used as a flag, force userspace to always see a softe value
295          * of 1, which means interrupts are not soft-disabled.
296          */
297         if (regno == PT_SOFTE) {
298                 *data = 1;
299                 return  0;
300         }
301 #endif
302
303         regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
304         if (regno < regs_max) {
305                 regno = array_index_nospec(regno, regs_max);
306                 *data = ((unsigned long *)task->thread.regs)[regno];
307                 return 0;
308         }
309
310         return -EIO;
311 }
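/*
 * Illustrative only: ptrace_get_reg() is typically reached from userspace
 * via PTRACE_PEEKUSER, where the "address" is the PT_* register index
 * scaled to a byte offset.  A hypothetical tracer reading r1 (the stack
 * pointer) of a stopped child might do roughly:
 *
 *	errno = 0;
 *	long sp = ptrace(PTRACE_PEEKUSER, pid,
 *			 (void *)(PT_R1 * sizeof(long)), NULL);
 *	if (sp == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 */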
312
313 /*
314  * Write contents of register REGNO in task TASK.
315  */
316 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
317 {
318         if (task->thread.regs == NULL)
319                 return -EIO;
320
321         if (regno == PT_MSR)
322                 return set_user_msr(task, data);
323         if (regno == PT_TRAP)
324                 return set_user_trap(task, data);
325         if (regno == PT_DSCR)
326                 return set_user_dscr(task, data);
327
328         if (regno <= PT_MAX_PUT_REG) {
329                 regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
330                 ((unsigned long *)task->thread.regs)[regno] = data;
331                 return 0;
332         }
333         return -EIO;
334 }
335
336 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
337                    unsigned int pos, unsigned int count,
338                    void *kbuf, void __user *ubuf)
339 {
340         int i, ret;
341
342         if (target->thread.regs == NULL)
343                 return -EIO;
344
345         if (!FULL_REGS(target->thread.regs)) {
346                 /* We have a partial register set.  Fill 14-31 with bogus values */
347                 for (i = 14; i < 32; i++)
348                         target->thread.regs->gpr[i] = NV_REG_POISON;
349         }
350
351         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
352                                   target->thread.regs,
353                                   0, offsetof(struct pt_regs, msr));
354         if (!ret) {
355                 unsigned long msr = get_user_msr(target);
356                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
357                                           offsetof(struct pt_regs, msr),
358                                           offsetof(struct pt_regs, msr) +
359                                           sizeof(msr));
360         }
361
362         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
363                      offsetof(struct pt_regs, msr) + sizeof(long));
364
365         if (!ret)
366                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
367                                           &target->thread.regs->orig_gpr3,
368                                           offsetof(struct pt_regs, orig_gpr3),
369                                           sizeof(struct user_pt_regs));
370         if (!ret)
371                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
372                                                sizeof(struct user_pt_regs), -1);
373
374         return ret;
375 }
376
377 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
378                    unsigned int pos, unsigned int count,
379                    const void *kbuf, const void __user *ubuf)
380 {
381         unsigned long reg;
382         int ret;
383
384         if (target->thread.regs == NULL)
385                 return -EIO;
386
387         CHECK_FULL_REGS(target->thread.regs);
388
389         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
390                                  target->thread.regs,
391                                  0, PT_MSR * sizeof(reg));
392
393         if (!ret && count > 0) {
394                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
395                                          PT_MSR * sizeof(reg),
396                                          (PT_MSR + 1) * sizeof(reg));
397                 if (!ret)
398                         ret = set_user_msr(target, reg);
399         }
400
401         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
402                      offsetof(struct pt_regs, msr) + sizeof(long));
403
404         if (!ret)
405                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
406                                          &target->thread.regs->orig_gpr3,
407                                          PT_ORIG_R3 * sizeof(reg),
408                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
409
410         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
411                 ret = user_regset_copyin_ignore(
412                         &pos, &count, &kbuf, &ubuf,
413                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
414                         PT_TRAP * sizeof(reg));
415
416         if (!ret && count > 0) {
417                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
418                                          PT_TRAP * sizeof(reg),
419                                          (PT_TRAP + 1) * sizeof(reg));
420                 if (!ret)
421                         ret = set_user_trap(target, reg);
422         }
423
424         if (!ret)
425                 ret = user_regset_copyin_ignore(
426                         &pos, &count, &kbuf, &ubuf,
427                         (PT_TRAP + 1) * sizeof(reg), -1);
428
429         return ret;
430 }
431
432 /*
433  * Regardless of transactions, 'fp_state' holds the current running
434  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
435  * value of all FPR registers for the current transaction.
436  *
437  * Userspace interface buffer layout:
438  *
439  * struct data {
440  *      u64     fpr[32];
441  *      u64     fpscr;
442  * };
443  */
444 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
445                    unsigned int pos, unsigned int count,
446                    void *kbuf, void __user *ubuf)
447 {
448 #ifdef CONFIG_VSX
449         u64 buf[33];
450         int i;
451
452         flush_fp_to_thread(target);
453
454         /* copy to local buffer then write that out */
455         for (i = 0; i < 32 ; i++)
456                 buf[i] = target->thread.TS_FPR(i);
457         buf[32] = target->thread.fp_state.fpscr;
458         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
459 #else
460         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
461                      offsetof(struct thread_fp_state, fpr[32]));
462
463         flush_fp_to_thread(target);
464
465         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
466                                    &target->thread.fp_state, 0, -1);
467 #endif
468 }
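/*
 * Illustrative only: a debugger would normally read the layout described
 * above through PTRACE_GETREGSET with the NT_PRFPREG note type, roughly
 * (error handling omitted):
 *
 *	struct { __u64 fpr[32]; __u64 fpscr; } fpregs;
 *	struct iovec iov = { .iov_base = &fpregs, .iov_len = sizeof(fpregs) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */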
469
470 /*
471  * Regardless of transactions, 'fp_state' holds the current running
472  * value of all FPR registers and 'ckfp_state' holds the last checkpointed
473  * value of all FPR registers for the current transaction.
474  *
475  * Userspace interface buffer layout:
476  *
477  * struct data {
478  *      u64     fpr[32];
479  *      u64     fpscr;
480  * };
481  *
482  */
483 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
484                    unsigned int pos, unsigned int count,
485                    const void *kbuf, const void __user *ubuf)
486 {
487 #ifdef CONFIG_VSX
488         u64 buf[33];
489         int i;
490
491         flush_fp_to_thread(target);
492
493         for (i = 0; i < 32 ; i++)
494                 buf[i] = target->thread.TS_FPR(i);
495         buf[32] = target->thread.fp_state.fpscr;
496
497         /* copy to local buffer then write that out */
498         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
499         if (i)
500                 return i;
501
502         for (i = 0; i < 32 ; i++)
503                 target->thread.TS_FPR(i) = buf[i];
504         target->thread.fp_state.fpscr = buf[32];
505         return 0;
506 #else
507         BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
508                      offsetof(struct thread_fp_state, fpr[32]));
509
510         flush_fp_to_thread(target);
511
512         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
513                                   &target->thread.fp_state, 0, -1);
514 #endif
515 }
516
517 #ifdef CONFIG_ALTIVEC
518 /*
519  * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
520  * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
521  * corresponding vector registers.  Quadword 32 contains the vscr as the
522  * last word (offset 12) within that quadword.  Quadword 33 contains the
523  * vrsave as the first word (offset 0) within the quadword.
524  *
525  * This definition of the VMX state is compatible with the current PPC32
526  * ptrace interface.  This allows signal handling and ptrace to use the
527  * same structures.  This also simplifies the implementation of a bi-arch
528  * (combined (32- and 64-bit) gdb.
529  * (combined 32- and 64-bit) gdb.
530
531 static int vr_active(struct task_struct *target,
532                      const struct user_regset *regset)
533 {
534         flush_altivec_to_thread(target);
535         return target->thread.used_vr ? regset->n : 0;
536 }
537
538 /*
539  * Regardless of transactions, 'vr_state' holds the current running
540  * value of all the VMX registers and 'ckvr_state' holds the last
541  * checkpointed value of all the VMX registers for the current
542  * transaction to fall back on in case it aborts.
543  *
544  * Userspace interface buffer layout:
545  *
546  * struct data {
547  *      vector128       vr[32];
548  *      vector128       vscr;
549  *      vector128       vrsave;
550  * };
551  */
552 static int vr_get(struct task_struct *target, const struct user_regset *regset,
553                   unsigned int pos, unsigned int count,
554                   void *kbuf, void __user *ubuf)
555 {
556         int ret;
557
558         flush_altivec_to_thread(target);
559
560         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
561                      offsetof(struct thread_vr_state, vr[32]));
562
563         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
564                                   &target->thread.vr_state, 0,
565                                   33 * sizeof(vector128));
566         if (!ret) {
567                 /*
568                  * Copy out only the low-order word of vrsave.
569                  */
570                 int start, end;
571                 union {
572                         elf_vrreg_t reg;
573                         u32 word;
574                 } vrsave;
575                 memset(&vrsave, 0, sizeof(vrsave));
576
577                 vrsave.word = target->thread.vrsave;
578
579                 start = 33 * sizeof(vector128);
580                 end = start + sizeof(vrsave);
581                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
582                                           start, end);
583         }
584
585         return ret;
586 }
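/*
 * Illustrative only: with 16-byte quadwords, the layout above puts the
 * significant vscr word at byte offset 32 * 16 + 12 = 524 and the 32-bit
 * vrsave word at byte offset 33 * 16 = 528, for a total regset size of
 * 34 * 16 = 544 bytes.
 */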
587
588 /*
589  * Regardless of transactions, 'vr_state' holds the current running
590  * value of all the VMX registers and 'ckvr_state' holds the last
591  * checkpointed value of all the VMX registers for the current
592  * transaction to fall back on in case it aborts.
593  *
594  * Userspace interface buffer layout:
595  *
596  * struct data {
597  *      vector128       vr[32];
598  *      vector128       vscr;
599  *      vector128       vrsave;
600  * };
601  */
602 static int vr_set(struct task_struct *target, const struct user_regset *regset,
603                   unsigned int pos, unsigned int count,
604                   const void *kbuf, const void __user *ubuf)
605 {
606         int ret;
607
608         flush_altivec_to_thread(target);
609
610         BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
611                      offsetof(struct thread_vr_state, vr[32]));
612
613         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
614                                  &target->thread.vr_state, 0,
615                                  33 * sizeof(vector128));
616         if (!ret && count > 0) {
617                 /*
618                  * We use only the first word of vrsave.
619                  */
620                 int start, end;
621                 union {
622                         elf_vrreg_t reg;
623                         u32 word;
624                 } vrsave;
625                 memset(&vrsave, 0, sizeof(vrsave));
626
627                 vrsave.word = target->thread.vrsave;
628
629                 start = 33 * sizeof(vector128);
630                 end = start + sizeof(vrsave);
631                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
632                                          start, end);
633                 if (!ret)
634                         target->thread.vrsave = vrsave.word;
635         }
636
637         return ret;
638 }
639 #endif /* CONFIG_ALTIVEC */
640
641 #ifdef CONFIG_VSX
642 /*
643  * Currently, to set and get all of the VSX state, you need to call the
644  * FP and VMX calls as well.  This only gets/sets the lower 32 128-bit
645  * VSX registers.
646  */
647
648 static int vsr_active(struct task_struct *target,
649                       const struct user_regset *regset)
650 {
651         flush_vsx_to_thread(target);
652         return target->thread.used_vsr ? regset->n : 0;
653 }
654
655 /*
656  * Regardless of transactions, 'fp_state' holds the current running
657  * value of all FPR registers and 'ckfp_state' holds the last
658  * checkpointed value of all FPR registers for the current
659  * transaction.
660  *
661  * Userspace interface buffer layout:
662  *
663  * struct data {
664  *      u64     vsx[32];
665  * };
666  */
667 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
668                    unsigned int pos, unsigned int count,
669                    void *kbuf, void __user *ubuf)
670 {
671         u64 buf[32];
672         int ret, i;
673
674         flush_tmregs_to_thread(target);
675         flush_fp_to_thread(target);
676         flush_altivec_to_thread(target);
677         flush_vsx_to_thread(target);
678
679         for (i = 0; i < 32 ; i++)
680                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
681
682         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
683                                   buf, 0, 32 * sizeof(double));
684
685         return ret;
686 }
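/*
 * Illustrative only: this regset carries just the low 64 bits of VSR0-31;
 * the high 64 bits of those registers alias the FPRs.  A debugger wanting
 * the full 128-bit values would therefore combine the FP regset (high
 * doublewords) with this VSX regset (low doublewords), roughly:
 *
 *	__u64 fp[33], lo[32];	// fpr[32] + fpscr, and vsx[32]
 *	struct iovec fpvec  = { .iov_base = fp, .iov_len = sizeof(fp) };
 *	struct iovec vsxvec = { .iov_base = lo, .iov_len = sizeof(lo) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &fpvec);
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_VSX, &vsxvec);
 */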
687
688 /*
689  * Regardless of transactions, 'fp_state' holds the current running
690  * value of all FPR registers and 'ckfp_state' holds the last
691  * checkpointed value of all FPR registers for the current
692  * transaction.
693  *
694  * Userspace interface buffer layout:
695  *
696  * struct data {
697  *      u64     vsx[32];
698  * };
699  */
700 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
701                    unsigned int pos, unsigned int count,
702                    const void *kbuf, const void __user *ubuf)
703 {
704         u64 buf[32];
705         int ret, i;
706
707         flush_tmregs_to_thread(target);
708         flush_fp_to_thread(target);
709         flush_altivec_to_thread(target);
710         flush_vsx_to_thread(target);
711
712         for (i = 0; i < 32 ; i++)
713                 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
714
715         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
716                                  buf, 0, 32 * sizeof(double));
717         if (!ret)
718                 for (i = 0; i < 32 ; i++)
719                         target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
720
721         return ret;
722 }
723 #endif /* CONFIG_VSX */
724
725 #ifdef CONFIG_SPE
726
727 /*
728  * For get_evrregs/set_evrregs functions 'data' has the following layout:
729  *
730  * struct {
731  *   u32 evr[32];
732  *   u64 acc;
733  *   u32 spefscr;
734  * }
735  */
736
737 static int evr_active(struct task_struct *target,
738                       const struct user_regset *regset)
739 {
740         flush_spe_to_thread(target);
741         return target->thread.used_spe ? regset->n : 0;
742 }
743
744 static int evr_get(struct task_struct *target, const struct user_regset *regset,
745                    unsigned int pos, unsigned int count,
746                    void *kbuf, void __user *ubuf)
747 {
748         int ret;
749
750         flush_spe_to_thread(target);
751
752         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
753                                   &target->thread.evr,
754                                   0, sizeof(target->thread.evr));
755
756         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
757                      offsetof(struct thread_struct, spefscr));
758
759         if (!ret)
760                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
761                                           &target->thread.acc,
762                                           sizeof(target->thread.evr), -1);
763
764         return ret;
765 }
766
767 static int evr_set(struct task_struct *target, const struct user_regset *regset,
768                    unsigned int pos, unsigned int count,
769                    const void *kbuf, const void __user *ubuf)
770 {
771         int ret;
772
773         flush_spe_to_thread(target);
774
775         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
776                                  &target->thread.evr,
777                                  0, sizeof(target->thread.evr));
778
779         BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
780                      offsetof(struct thread_struct, spefscr));
781
782         if (!ret)
783                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
784                                          &target->thread.acc,
785                                          sizeof(target->thread.evr), -1);
786
787         return ret;
788 }
789 #endif /* CONFIG_SPE */
790
791 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
792 /**
793  * tm_cgpr_active - get active number of registers in CGPR
794  * @target:     The target task.
795  * @regset:     The user regset structure.
796  *
797  * This function checks for the active number of available
798  * registers in the transaction checkpointed GPR category.
799  */
800 static int tm_cgpr_active(struct task_struct *target,
801                           const struct user_regset *regset)
802 {
803         if (!cpu_has_feature(CPU_FTR_TM))
804                 return -ENODEV;
805
806         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
807                 return 0;
808
809         return regset->n;
810 }
811
812 /**
813  * tm_cgpr_get - get CGPR registers
814  * @target:     The target task.
815  * @regset:     The user regset structure.
816  * @pos:        The buffer position.
817  * @count:      Number of bytes to copy.
818  * @kbuf:       Kernel buffer to copy from.
819  * @ubuf:       User buffer to copy into.
820  *
821  * This function gets transaction checkpointed GPR registers.
822  *
823  * When the transaction is active, 'ckpt_regs' holds all the checkpointed
824  * GPR register values for the current transaction to fall back on if it
825  * aborts in between. This function gets those checkpointed GPR registers.
826  * The userspace interface buffer layout is as follows.
827  *
828  * struct data {
829  *      struct pt_regs ckpt_regs;
830  * };
831  */
832 static int tm_cgpr_get(struct task_struct *target,
833                         const struct user_regset *regset,
834                         unsigned int pos, unsigned int count,
835                         void *kbuf, void __user *ubuf)
836 {
837         int ret;
838
839         if (!cpu_has_feature(CPU_FTR_TM))
840                 return -ENODEV;
841
842         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
843                 return -ENODATA;
844
845         flush_tmregs_to_thread(target);
846         flush_fp_to_thread(target);
847         flush_altivec_to_thread(target);
848
849         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
850                                   &target->thread.ckpt_regs,
851                                   0, offsetof(struct pt_regs, msr));
852         if (!ret) {
853                 unsigned long msr = get_user_ckpt_msr(target);
854
855                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
856                                           offsetof(struct pt_regs, msr),
857                                           offsetof(struct pt_regs, msr) +
858                                           sizeof(msr));
859         }
860
861         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
862                      offsetof(struct pt_regs, msr) + sizeof(long));
863
864         if (!ret)
865                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
866                                           &target->thread.ckpt_regs.orig_gpr3,
867                                           offsetof(struct pt_regs, orig_gpr3),
868                                           sizeof(struct user_pt_regs));
869         if (!ret)
870                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
871                                                sizeof(struct user_pt_regs), -1);
872
873         return ret;
874 }
875
876 /**
877  * tm_cgpr_set - set the CGPR registers
878  * @target:     The target task.
879  * @regset:     The user regset structure.
880  * @pos:        The buffer position.
881  * @count:      Number of bytes to copy.
882  * @kbuf:       Kernel buffer to copy into.
883  * @ubuf:       User buffer to copy from.
884  *
885  * This function sets in transaction checkpointed GPR registers.
886  *
887  * When the transaction is active, 'ckpt_regs' holds the checkpointed
888  * GPR register values for the current transaction to fall back on if it
889  * aborts in between. This function sets those checkpointed GPR registers.
890  * The userspace interface buffer layout is as follows.
891  *
892  * struct data {
893  *      struct pt_regs ckpt_regs;
894  * };
895  */
896 static int tm_cgpr_set(struct task_struct *target,
897                         const struct user_regset *regset,
898                         unsigned int pos, unsigned int count,
899                         const void *kbuf, const void __user *ubuf)
900 {
901         unsigned long reg;
902         int ret;
903
904         if (!cpu_has_feature(CPU_FTR_TM))
905                 return -ENODEV;
906
907         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
908                 return -ENODATA;
909
910         flush_tmregs_to_thread(target);
911         flush_fp_to_thread(target);
912         flush_altivec_to_thread(target);
913
914         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
915                                  &target->thread.ckpt_regs,
916                                  0, PT_MSR * sizeof(reg));
917
918         if (!ret && count > 0) {
919                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
920                                          PT_MSR * sizeof(reg),
921                                          (PT_MSR + 1) * sizeof(reg));
922                 if (!ret)
923                         ret = set_user_ckpt_msr(target, reg);
924         }
925
926         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
927                      offsetof(struct pt_regs, msr) + sizeof(long));
928
929         if (!ret)
930                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
931                                          &target->thread.ckpt_regs.orig_gpr3,
932                                          PT_ORIG_R3 * sizeof(reg),
933                                          (PT_MAX_PUT_REG + 1) * sizeof(reg));
934
935         if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
936                 ret = user_regset_copyin_ignore(
937                         &pos, &count, &kbuf, &ubuf,
938                         (PT_MAX_PUT_REG + 1) * sizeof(reg),
939                         PT_TRAP * sizeof(reg));
940
941         if (!ret && count > 0) {
942                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
943                                          PT_TRAP * sizeof(reg),
944                                          (PT_TRAP + 1) * sizeof(reg));
945                 if (!ret)
946                         ret = set_user_ckpt_trap(target, reg);
947         }
948
949         if (!ret)
950                 ret = user_regset_copyin_ignore(
951                         &pos, &count, &kbuf, &ubuf,
952                         (PT_TRAP + 1) * sizeof(reg), -1);
953
954         return ret;
955 }
956
957 /**
958  * tm_cfpr_active - get active number of registers in CFPR
959  * @target:     The target task.
960  * @regset:     The user regset structure.
961  *
962  * This function checks for the active number of available
963  * registers in the transaction checkpointed FPR category.
964  */
965 static int tm_cfpr_active(struct task_struct *target,
966                                 const struct user_regset *regset)
967 {
968         if (!cpu_has_feature(CPU_FTR_TM))
969                 return -ENODEV;
970
971         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
972                 return 0;
973
974         return regset->n;
975 }
976
977 /**
978  * tm_cfpr_get - get CFPR registers
979  * @target:     The target task.
980  * @regset:     The user regset structure.
981  * @pos:        The buffer position.
982  * @count:      Number of bytes to copy.
983  * @kbuf:       Kernel buffer to copy from.
984  * @ubuf:       User buffer to copy into.
985  *
986  * This function gets in transaction checkpointed FPR registers.
987  *
988  * When the transaction is active 'ckfp_state' holds the checkpointed
989  * values for the current transaction to fall back on if it aborts
990  * in between. This function gets those checkpointed FPR registers.
991  * The userspace interface buffer layout is as follows.
992  *
993  * struct data {
994  *      u64     fpr[32];
995  *      u64     fpscr;
996  *};
997  */
998 static int tm_cfpr_get(struct task_struct *target,
999                         const struct user_regset *regset,
1000                         unsigned int pos, unsigned int count,
1001                         void *kbuf, void __user *ubuf)
1002 {
1003         u64 buf[33];
1004         int i;
1005
1006         if (!cpu_has_feature(CPU_FTR_TM))
1007                 return -ENODEV;
1008
1009         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1010                 return -ENODATA;
1011
1012         flush_tmregs_to_thread(target);
1013         flush_fp_to_thread(target);
1014         flush_altivec_to_thread(target);
1015
1016         /* copy to local buffer then write that out */
1017         for (i = 0; i < 32 ; i++)
1018                 buf[i] = target->thread.TS_CKFPR(i);
1019         buf[32] = target->thread.ckfp_state.fpscr;
1020         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1021 }
1022
1023 /**
1024  * tm_cfpr_set - set CFPR registers
1025  * @target:     The target task.
1026  * @regset:     The user regset structure.
1027  * @pos:        The buffer position.
1028  * @count:      Number of bytes to copy.
1029  * @kbuf:       Kernel buffer to copy into.
1030  * @ubuf:       User buffer to copy from.
1031  *
1032  * This function sets in transaction checkpointed FPR registers.
1033  *
1034  * When the transaction is active 'ckfp_state' holds the checkpointed
1035  * FPR register values for the current transaction to fall back on
1036  * if it aborts in between. This function sets these checkpointed
1037  * FPR registers. The userspace interface buffer layout is as follows.
1038  *
1039  * struct data {
1040  *      u64     fpr[32];
1041  *      u64     fpscr;
1042  *};
1043  */
1044 static int tm_cfpr_set(struct task_struct *target,
1045                         const struct user_regset *regset,
1046                         unsigned int pos, unsigned int count,
1047                         const void *kbuf, const void __user *ubuf)
1048 {
1049         u64 buf[33];
1050         int i;
1051
1052         if (!cpu_has_feature(CPU_FTR_TM))
1053                 return -ENODEV;
1054
1055         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1056                 return -ENODATA;
1057
1058         flush_tmregs_to_thread(target);
1059         flush_fp_to_thread(target);
1060         flush_altivec_to_thread(target);
1061
1062         for (i = 0; i < 32; i++)
1063                 buf[i] = target->thread.TS_CKFPR(i);
1064         buf[32] = target->thread.ckfp_state.fpscr;
1065
1066         /* copy to local buffer then write that out */
1067         i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
1068         if (i)
1069                 return i;
1070         for (i = 0; i < 32 ; i++)
1071                 target->thread.TS_CKFPR(i) = buf[i];
1072         target->thread.ckfp_state.fpscr = buf[32];
1073         return 0;
1074 }
1075
1076 /**
1077  * tm_cvmx_active - get active number of registers in CVMX
1078  * @target:     The target task.
1079  * @regset:     The user regset structure.
1080  *
1081  * This function checks for the active number of available
1082  * registers in the checkpointed VMX category.
1083  */
1084 static int tm_cvmx_active(struct task_struct *target,
1085                                 const struct user_regset *regset)
1086 {
1087         if (!cpu_has_feature(CPU_FTR_TM))
1088                 return -ENODEV;
1089
1090         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1091                 return 0;
1092
1093         return regset->n;
1094 }
1095
1096 /**
1097  * tm_cvmx_get - get CVMX registers
1098  * @target:     The target task.
1099  * @regset:     The user regset structure.
1100  * @pos:        The buffer position.
1101  * @count:      Number of bytes to copy.
1102  * @kbuf:       Kernel buffer to copy from.
1103  * @ubuf:       User buffer to copy into.
1104  *
1105  * This function gets in transaction checkpointed VMX registers.
1106  *
1107  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1108  * the checkpointed values for the current transaction to fall
1109  * back on if it aborts in between. The userspace interface buffer
1110  * layout is as follows.
1111  *
1112  * struct data {
1113  *      vector128       vr[32];
1114  *      vector128       vscr;
1115  *      vector128       vrsave;
1116  *};
1117  */
1118 static int tm_cvmx_get(struct task_struct *target,
1119                         const struct user_regset *regset,
1120                         unsigned int pos, unsigned int count,
1121                         void *kbuf, void __user *ubuf)
1122 {
1123         int ret;
1124
1125         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1126
1127         if (!cpu_has_feature(CPU_FTR_TM))
1128                 return -ENODEV;
1129
1130         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1131                 return -ENODATA;
1132
1133         /* Flush the state */
1134         flush_tmregs_to_thread(target);
1135         flush_fp_to_thread(target);
1136         flush_altivec_to_thread(target);
1137
1138         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1139                                         &target->thread.ckvr_state, 0,
1140                                         33 * sizeof(vector128));
1141         if (!ret) {
1142                 /*
1143                  * Copy out only the low-order word of vrsave.
1144                  */
1145                 union {
1146                         elf_vrreg_t reg;
1147                         u32 word;
1148                 } vrsave;
1149                 memset(&vrsave, 0, sizeof(vrsave));
1150                 vrsave.word = target->thread.ckvrsave;
1151                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
1152                                                 33 * sizeof(vector128), -1);
1153         }
1154
1155         return ret;
1156 }
1157
1158 /**
1159  * tm_cvmx_set - set CVMX registers
1160  * @target:     The target task.
1161  * @regset:     The user regset structure.
1162  * @pos:        The buffer position.
1163  * @count:      Number of bytes to copy.
1164  * @kbuf:       Kernel buffer to copy into.
1165  * @ubuf:       User buffer to copy from.
1166  *
1167  * This function sets in transaction checkpointed VMX registers.
1168  *
1169  * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1170  * the checkpointed values for the current transaction to fall
1171  * back on if it aborts in between. The userspace interface buffer
1172  * layout is as follows.
1173  *
1174  * struct data {
1175  *      vector128       vr[32];
1176  *      vector128       vscr;
1177  *      vector128       vrsave;
1178  *};
1179  */
1180 static int tm_cvmx_set(struct task_struct *target,
1181                         const struct user_regset *regset,
1182                         unsigned int pos, unsigned int count,
1183                         const void *kbuf, const void __user *ubuf)
1184 {
1185         int ret;
1186
1187         BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
1188
1189         if (!cpu_has_feature(CPU_FTR_TM))
1190                 return -ENODEV;
1191
1192         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1193                 return -ENODATA;
1194
1195         flush_tmregs_to_thread(target);
1196         flush_fp_to_thread(target);
1197         flush_altivec_to_thread(target);
1198
1199         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1200                                         &target->thread.ckvr_state, 0,
1201                                         33 * sizeof(vector128));
1202         if (!ret && count > 0) {
1203                 /*
1204                  * We use only the low-order word of vrsave.
1205                  */
1206                 union {
1207                         elf_vrreg_t reg;
1208                         u32 word;
1209                 } vrsave;
1210                 memset(&vrsave, 0, sizeof(vrsave));
1211                 vrsave.word = target->thread.ckvrsave;
1212                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
1213                                                 33 * sizeof(vector128), -1);
1214                 if (!ret)
1215                         target->thread.ckvrsave = vrsave.word;
1216         }
1217
1218         return ret;
1219 }
1220
1221 /**
1222  * tm_cvsx_active - get active number of registers in CVSX
1223  * @target:     The target task.
1224  * @regset:     The user regset structure.
1225  *
1226  * This function checks for the active number of available
1227  * registers in the transaction checkpointed VSX category.
1228  */
1229 static int tm_cvsx_active(struct task_struct *target,
1230                                 const struct user_regset *regset)
1231 {
1232         if (!cpu_has_feature(CPU_FTR_TM))
1233                 return -ENODEV;
1234
1235         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1236                 return 0;
1237
1238         flush_vsx_to_thread(target);
1239         return target->thread.used_vsr ? regset->n : 0;
1240 }
1241
1242 /**
1243  * tm_cvsx_get - get CVSX registers
1244  * @target:     The target task.
1245  * @regset:     The user regset structure.
1246  * @pos:        The buffer position.
1247  * @count:      Number of bytes to copy.
1248  * @kbuf:       Kernel buffer to copy from.
1249  * @ubuf:       User buffer to copy into.
1250  *
1251  * This function gets in transaction checkpointed VSX registers.
1252  *
1253  * When the transaction is active 'ckfp_state' holds the checkpointed
1254  * values for the current transaction to fall back on if it aborts
1255  * in between. This function gets those checkpointed VSX registers.
1256  * The userspace interface buffer layout is as follows.
1257  *
1258  * struct data {
1259  *      u64     vsx[32];
1260  *};
1261  */
1262 static int tm_cvsx_get(struct task_struct *target,
1263                         const struct user_regset *regset,
1264                         unsigned int pos, unsigned int count,
1265                         void *kbuf, void __user *ubuf)
1266 {
1267         u64 buf[32];
1268         int ret, i;
1269
1270         if (!cpu_has_feature(CPU_FTR_TM))
1271                 return -ENODEV;
1272
1273         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1274                 return -ENODATA;
1275
1276         /* Flush the state */
1277         flush_tmregs_to_thread(target);
1278         flush_fp_to_thread(target);
1279         flush_altivec_to_thread(target);
1280         flush_vsx_to_thread(target);
1281
1282         for (i = 0; i < 32 ; i++)
1283                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1284         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1285                                   buf, 0, 32 * sizeof(double));
1286
1287         return ret;
1288 }
1289
1290 /**
1291  * tm_cvsx_set - set CVSX registers
1292  * @target:     The target task.
1293  * @regset:     The user regset structure.
1294  * @pos:        The buffer position.
1295  * @count:      Number of bytes to copy.
1296  * @kbuf:       Kernel buffer to copy into.
1297  * @ubuf:       User buffer to copy from.
1298  *
1299  * This function sets in transaction checkpointed VSX registers.
1300  *
1301  * When the transaction is active 'ckfp_state' holds the checkpointed
1302  * VSX register values for the current transaction to fall back on
1303  * if it aborts in between. This function sets these checkpointed
1304  * VSX registers. The userspace interface buffer layout is as follows.
1305  *
1306  * struct data {
1307  *      u64     vsx[32];
1308  *};
1309  */
1310 static int tm_cvsx_set(struct task_struct *target,
1311                         const struct user_regset *regset,
1312                         unsigned int pos, unsigned int count,
1313                         const void *kbuf, const void __user *ubuf)
1314 {
1315         u64 buf[32];
1316         int ret, i;
1317
1318         if (!cpu_has_feature(CPU_FTR_TM))
1319                 return -ENODEV;
1320
1321         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1322                 return -ENODATA;
1323
1324         /* Flush the state */
1325         flush_tmregs_to_thread(target);
1326         flush_fp_to_thread(target);
1327         flush_altivec_to_thread(target);
1328         flush_vsx_to_thread(target);
1329
1330         for (i = 0; i < 32 ; i++)
1331                 buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1332
1333         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1334                                  buf, 0, 32 * sizeof(double));
1335         if (!ret)
1336                 for (i = 0; i < 32 ; i++)
1337                         target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
1338
1339         return ret;
1340 }
1341
1342 /**
1343  * tm_spr_active - get active number of registers in TM SPR
1344  * @target:     The target task.
1345  * @regset:     The user regset structure.
1346  *
1347  * This function checks the active number of available
1348  * registers in the transactional memory SPR category.
1349  */
1350 static int tm_spr_active(struct task_struct *target,
1351                          const struct user_regset *regset)
1352 {
1353         if (!cpu_has_feature(CPU_FTR_TM))
1354                 return -ENODEV;
1355
1356         return regset->n;
1357 }
1358
1359 /**
1360  * tm_spr_get - get the TM related SPR registers
1361  * @target:     The target task.
1362  * @regset:     The user regset structure.
1363  * @pos:        The buffer position.
1364  * @count:      Number of bytes to copy.
1365  * @kbuf:       Kernel buffer to copy from.
1366  * @ubuf:       User buffer to copy into.
1367  *
1368  * This function gets transactional memory related SPR registers.
1369  * The userspace interface buffer layout is as follows.
1370  *
1371  * struct {
1372  *      u64             tm_tfhar;
1373  *      u64             tm_texasr;
1374  *      u64             tm_tfiar;
1375  * };
1376  */
1377 static int tm_spr_get(struct task_struct *target,
1378                       const struct user_regset *regset,
1379                       unsigned int pos, unsigned int count,
1380                       void *kbuf, void __user *ubuf)
1381 {
1382         int ret;
1383
1384         /* Build tests */
1385         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1386         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1387         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1388
1389         if (!cpu_has_feature(CPU_FTR_TM))
1390                 return -ENODEV;
1391
1392         /* Flush the states */
1393         flush_tmregs_to_thread(target);
1394         flush_fp_to_thread(target);
1395         flush_altivec_to_thread(target);
1396
1397         /* TFHAR register */
1398         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1399                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1400
1401         /* TEXASR register */
1402         if (!ret)
1403                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1404                                 &target->thread.tm_texasr, sizeof(u64),
1405                                 2 * sizeof(u64));
1406
1407         /* TFIAR register */
1408         if (!ret)
1409                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1410                                 &target->thread.tm_tfiar,
1411                                 2 * sizeof(u64), 3 * sizeof(u64));
1412         return ret;
1413 }
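/*
 * Illustrative only: these three SPRs are exposed to userspace as one
 * regset (NT_PPC_TM_SPR), so a hypothetical tracer could fetch them with:
 *
 *	struct { __u64 tfhar, texasr, tfiar; } tmspr;
 *	struct iovec iov = { .iov_base = &tmspr, .iov_len = sizeof(tmspr) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
 */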
1414
1415 /**
1416  * tm_spr_set - set the TM related SPR registers
1417  * @target:     The target task.
1418  * @regset:     The user regset structure.
1419  * @pos:        The buffer position.
1420  * @count:      Number of bytes to copy.
1421  * @kbuf:       Kernel buffer to copy into.
1422  * @ubuf:       User buffer to copy from.
1423  *
1424  * This function sets transactional memory related SPR registers.
1425  * The userspace interface buffer layout is as follows.
1426  *
1427  * struct {
1428  *      u64             tm_tfhar;
1429  *      u64             tm_texasr;
1430  *      u64             tm_tfiar;
1431  * };
1432  */
1433 static int tm_spr_set(struct task_struct *target,
1434                       const struct user_regset *regset,
1435                       unsigned int pos, unsigned int count,
1436                       const void *kbuf, const void __user *ubuf)
1437 {
1438         int ret;
1439
1440         /* Build tests */
1441         BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
1442         BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
1443         BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
1444
1445         if (!cpu_has_feature(CPU_FTR_TM))
1446                 return -ENODEV;
1447
1448         /* Flush the states */
1449         flush_tmregs_to_thread(target);
1450         flush_fp_to_thread(target);
1451         flush_altivec_to_thread(target);
1452
1453         /* TFHAR register */
1454         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1455                                 &target->thread.tm_tfhar, 0, sizeof(u64));
1456
1457         /* TEXASR register */
1458         if (!ret)
1459                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1460                                 &target->thread.tm_texasr, sizeof(u64),
1461                                 2 * sizeof(u64));
1462
1463         /* TFIAR register */
1464         if (!ret)
1465                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1466                                 &target->thread.tm_tfiar,
1467                                  2 * sizeof(u64), 3 * sizeof(u64));
1468         return ret;
1469 }
1470
1471 static int tm_tar_active(struct task_struct *target,
1472                          const struct user_regset *regset)
1473 {
1474         if (!cpu_has_feature(CPU_FTR_TM))
1475                 return -ENODEV;
1476
1477         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1478                 return regset->n;
1479
1480         return 0;
1481 }
1482
1483 static int tm_tar_get(struct task_struct *target,
1484                       const struct user_regset *regset,
1485                       unsigned int pos, unsigned int count,
1486                       void *kbuf, void __user *ubuf)
1487 {
1488         int ret;
1489
1490         if (!cpu_has_feature(CPU_FTR_TM))
1491                 return -ENODEV;
1492
1493         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1494                 return -ENODATA;
1495
1496         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1497                                 &target->thread.tm_tar, 0, sizeof(u64));
1498         return ret;
1499 }
1500
1501 static int tm_tar_set(struct task_struct *target,
1502                       const struct user_regset *regset,
1503                       unsigned int pos, unsigned int count,
1504                       const void *kbuf, const void __user *ubuf)
1505 {
1506         int ret;
1507
1508         if (!cpu_has_feature(CPU_FTR_TM))
1509                 return -ENODEV;
1510
1511         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1512                 return -ENODATA;
1513
1514         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1515                                 &target->thread.tm_tar, 0, sizeof(u64));
1516         return ret;
1517 }
1518
1519 static int tm_ppr_active(struct task_struct *target,
1520                          const struct user_regset *regset)
1521 {
1522         if (!cpu_has_feature(CPU_FTR_TM))
1523                 return -ENODEV;
1524
1525         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1526                 return regset->n;
1527
1528         return 0;
1529 }
1530
1531
1532 static int tm_ppr_get(struct task_struct *target,
1533                       const struct user_regset *regset,
1534                       unsigned int pos, unsigned int count,
1535                       void *kbuf, void __user *ubuf)
1536 {
1537         int ret;
1538
1539         if (!cpu_has_feature(CPU_FTR_TM))
1540                 return -ENODEV;
1541
1542         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1543                 return -ENODATA;
1544
1545         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1546                                 &target->thread.tm_ppr, 0, sizeof(u64));
1547         return ret;
1548 }
1549
1550 static int tm_ppr_set(struct task_struct *target,
1551                       const struct user_regset *regset,
1552                       unsigned int pos, unsigned int count,
1553                       const void *kbuf, const void __user *ubuf)
1554 {
1555         int ret;
1556
1557         if (!cpu_has_feature(CPU_FTR_TM))
1558                 return -ENODEV;
1559
1560         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1561                 return -ENODATA;
1562
1563         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1564                                 &target->thread.tm_ppr, 0, sizeof(u64));
1565         return ret;
1566 }
1567
1568 static int tm_dscr_active(struct task_struct *target,
1569                          const struct user_regset *regset)
1570 {
1571         if (!cpu_has_feature(CPU_FTR_TM))
1572                 return -ENODEV;
1573
1574         if (MSR_TM_ACTIVE(target->thread.regs->msr))
1575                 return regset->n;
1576
1577         return 0;
1578 }
1579
1580 static int tm_dscr_get(struct task_struct *target,
1581                       const struct user_regset *regset,
1582                       unsigned int pos, unsigned int count,
1583                       void *kbuf, void __user *ubuf)
1584 {
1585         int ret;
1586
1587         if (!cpu_has_feature(CPU_FTR_TM))
1588                 return -ENODEV;
1589
1590         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1591                 return -ENODATA;
1592
1593         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1594                                 &target->thread.tm_dscr, 0, sizeof(u64));
1595         return ret;
1596 }
1597
1598 static int tm_dscr_set(struct task_struct *target,
1599                       const struct user_regset *regset,
1600                       unsigned int pos, unsigned int count,
1601                       const void *kbuf, const void __user *ubuf)
1602 {
1603         int ret;
1604
1605         if (!cpu_has_feature(CPU_FTR_TM))
1606                 return -ENODEV;
1607
1608         if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1609                 return -ENODATA;
1610
1611         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1612                                 &target->thread.tm_dscr, 0, sizeof(u64));
1613         return ret;
1614 }
1615 #endif  /* CONFIG_PPC_TRANSACTIONAL_MEM */
1616
1617 #ifdef CONFIG_PPC64
1618 static int ppr_get(struct task_struct *target,
1619                       const struct user_regset *regset,
1620                       unsigned int pos, unsigned int count,
1621                       void *kbuf, void __user *ubuf)
1622 {
1623         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1624                                    &target->thread.regs->ppr, 0, sizeof(u64));
1625 }
1626
1627 static int ppr_set(struct task_struct *target,
1628                       const struct user_regset *regset,
1629                       unsigned int pos, unsigned int count,
1630                       const void *kbuf, const void __user *ubuf)
1631 {
1632         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1633                                   &target->thread.regs->ppr, 0, sizeof(u64));
1634 }
1635
1636 static int dscr_get(struct task_struct *target,
1637                       const struct user_regset *regset,
1638                       unsigned int pos, unsigned int count,
1639                       void *kbuf, void __user *ubuf)
1640 {
1641         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1642                                    &target->thread.dscr, 0, sizeof(u64));
1643 }
1644 static int dscr_set(struct task_struct *target,
1645                       const struct user_regset *regset,
1646                       unsigned int pos, unsigned int count,
1647                       const void *kbuf, const void __user *ubuf)
1648 {
1649         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1650                                   &target->thread.dscr, 0, sizeof(u64));
1651 }
1652 #endif
1653 #ifdef CONFIG_PPC_BOOK3S_64
1654 static int tar_get(struct task_struct *target,
1655                       const struct user_regset *regset,
1656                       unsigned int pos, unsigned int count,
1657                       void *kbuf, void __user *ubuf)
1658 {
1659         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1660                                    &target->thread.tar, 0, sizeof(u64));
1661 }
1662 static int tar_set(struct task_struct *target,
1663                       const struct user_regset *regset,
1664                       unsigned int pos, unsigned int count,
1665                       const void *kbuf, const void __user *ubuf)
1666 {
1667         return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1668                                   &target->thread.tar, 0, sizeof(u64));
1669 }
1670
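/*
 * EBB regset: exposes the Event-Based Branch registers EBBRR, EBBHR and
 * BESCR as three consecutive unsigned longs (the BUILD_BUG_ONs below pin
 * that thread_struct layout).  The regset only reports data once the task
 * has actually used EBB, and requires an ISA 2.07 (CPU_FTR_ARCH_207S) CPU.
 */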
1671 static int ebb_active(struct task_struct *target,
1672                          const struct user_regset *regset)
1673 {
1674         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1675                 return -ENODEV;
1676
1677         if (target->thread.used_ebb)
1678                 return regset->n;
1679
1680         return 0;
1681 }
1682
1683 static int ebb_get(struct task_struct *target,
1684                       const struct user_regset *regset,
1685                       unsigned int pos, unsigned int count,
1686                       void *kbuf, void __user *ubuf)
1687 {
1688         /* Build tests */
1689         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1690         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1691
1692         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1693                 return -ENODEV;
1694
1695         if (!target->thread.used_ebb)
1696                 return -ENODATA;
1697
1698         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1699                         &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
1700 }
1701
1702 static int ebb_set(struct task_struct *target,
1703                       const struct user_regset *regset,
1704                       unsigned int pos, unsigned int count,
1705                       const void *kbuf, const void __user *ubuf)
1706 {
1707         int ret = 0;
1708
1709         /* Build tests */
1710         BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
1711         BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
1712
1713         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1714                 return -ENODEV;
1715
1716         if (target->thread.used_ebb)
1717                 return -ENODATA;
1718
1719         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1720                         &target->thread.ebbrr, 0, sizeof(unsigned long));
1721
1722         if (!ret)
1723                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1724                         &target->thread.ebbhr, sizeof(unsigned long),
1725                         2 * sizeof(unsigned long));
1726
1727         if (!ret)
1728                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1729                         &target->thread.bescr,
1730                         2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1731
1732         return ret;
1733 }
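
/*
 * PMU regset: copies SIAR, SDAR, SIER, MMCR2 and MMCR0 in and out as five
 * consecutive unsigned longs starting at thread.siar (layout checked by the
 * BUILD_BUG_ONs in pmu_get()/pmu_set()).  Requires CPU_FTR_ARCH_207S.
 */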
1734 static int pmu_active(struct task_struct *target,
1735                          const struct user_regset *regset)
1736 {
1737         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1738                 return -ENODEV;
1739
1740         return regset->n;
1741 }
1742
1743 static int pmu_get(struct task_struct *target,
1744                       const struct user_regset *regset,
1745                       unsigned int pos, unsigned int count,
1746                       void *kbuf, void __user *ubuf)
1747 {
1748         /* Build tests */
1749         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1750         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1751         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1752         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1753
1754         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1755                 return -ENODEV;
1756
1757         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1758                         &target->thread.siar, 0,
1759                         5 * sizeof(unsigned long));
1760 }
1761
1762 static int pmu_set(struct task_struct *target,
1763                       const struct user_regset *regset,
1764                       unsigned int pos, unsigned int count,
1765                       const void *kbuf, const void __user *ubuf)
1766 {
1767         int ret = 0;
1768
1769         /* Build tests */
1770         BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
1771         BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
1772         BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
1773         BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
1774
1775         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1776                 return -ENODEV;
1777
1778         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1779                         &target->thread.siar, 0,
1780                         sizeof(unsigned long));
1781
1782         if (!ret)
1783                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1784                         &target->thread.sdar, sizeof(unsigned long),
1785                         2 * sizeof(unsigned long));
1786
1787         if (!ret)
1788                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1789                         &target->thread.sier, 2 * sizeof(unsigned long),
1790                         3 * sizeof(unsigned long));
1791
1792         if (!ret)
1793                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1794                         &target->thread.mmcr2, 3 * sizeof(unsigned long),
1795                         4 * sizeof(unsigned long));
1796
1797         if (!ret)
1798                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1799                         &target->thread.mmcr0, 4 * sizeof(unsigned long),
1800                         5 * sizeof(unsigned long));
1801         return ret;
1802 }
1803 #endif
1804
1805 #ifdef CONFIG_PPC_MEM_KEYS
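/*
 * Memory-protection-key regset: pkey_get() returns AMR, IAMR and UAMOR as
 * ELF_NPKEY consecutive unsigned longs starting at thread.amr, while
 * pkey_set() accepts only a new AMR value and masks it with UAMOR so a
 * tracer cannot flip key bits that userspace itself may not modify.
 */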
1806 static int pkey_active(struct task_struct *target,
1807                        const struct user_regset *regset)
1808 {
1809         if (!arch_pkeys_enabled())
1810                 return -ENODEV;
1811
1812         return regset->n;
1813 }
1814
1815 static int pkey_get(struct task_struct *target,
1816                     const struct user_regset *regset,
1817                     unsigned int pos, unsigned int count,
1818                     void *kbuf, void __user *ubuf)
1819 {
1820         BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
1821         BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
1822
1823         if (!arch_pkeys_enabled())
1824                 return -ENODEV;
1825
1826         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1827                                    &target->thread.amr, 0,
1828                                    ELF_NPKEY * sizeof(unsigned long));
1829 }
1830
1831 static int pkey_set(struct task_struct *target,
1832                       const struct user_regset *regset,
1833                       unsigned int pos, unsigned int count,
1834                       const void *kbuf, const void __user *ubuf)
1835 {
1836         u64 new_amr;
1837         int ret;
1838
1839         if (!arch_pkeys_enabled())
1840                 return -ENODEV;
1841
1842         /* Only the AMR can be set from userspace */
1843         if (pos != 0 || count != sizeof(new_amr))
1844                 return -EINVAL;
1845
1846         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1847                                  &new_amr, 0, sizeof(new_amr));
1848         if (ret)
1849                 return ret;
1850
1851         /* UAMOR determines which bits of the AMR can be set from userspace. */
1852         target->thread.amr = (new_amr & target->thread.uamor) |
1853                 (target->thread.amr & ~target->thread.uamor);
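        /*
         * Illustrative example (hypothetical values): with uamor = 0x0c and
         * a current amr = 0xf0, a request of new_amr = 0xff yields
         * (0xff & 0x0c) | (0xf0 & ~0x0c) = 0xfc, i.e. only the
         * UAMOR-permitted AMR bits actually change.
         */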
1854
1855         return 0;
1856 }
1857 #endif /* CONFIG_PPC_MEM_KEYS */
1858
1859 /*
1860  * These are our native regset flavors.
1861  */
1862 enum powerpc_regset {
1863         REGSET_GPR,
1864         REGSET_FPR,
1865 #ifdef CONFIG_ALTIVEC
1866         REGSET_VMX,
1867 #endif
1868 #ifdef CONFIG_VSX
1869         REGSET_VSX,
1870 #endif
1871 #ifdef CONFIG_SPE
1872         REGSET_SPE,
1873 #endif
1874 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1875         REGSET_TM_CGPR,         /* TM checkpointed GPR registers */
1876         REGSET_TM_CFPR,         /* TM checkpointed FPR registers */
1877         REGSET_TM_CVMX,         /* TM checkpointed VMX registers */
1878         REGSET_TM_CVSX,         /* TM checkpointed VSX registers */
1879         REGSET_TM_SPR,          /* TM specific SPR registers */
1880         REGSET_TM_CTAR,         /* TM checkpointed TAR register */
1881         REGSET_TM_CPPR,         /* TM checkpointed PPR register */
1882         REGSET_TM_CDSCR,        /* TM checkpointed DSCR register */
1883 #endif
1884 #ifdef CONFIG_PPC64
1885         REGSET_PPR,             /* PPR register */
1886         REGSET_DSCR,            /* DSCR register */
1887 #endif
1888 #ifdef CONFIG_PPC_BOOK3S_64
1889         REGSET_TAR,             /* TAR register */
1890         REGSET_EBB,             /* EBB registers */
1891         REGSET_PMR,             /* Performance Monitor Registers */
1892 #endif
1893 #ifdef CONFIG_PPC_MEM_KEYS
1894         REGSET_PKEY,            /* AMR register */
1895 #endif
1896 };
1897
1898 static const struct user_regset native_regsets[] = {
1899         [REGSET_GPR] = {
1900                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
1901                 .size = sizeof(long), .align = sizeof(long),
1902                 .get = gpr_get, .set = gpr_set
1903         },
1904         [REGSET_FPR] = {
1905                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
1906                 .size = sizeof(double), .align = sizeof(double),
1907                 .get = fpr_get, .set = fpr_set
1908         },
1909 #ifdef CONFIG_ALTIVEC
1910         [REGSET_VMX] = {
1911                 .core_note_type = NT_PPC_VMX, .n = 34,
1912                 .size = sizeof(vector128), .align = sizeof(vector128),
1913                 .active = vr_active, .get = vr_get, .set = vr_set
1914         },
1915 #endif
1916 #ifdef CONFIG_VSX
1917         [REGSET_VSX] = {
1918                 .core_note_type = NT_PPC_VSX, .n = 32,
1919                 .size = sizeof(double), .align = sizeof(double),
1920                 .active = vsr_active, .get = vsr_get, .set = vsr_set
1921         },
1922 #endif
1923 #ifdef CONFIG_SPE
1924         [REGSET_SPE] = {
1925                 .core_note_type = NT_PPC_SPE, .n = 35,
1926                 .size = sizeof(u32), .align = sizeof(u32),
1927                 .active = evr_active, .get = evr_get, .set = evr_set
1928         },
1929 #endif
1930 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1931         [REGSET_TM_CGPR] = {
1932                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
1933                 .size = sizeof(long), .align = sizeof(long),
1934                 .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
1935         },
1936         [REGSET_TM_CFPR] = {
1937                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
1938                 .size = sizeof(double), .align = sizeof(double),
1939                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
1940         },
1941         [REGSET_TM_CVMX] = {
1942                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
1943                 .size = sizeof(vector128), .align = sizeof(vector128),
1944                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
1945         },
1946         [REGSET_TM_CVSX] = {
1947                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
1948                 .size = sizeof(double), .align = sizeof(double),
1949                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
1950         },
1951         [REGSET_TM_SPR] = {
1952                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
1953                 .size = sizeof(u64), .align = sizeof(u64),
1954                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
1955         },
1956         [REGSET_TM_CTAR] = {
1957                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
1958                 .size = sizeof(u64), .align = sizeof(u64),
1959                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
1960         },
1961         [REGSET_TM_CPPR] = {
1962                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
1963                 .size = sizeof(u64), .align = sizeof(u64),
1964                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
1965         },
1966         [REGSET_TM_CDSCR] = {
1967                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
1968                 .size = sizeof(u64), .align = sizeof(u64),
1969                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
1970         },
1971 #endif
1972 #ifdef CONFIG_PPC64
1973         [REGSET_PPR] = {
1974                 .core_note_type = NT_PPC_PPR, .n = 1,
1975                 .size = sizeof(u64), .align = sizeof(u64),
1976                 .get = ppr_get, .set = ppr_set
1977         },
1978         [REGSET_DSCR] = {
1979                 .core_note_type = NT_PPC_DSCR, .n = 1,
1980                 .size = sizeof(u64), .align = sizeof(u64),
1981                 .get = dscr_get, .set = dscr_set
1982         },
1983 #endif
1984 #ifdef CONFIG_PPC_BOOK3S_64
1985         [REGSET_TAR] = {
1986                 .core_note_type = NT_PPC_TAR, .n = 1,
1987                 .size = sizeof(u64), .align = sizeof(u64),
1988                 .get = tar_get, .set = tar_set
1989         },
1990         [REGSET_EBB] = {
1991                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
1992                 .size = sizeof(u64), .align = sizeof(u64),
1993                 .active = ebb_active, .get = ebb_get, .set = ebb_set
1994         },
1995         [REGSET_PMR] = {
1996                 .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
1997                 .size = sizeof(u64), .align = sizeof(u64),
1998                 .active = pmu_active, .get = pmu_get, .set = pmu_set
1999         },
2000 #endif
2001 #ifdef CONFIG_PPC_MEM_KEYS
2002         [REGSET_PKEY] = {
2003                 .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
2004                 .size = sizeof(u64), .align = sizeof(u64),
2005                 .active = pkey_active, .get = pkey_get, .set = pkey_set
2006         },
2007 #endif
2008 };
2009
2010 static const struct user_regset_view user_ppc_native_view = {
2011         .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
2012         .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2013 };
2014
2015 #ifdef CONFIG_PPC64
2016 #include <linux/compat.h>
2017
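/*
 * Compat (32-bit) view of the GPR regset.  gpr32_get_common() and
 * gpr32_set_common() walk the 64-bit register array one slot at a time,
 * narrowing/widening each value to a compat_ulong_t.  MSR is special-cased
 * through get_user_msr()/set_user_msr(); on write, slots above
 * PT_MAX_PUT_REG are silently skipped and TRAP goes through set_user_trap().
 * Anything beyond the known registers is zero-filled on read and ignored on
 * write.
 */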
2018 static int gpr32_get_common(struct task_struct *target,
2019                      const struct user_regset *regset,
2020                      unsigned int pos, unsigned int count,
2021                             void *kbuf, void __user *ubuf,
2022                             unsigned long *regs)
2023 {
2024         compat_ulong_t *k = kbuf;
2025         compat_ulong_t __user *u = ubuf;
2026         compat_ulong_t reg;
2027
2028         pos /= sizeof(reg);
2029         count /= sizeof(reg);
2030
2031         if (kbuf)
2032                 for (; count > 0 && pos < PT_MSR; --count)
2033                         *k++ = regs[pos++];
2034         else
2035                 for (; count > 0 && pos < PT_MSR; --count)
2036                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2037                                 return -EFAULT;
2038
2039         if (count > 0 && pos == PT_MSR) {
2040                 reg = get_user_msr(target);
2041                 if (kbuf)
2042                         *k++ = reg;
2043                 else if (__put_user(reg, u++))
2044                         return -EFAULT;
2045                 ++pos;
2046                 --count;
2047         }
2048
2049         if (kbuf)
2050                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2051                         *k++ = regs[pos++];
2052         else
2053                 for (; count > 0 && pos < PT_REGS_COUNT; --count)
2054                         if (__put_user((compat_ulong_t) regs[pos++], u++))
2055                                 return -EFAULT;
2056
2057         kbuf = k;
2058         ubuf = u;
2059         pos *= sizeof(reg);
2060         count *= sizeof(reg);
2061         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
2062                                         PT_REGS_COUNT * sizeof(reg), -1);
2063 }
2064
2065 static int gpr32_set_common(struct task_struct *target,
2066                      const struct user_regset *regset,
2067                      unsigned int pos, unsigned int count,
2068                      const void *kbuf, const void __user *ubuf,
2069                      unsigned long *regs)
2070 {
2071         const compat_ulong_t *k = kbuf;
2072         const compat_ulong_t __user *u = ubuf;
2073         compat_ulong_t reg;
2074
2075         pos /= sizeof(reg);
2076         count /= sizeof(reg);
2077
2078         if (kbuf)
2079                 for (; count > 0 && pos < PT_MSR; --count)
2080                         regs[pos++] = *k++;
2081         else
2082                 for (; count > 0 && pos < PT_MSR; --count) {
2083                         if (__get_user(reg, u++))
2084                                 return -EFAULT;
2085                         regs[pos++] = reg;
2086                 }
2087
2088
2089         if (count > 0 && pos == PT_MSR) {
2090                 if (kbuf)
2091                         reg = *k++;
2092                 else if (__get_user(reg, u++))
2093                         return -EFAULT;
2094                 set_user_msr(target, reg);
2095                 ++pos;
2096                 --count;
2097         }
2098
2099         if (kbuf) {
2100                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
2101                         regs[pos++] = *k++;
2102                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2103                         ++k;
2104         } else {
2105                 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
2106                         if (__get_user(reg, u++))
2107                                 return -EFAULT;
2108                         regs[pos++] = reg;
2109                 }
2110                 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
2111                         if (__get_user(reg, u++))
2112                                 return -EFAULT;
2113         }
2114
2115         if (count > 0 && pos == PT_TRAP) {
2116                 if (kbuf)
2117                         reg = *k++;
2118                 else if (__get_user(reg, u++))
2119                         return -EFAULT;
2120                 set_user_trap(target, reg);
2121                 ++pos;
2122                 --count;
2123         }
2124
2125         kbuf = k;
2126         ubuf = u;
2127         pos *= sizeof(reg);
2128         count *= sizeof(reg);
2129         return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
2130                                          (PT_TRAP + 1) * sizeof(reg), -1);
2131 }
2132
2133 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2134 static int tm_cgpr32_get(struct task_struct *target,
2135                      const struct user_regset *regset,
2136                      unsigned int pos, unsigned int count,
2137                      void *kbuf, void __user *ubuf)
2138 {
2139         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2140                         &target->thread.ckpt_regs.gpr[0]);
2141 }
2142
2143 static int tm_cgpr32_set(struct task_struct *target,
2144                      const struct user_regset *regset,
2145                      unsigned int pos, unsigned int count,
2146                      const void *kbuf, const void __user *ubuf)
2147 {
2148         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2149                         &target->thread.ckpt_regs.gpr[0]);
2150 }
2151 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2152
2153 static int gpr32_get(struct task_struct *target,
2154                      const struct user_regset *regset,
2155                      unsigned int pos, unsigned int count,
2156                      void *kbuf, void __user *ubuf)
2157 {
2158         int i;
2159
2160         if (target->thread.regs == NULL)
2161                 return -EIO;
2162
2163         if (!FULL_REGS(target->thread.regs)) {
2164                 /*
2165                  * We have a partial register set.
2166                  * Fill 14-31 with bogus values.
2167                  */
2168                 for (i = 14; i < 32; i++)
2169                         target->thread.regs->gpr[i] = NV_REG_POISON;
2170         }
2171         return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2172                         &target->thread.regs->gpr[0]);
2173 }
2174
2175 static int gpr32_set(struct task_struct *target,
2176                      const struct user_regset *regset,
2177                      unsigned int pos, unsigned int count,
2178                      const void *kbuf, const void __user *ubuf)
2179 {
2180         if (target->thread.regs == NULL)
2181                 return -EIO;
2182
2183         CHECK_FULL_REGS(target->thread.regs);
2184         return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2185                         &target->thread.regs->gpr[0]);
2186 }
2187
2188 /*
2189  * These are the regset flavors matching the CONFIG_PPC32 native set.
2190  */
2191 static const struct user_regset compat_regsets[] = {
2192         [REGSET_GPR] = {
2193                 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
2194                 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
2195                 .get = gpr32_get, .set = gpr32_set
2196         },
2197         [REGSET_FPR] = {
2198                 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
2199                 .size = sizeof(double), .align = sizeof(double),
2200                 .get = fpr_get, .set = fpr_set
2201         },
2202 #ifdef CONFIG_ALTIVEC
2203         [REGSET_VMX] = {
2204                 .core_note_type = NT_PPC_VMX, .n = 34,
2205                 .size = sizeof(vector128), .align = sizeof(vector128),
2206                 .active = vr_active, .get = vr_get, .set = vr_set
2207         },
2208 #endif
2209 #ifdef CONFIG_SPE
2210         [REGSET_SPE] = {
2211                 .core_note_type = NT_PPC_SPE, .n = 35,
2212                 .size = sizeof(u32), .align = sizeof(u32),
2213                 .active = evr_active, .get = evr_get, .set = evr_set
2214         },
2215 #endif
2216 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2217         [REGSET_TM_CGPR] = {
2218                 .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
2219                 .size = sizeof(long), .align = sizeof(long),
2220                 .active = tm_cgpr_active,
2221                 .get = tm_cgpr32_get, .set = tm_cgpr32_set
2222         },
2223         [REGSET_TM_CFPR] = {
2224                 .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
2225                 .size = sizeof(double), .align = sizeof(double),
2226                 .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
2227         },
2228         [REGSET_TM_CVMX] = {
2229                 .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
2230                 .size = sizeof(vector128), .align = sizeof(vector128),
2231                 .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
2232         },
2233         [REGSET_TM_CVSX] = {
2234                 .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
2235                 .size = sizeof(double), .align = sizeof(double),
2236                 .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
2237         },
2238         [REGSET_TM_SPR] = {
2239                 .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
2240                 .size = sizeof(u64), .align = sizeof(u64),
2241                 .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
2242         },
2243         [REGSET_TM_CTAR] = {
2244                 .core_note_type = NT_PPC_TM_CTAR, .n = 1,
2245                 .size = sizeof(u64), .align = sizeof(u64),
2246                 .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
2247         },
2248         [REGSET_TM_CPPR] = {
2249                 .core_note_type = NT_PPC_TM_CPPR, .n = 1,
2250                 .size = sizeof(u64), .align = sizeof(u64),
2251                 .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
2252         },
2253         [REGSET_TM_CDSCR] = {
2254                 .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
2255                 .size = sizeof(u64), .align = sizeof(u64),
2256                 .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
2257         },
2258 #endif
2259 #ifdef CONFIG_PPC64
2260         [REGSET_PPR] = {
2261                 .core_note_type = NT_PPC_PPR, .n = 1,
2262                 .size = sizeof(u64), .align = sizeof(u64),
2263                 .get = ppr_get, .set = ppr_set
2264         },
2265         [REGSET_DSCR] = {
2266                 .core_note_type = NT_PPC_DSCR, .n = 1,
2267                 .size = sizeof(u64), .align = sizeof(u64),
2268                 .get = dscr_get, .set = dscr_set
2269         },
2270 #endif
2271 #ifdef CONFIG_PPC_BOOK3S_64
2272         [REGSET_TAR] = {
2273                 .core_note_type = NT_PPC_TAR, .n = 1,
2274                 .size = sizeof(u64), .align = sizeof(u64),
2275                 .get = tar_get, .set = tar_set
2276         },
2277         [REGSET_EBB] = {
2278                 .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
2279                 .size = sizeof(u64), .align = sizeof(u64),
2280                 .active = ebb_active, .get = ebb_get, .set = ebb_set
2281         },
2282 #endif
2283 };
2284
2285 static const struct user_regset_view user_ppc_compat_view = {
2286         .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
2287         .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
2288 };
2289 #endif  /* CONFIG_PPC64 */
2290
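/*
 * Pick the regset view matching the tracee's ABI: 32-bit (compat) tasks get
 * user_ppc_compat_view on ppc64, everything else gets the native view.
 */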
2291 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2292 {
2293 #ifdef CONFIG_PPC64
2294         if (test_tsk_thread_flag(task, TIF_32BIT))
2295                 return &user_ppc_compat_view;
2296 #endif
2297         return &user_ppc_native_view;
2298 }
2299
2300
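/*
 * Single-step and branch-step control.  On CONFIG_PPC_ADV_DEBUG_REGS
 * (BookE-style debug) parts this is driven by DBCR0[IC]/DBCR0[BT] together
 * with MSR[DE]; classic parts use MSR[SE]/MSR[BE] directly.  In all cases
 * TIF_SINGLESTEP records that the tracer requested stepping.
 */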
2301 void user_enable_single_step(struct task_struct *task)
2302 {
2303         struct pt_regs *regs = task->thread.regs;
2304
2305         if (regs != NULL) {
2306 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2307                 task->thread.debug.dbcr0 &= ~DBCR0_BT;
2308                 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2309                 regs->msr |= MSR_DE;
2310 #else
2311                 regs->msr &= ~MSR_BE;
2312                 regs->msr |= MSR_SE;
2313 #endif
2314         }
2315         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2316 }
2317
2318 void user_enable_block_step(struct task_struct *task)
2319 {
2320         struct pt_regs *regs = task->thread.regs;
2321
2322         if (regs != NULL) {
2323 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2324                 task->thread.debug.dbcr0 &= ~DBCR0_IC;
2325                 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2326                 regs->msr |= MSR_DE;
2327 #else
2328                 regs->msr &= ~MSR_SE;
2329                 regs->msr |= MSR_BE;
2330 #endif
2331         }
2332         set_tsk_thread_flag(task, TIF_SINGLESTEP);
2333 }
2334
2335 void user_disable_single_step(struct task_struct *task)
2336 {
2337         struct pt_regs *regs = task->thread.regs;
2338
2339         if (regs != NULL) {
2340 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2341                 /*
2342                  * The logic to disable single stepping should be as
2343                  * simple as turning off the Instruction Complete flag.
2344                  * And, after doing so, if all debug flags are off, turn
2345                  * off DBCR0(IDM) and MSR(DE) .... Torez
2346                  */
2347                 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
2348                 /*
2349                  * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2350                  */
2351                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2352                                         task->thread.debug.dbcr1)) {
2353                         /*
2354                          * All debug events were off.....
2355                          */
2356                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2357                         regs->msr &= ~MSR_DE;
2358                 }
2359 #else
2360                 regs->msr &= ~(MSR_SE | MSR_BE);
2361 #endif
2362         }
2363         clear_tsk_thread_flag(task, TIF_SINGLESTEP);
2364 }
2365
2366 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2367 void ptrace_triggered(struct perf_event *bp,
2368                       struct perf_sample_data *data, struct pt_regs *regs)
2369 {
2370         struct perf_event_attr attr;
2371
2372         /*
2373          * Disable the breakpoint request here since ptrace has defined a
2374          * one-shot behaviour for breakpoint exceptions in PPC64.
2375          * The SIGTRAP signal is generated automatically for us in do_dabr().
2376          * We don't have to do anything about that here.
2377          */
2378         attr = bp->attr;
2379         attr.disabled = true;
2380         modify_user_hw_breakpoint(bp, &attr);
2381 }
2382 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2383
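/*
 * Set the legacy ptrace debug register (typically reached via the
 * PTRACE_SET_DEBUGREG request): 'data' carries the watchpoint address with
 * flag bits in its low three bits and 'addr' must be zero.  Depending on
 * the configuration the breakpoint is backed by a perf hw_breakpoint, a raw
 * arch_hw_breakpoint, or the BookE DAC1/DBCR0 registers.
 */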
2384 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
2385                                unsigned long data)
2386 {
2387 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2388         int ret;
2389         struct thread_struct *thread = &(task->thread);
2390         struct perf_event *bp;
2391         struct perf_event_attr attr;
2392 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2393 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2394         bool set_bp = true;
2395         struct arch_hw_breakpoint hw_brk;
2396 #endif
2397
2398         /* For ppc64 we support one DABR and no IABRs at the moment.
2399          *  For embedded processors we support one DAC and no IACs at the
2400          *  moment.
2401          */
2402         if (addr > 0)
2403                 return -EINVAL;
2404
2405         /* The bottom 3 bits in dabr are flags */
2406         if ((data & ~0x7UL) >= TASK_SIZE)
2407                 return -EIO;
2408
2409 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2410         /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2411          *  It was assumed, on previous implementations, that 3 bits were
2412          *  passed together with the data address, fitting the design of the
2413          *  DABR register, as follows:
2414          *
2415          *  bit 0: Read flag
2416          *  bit 1: Write flag
2417          *  bit 2: Breakpoint translation
2418          *
2419          *  Thus, we use them here as such.
2420          */
2421
2422         /* Ensure breakpoint translation bit is set */
2423         if (data && !(data & HW_BRK_TYPE_TRANSLATE))
2424                 return -EIO;
2425         hw_brk.address = data & (~HW_BRK_TYPE_DABR);
2426         hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
2427         hw_brk.len = 8;
2428         set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
2429 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2430         bp = thread->ptrace_bps[0];
2431         if (!set_bp) {
2432                 if (bp) {
2433                         unregister_hw_breakpoint(bp);
2434                         thread->ptrace_bps[0] = NULL;
2435                 }
2436                 return 0;
2437         }
2438         if (bp) {
2439                 attr = bp->attr;
2440                 attr.bp_addr = hw_brk.address;
2441                 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
2442
2443                 /* Enable breakpoint */
2444                 attr.disabled = false;
2445
2446                 ret = modify_user_hw_breakpoint(bp, &attr);
2447                 if (ret) {
2448                         return ret;
2449                 }
2450                 thread->ptrace_bps[0] = bp;
2451                 thread->hw_brk = hw_brk;
2452                 return 0;
2453         }
2454
2455         /* Create a new breakpoint request if one doesn't exist already */
2456         hw_breakpoint_init(&attr);
2457         attr.bp_addr = hw_brk.address;
2458         attr.bp_len = 8;
2459         arch_bp_generic_fields(hw_brk.type,
2460                                &attr.bp_type);
2461
2462         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2463                                                ptrace_triggered, NULL, task);
2464         if (IS_ERR(bp)) {
2465                 thread->ptrace_bps[0] = NULL;
2466                 return PTR_ERR(bp);
2467         }
2468
2469 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2470         if (set_bp && (!ppc_breakpoint_available()))
2471                 return -ENODEV;
2472 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2473         task->thread.hw_brk = hw_brk;
2474 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2475         /* As described above, it was assumed 3 bits were passed with the data
2476          *  address, but we will assume only the mode bits will be passed
2477          *  so as not to cause alignment restrictions for DAC-based processors.
2478          */
2479
2480         /* DACs hold the whole address without any mode flags */
2481         task->thread.debug.dac1 = data & ~0x3UL;
2482
2483         if (task->thread.debug.dac1 == 0) {
2484                 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2485                 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
2486                                         task->thread.debug.dbcr1)) {
2487                         task->thread.regs->msr &= ~MSR_DE;
2488                         task->thread.debug.dbcr0 &= ~DBCR0_IDM;
2489                 }
2490                 return 0;
2491         }
2492
2493         /* Read or Write bits must be set */
2494
2495         if (!(data & 0x3UL))
2496                 return -EINVAL;
2497
2498         /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2499            register */
2500         task->thread.debug.dbcr0 |= DBCR0_IDM;
2501
2502         /* Check for write and read flags and set DBCR0
2503            accordingly */
2504         dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
2505         if (data & 0x1UL)
2506                 dbcr_dac(task) |= DBCR_DAC1R;
2507         if (data & 0x2UL)
2508                 dbcr_dac(task) |= DBCR_DAC1W;
2509         task->thread.regs->msr |= MSR_DE;
2510 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2511         return 0;
2512 }
2513
2514 /*
2515  * Called by kernel/ptrace.c when detaching.
2516  *
2517  * Make sure single step bits etc are not set.
2518  */
2519 void ptrace_disable(struct task_struct *child)
2520 {
2521         /* make sure the single step bit is not set. */
2522         user_disable_single_step(child);
2523         clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
2524 }
2525
2526 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
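/*
 * Allocate an instruction-address-compare (IAC) slot for a hardware
 * instruction breakpoint.  Exact-address breakpoints use a single free IAC;
 * inclusive/exclusive ranges consume an IAC pair (1+2 or 3+4).  The slot
 * number returned here is the handle later passed to del_instruction_bp().
 */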
2527 static long set_instruction_bp(struct task_struct *child,
2528                               struct ppc_hw_breakpoint *bp_info)
2529 {
2530         int slot;
2531         int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
2532         int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
2533         int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
2534         int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
2535
2536         if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2537                 slot2_in_use = 1;
2538         if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2539                 slot4_in_use = 1;
2540
2541         if (bp_info->addr >= TASK_SIZE)
2542                 return -EIO;
2543
2544         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
2545
2546                 /* Make sure range is valid. */
2547                 if (bp_info->addr2 >= TASK_SIZE)
2548                         return -EIO;
2549
2550                 /* We need a pair of IAC registers */
2551                 if ((!slot1_in_use) && (!slot2_in_use)) {
2552                         slot = 1;
2553                         child->thread.debug.iac1 = bp_info->addr;
2554                         child->thread.debug.iac2 = bp_info->addr2;
2555                         child->thread.debug.dbcr0 |= DBCR0_IAC1;
2556                         if (bp_info->addr_mode ==
2557                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2558                                 dbcr_iac_range(child) |= DBCR_IAC12X;
2559                         else
2560                                 dbcr_iac_range(child) |= DBCR_IAC12I;
2561 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2562                 } else if ((!slot3_in_use) && (!slot4_in_use)) {
2563                         slot = 3;
2564                         child->thread.debug.iac3 = bp_info->addr;
2565                         child->thread.debug.iac4 = bp_info->addr2;
2566                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2567                         if (bp_info->addr_mode ==
2568                                         PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2569                                 dbcr_iac_range(child) |= DBCR_IAC34X;
2570                         else
2571                                 dbcr_iac_range(child) |= DBCR_IAC34I;
2572 #endif
2573                 } else
2574                         return -ENOSPC;
2575         } else {
2576                 /* We only need one.  If possible leave a pair free in
2577                  * case a range is needed later
2578                  */
2579                 if (!slot1_in_use) {
2580                         /*
2581                          * Don't use iac1 if iac1-iac2 are free and either
2582                          * iac3 or iac4 (but not both) are free
2583                          */
2584                         if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
2585                                 slot = 1;
2586                                 child->thread.debug.iac1 = bp_info->addr;
2587                                 child->thread.debug.dbcr0 |= DBCR0_IAC1;
2588                                 goto out;
2589                         }
2590                 }
2591                 if (!slot2_in_use) {
2592                         slot = 2;
2593                         child->thread.debug.iac2 = bp_info->addr;
2594                         child->thread.debug.dbcr0 |= DBCR0_IAC2;
2595 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2596                 } else if (!slot3_in_use) {
2597                         slot = 3;
2598                         child->thread.debug.iac3 = bp_info->addr;
2599                         child->thread.debug.dbcr0 |= DBCR0_IAC3;
2600                 } else if (!slot4_in_use) {
2601                         slot = 4;
2602                         child->thread.debug.iac4 = bp_info->addr;
2603                         child->thread.debug.dbcr0 |= DBCR0_IAC4;
2604 #endif
2605                 } else
2606                         return -ENOSPC;
2607         }
2608 out:
2609         child->thread.debug.dbcr0 |= DBCR0_IDM;
2610         child->thread.regs->msr |= MSR_DE;
2611
2612         return slot;
2613 }
2614
2615 static int del_instruction_bp(struct task_struct *child, int slot)
2616 {
2617         switch (slot) {
2618         case 1:
2619                 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
2620                         return -ENOENT;
2621
2622                 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
2623                         /* address range - clear slots 1 & 2 */
2624                         child->thread.debug.iac2 = 0;
2625                         dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
2626                 }
2627                 child->thread.debug.iac1 = 0;
2628                 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
2629                 break;
2630         case 2:
2631                 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
2632                         return -ENOENT;
2633
2634                 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
2635                         /* used in a range */
2636                         return -EINVAL;
2637                 child->thread.debug.iac2 = 0;
2638                 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
2639                 break;
2640 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2641         case 3:
2642                 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
2643                         return -ENOENT;
2644
2645                 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
2646                         /* address range - clear slots 3 & 4 */
2647                         child->thread.debug.iac4 = 0;
2648                         dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
2649                 }
2650                 child->thread.debug.iac3 = 0;
2651                 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
2652                 break;
2653         case 4:
2654                 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
2655                         return -ENOENT;
2656
2657                 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
2658                         /* Used in a range */
2659                         return -EINVAL;
2660                 child->thread.debug.iac4 = 0;
2661                 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
2662                 break;
2663 #endif
2664         default:
2665                 return -EINVAL;
2666         }
2667         return 0;
2668 }
2669
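/*
 * Allocate a data-address-compare (DAC) slot for a data watchpoint,
 * optionally with a data-value-compare (DVC) condition where the hardware
 * supports it.  Returns slot + 4 so that instruction (1-4) and data (5-6)
 * breakpoint handles share a single number space for ppc_del_hwdebug().
 */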
2670 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
2671 {
2672         int byte_enable =
2673                 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
2674                 & 0xf;
2675         int condition_mode =
2676                 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
2677         int slot;
2678
2679         if (byte_enable && (condition_mode == 0))
2680                 return -EINVAL;
2681
2682         if (bp_info->addr >= TASK_SIZE)
2683                 return -EIO;
2684
2685         if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
2686                 slot = 1;
2687                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2688                         dbcr_dac(child) |= DBCR_DAC1R;
2689                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2690                         dbcr_dac(child) |= DBCR_DAC1W;
2691                 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
2692 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2693                 if (byte_enable) {
2694                         child->thread.debug.dvc1 =
2695                                 (unsigned long)bp_info->condition_value;
2696                         child->thread.debug.dbcr2 |=
2697                                 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
2698                                  (condition_mode << DBCR2_DVC1M_SHIFT));
2699                 }
2700 #endif
2701 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2702         } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2703                 /* Both dac1 and dac2 are part of a range */
2704                 return -ENOSPC;
2705 #endif
2706         } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
2707                 slot = 2;
2708                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2709                         dbcr_dac(child) |= DBCR_DAC2R;
2710                 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2711                         dbcr_dac(child) |= DBCR_DAC2W;
2712                 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
2713 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2714                 if (byte_enable) {
2715                         child->thread.debug.dvc2 =
2716                                 (unsigned long)bp_info->condition_value;
2717                         child->thread.debug.dbcr2 |=
2718                                 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
2719                                  (condition_mode << DBCR2_DVC2M_SHIFT));
2720                 }
2721 #endif
2722         } else
2723                 return -ENOSPC;
2724         child->thread.debug.dbcr0 |= DBCR0_IDM;
2725         child->thread.regs->msr |= MSR_DE;
2726
2727         return slot + 4;
2728 }
2729
2730 static int del_dac(struct task_struct *child, int slot)
2731 {
2732         if (slot == 1) {
2733                 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
2734                         return -ENOENT;
2735
2736                 child->thread.debug.dac1 = 0;
2737                 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
2738 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2739                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
2740                         child->thread.debug.dac2 = 0;
2741                         child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
2742                 }
2743                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
2744 #endif
2745 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2746                 child->thread.debug.dvc1 = 0;
2747 #endif
2748         } else if (slot == 2) {
2749                 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
2750                         return -ENOENT;
2751
2752 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2753                 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
2754                         /* Part of a range */
2755                         return -EINVAL;
2756                 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
2757 #endif
2758 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2759                 child->thread.debug.dvc2 = 0;
2760 #endif
2761                 child->thread.debug.dac2 = 0;
2762                 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
2763         } else
2764                 return -EINVAL;
2765
2766         return 0;
2767 }
2768 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2769
2770 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2771 static int set_dac_range(struct task_struct *child,
2772                          struct ppc_hw_breakpoint *bp_info)
2773 {
2774         int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
2775
2776         /* We don't allow range watchpoints to be used with DVC */
2777         if (bp_info->condition_mode)
2778                 return -EINVAL;
2779
2780         /*
2781          * Best effort to verify the address range.  The user/supervisor bits
2782          * prevent trapping in kernel space, but let's fail on an obvious bad
2783          * range.  The simple test on the mask is not fool-proof, and any
2784          * exclusive range will spill over into kernel space.
2785          */
2786         if (bp_info->addr >= TASK_SIZE)
2787                 return -EIO;
2788         if (mode == PPC_BREAKPOINT_MODE_MASK) {
2789                 /*
2790                  * dac2 is a bitmask.  Don't allow a mask that makes a
2791                  * kernel space address from a valid dac1 value
2792                  */
2793                 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
2794                         return -EIO;
2795         } else {
2796                 /*
2797                  * For range breakpoints, addr2 must also be a valid address
2798                  */
2799                 if (bp_info->addr2 >= TASK_SIZE)
2800                         return -EIO;
2801         }
2802
2803         if (child->thread.debug.dbcr0 &
2804             (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
2805                 return -ENOSPC;
2806
2807         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2808                 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
2809         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2810                 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
2811         child->thread.debug.dac1 = bp_info->addr;
2812         child->thread.debug.dac2 = bp_info->addr2;
2813         if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2814                 child->thread.debug.dbcr2  |= DBCR2_DAC12M;
2815         else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
2816                 child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
2817         else    /* PPC_BREAKPOINT_MODE_MASK */
2818                 child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
2819         child->thread.regs->msr |= MSR_DE;
2820
2821         return 5;
2822 }
2823 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2824
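/*
 * Install a hardware breakpoint described by struct ppc_hw_breakpoint and
 * return a positive slot handle or a negative errno.  A tracer typically
 * reaches this through the PPC_PTRACE_SETHWDEBUG request; a minimal sketch
 * (illustrative only, error handling omitted) of an 8-byte write watchpoint
 * on a hypothetical watch_addr might look like:
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)watch_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot);
 */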
2825 static long ppc_set_hwdebug(struct task_struct *child,
2826                      struct ppc_hw_breakpoint *bp_info)
2827 {
2828 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2829         int len = 0;
2830         struct thread_struct *thread = &(child->thread);
2831         struct perf_event *bp;
2832         struct perf_event_attr attr;
2833 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2834 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2835         struct arch_hw_breakpoint brk;
2836 #endif
2837
2838         if (bp_info->version != 1)
2839                 return -ENOTSUPP;
2840 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2841         /*
2842          * Check for invalid flags and combinations
2843          */
2844         if ((bp_info->trigger_type == 0) ||
2845             (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
2846                                        PPC_BREAKPOINT_TRIGGER_RW)) ||
2847             (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
2848             (bp_info->condition_mode &
2849              ~(PPC_BREAKPOINT_CONDITION_MODE |
2850                PPC_BREAKPOINT_CONDITION_BE_ALL)))
2851                 return -EINVAL;
2852 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2853         if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2854                 return -EINVAL;
2855 #endif
2856
2857         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
2858                 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
2859                     (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
2860                         return -EINVAL;
2861                 return set_instruction_bp(child, bp_info);
2862         }
2863         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2864                 return set_dac(child, bp_info);
2865
2866 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2867         return set_dac_range(child, bp_info);
2868 #else
2869         return -EINVAL;
2870 #endif
2871 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2872         /*
2873          * We only support one data breakpoint
2874          */
2875         if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
2876             (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
2877             bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
2878                 return -EINVAL;
2879
2880         if ((unsigned long)bp_info->addr >= TASK_SIZE)
2881                 return -EIO;
2882
2883         brk.address = bp_info->addr & ~7UL;
2884         brk.type = HW_BRK_TYPE_TRANSLATE;
2885         brk.len = 8;
2886         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
2887                 brk.type |= HW_BRK_TYPE_READ;
2888         if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
2889                 brk.type |= HW_BRK_TYPE_WRITE;
2890 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2891         /*
2892          * Check if the request is for 'range' breakpoints. We can
2893          * support it if range < 8 bytes.
2894          */
2895         if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
2896                 len = bp_info->addr2 - bp_info->addr;
2897         else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
2898                 len = 1;
2899         else
2900                 return -EINVAL;
2901         bp = thread->ptrace_bps[0];
2902         if (bp)
2903                 return -ENOSPC;
2904
2905         /* Create a new breakpoint request if one doesn't exist already */
2906         hw_breakpoint_init(&attr);
2907         attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
2908         attr.bp_len = len;
2909         arch_bp_generic_fields(brk.type, &attr.bp_type);
2910
2911         thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
2912                                                ptrace_triggered, NULL, child);
2913         if (IS_ERR(bp)) {
2914                 thread->ptrace_bps[0] = NULL;
2915                 return PTR_ERR(bp);
2916         }
2917
2918         return 1;
2919 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2920
2921         if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
2922                 return -EINVAL;
2923
2924         if (child->thread.hw_brk.address)
2925                 return -ENOSPC;
2926
2927         if (!ppc_breakpoint_available())
2928                 return -ENODEV;
2929
2930         child->thread.hw_brk = brk;
2931
2932         return 1;
2933 #endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
2934 }
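/*
 * Illustrative sketch (not part of the original file): a debugger asking for
 * an inclusive-range write watchpoint via PPC_PTRACE_SETHWDEBUG might build
 * the request like this; 'child_pid' and 'start' are placeholders, and the
 * range is kept under 8 bytes (len is computed above as addr2 - addr) to
 * match the constraint noted in the comment above.
 *
 *	struct ppc_hw_breakpoint range_bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)start,
 *		.addr2          = (__u64)start + 4,
 *	};
 *	ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &range_bp);
 */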
2935
2936 static long ppc_del_hwdebug(struct task_struct *child, long data)
2937 {
2938 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2939         int ret = 0;
2940         struct thread_struct *thread = &(child->thread);
2941         struct perf_event *bp;
2942 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2943 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2944         int rc;
2945
2946         if (data <= 4)
2947                 rc = del_instruction_bp(child, (int)data);
2948         else
2949                 rc = del_dac(child, (int)data - 4);
2950
2951         if (!rc) {
2952                 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
2953                                         child->thread.debug.dbcr1)) {
2954                         child->thread.debug.dbcr0 &= ~DBCR0_IDM;
2955                         child->thread.regs->msr &= ~MSR_DE;
2956                 }
2957         }
2958         return rc;
2959 #else
2960         if (data != 1)
2961                 return -EINVAL;
2962
2963 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2964         bp = thread->ptrace_bps[0];
2965         if (bp) {
2966                 unregister_hw_breakpoint(bp);
2967                 thread->ptrace_bps[0] = NULL;
2968         } else
2969                 ret = -ENOENT;
2970         return ret;
2971 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2972         if (child->thread.hw_brk.address == 0)
2973                 return -ENOENT;
2974
2975         child->thread.hw_brk.address = 0;
2976         child->thread.hw_brk.type = 0;
2977 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2978
2979         return 0;
2980 #endif
2981 }
2982
2983 long arch_ptrace(struct task_struct *child, long request,
2984                  unsigned long addr, unsigned long data)
2985 {
2986         int ret = -EPERM;
2987         void __user *datavp = (void __user *) data;
2988         unsigned long __user *datalp = datavp;
2989
2990         switch (request) {
2991         /* read the word at location addr in the USER area. */
2992         case PTRACE_PEEKUSR: {
2993                 unsigned long index, tmp;
2994
2995                 ret = -EIO;
2996                 /* convert to index and check */
2997 #ifdef CONFIG_PPC32
2998                 index = addr >> 2;
2999                 if ((addr & 3) || (index > PT_FPSCR)
3000                     || (child->thread.regs == NULL))
3001 #else
3002                 index = addr >> 3;
3003                 if ((addr & 7) || (index > PT_FPSCR))
3004 #endif
3005                         break;
3006
3007                 CHECK_FULL_REGS(child->thread.regs);
3008                 if (index < PT_FPR0) {
3009                         ret = ptrace_get_reg(child, (int) index, &tmp);
3010                         if (ret)
3011                                 break;
3012                 } else {
3013                         unsigned int fpidx = index - PT_FPR0;
3014
3015                         flush_fp_to_thread(child);
3016                         if (fpidx < (PT_FPSCR - PT_FPR0))
3017                                 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3018                                        sizeof(long));
3019                         else
3020                                 tmp = child->thread.fp_state.fpscr;
3021                 }
3022                 ret = put_user(tmp, datalp);
3023                 break;
3024         }
3025
3026         /* write the word at location addr in the USER area */
3027         case PTRACE_POKEUSR: {
3028                 unsigned long index;
3029
3030                 ret = -EIO;
3031                 /* convert to index and check */
3032 #ifdef CONFIG_PPC32
3033                 index = addr >> 2;
3034                 if ((addr & 3) || (index > PT_FPSCR)
3035                     || (child->thread.regs == NULL))
3036 #else
3037                 index = addr >> 3;
3038                 if ((addr & 7) || (index > PT_FPSCR))
3039 #endif
3040                         break;
3041
3042                 CHECK_FULL_REGS(child->thread.regs);
3043                 if (index < PT_FPR0) {
3044                         ret = ptrace_put_reg(child, index, data);
3045                 } else {
3046                         unsigned int fpidx = index - PT_FPR0;
3047
3048                         flush_fp_to_thread(child);
3049                         if (fpidx < (PT_FPSCR - PT_FPR0))
3050                                 memcpy(&child->thread.TS_FPR(fpidx), &data,
3051                                        sizeof(long));
3052                         else
3053                                 child->thread.fp_state.fpscr = data;
3054                         ret = 0;
3055                 }
3056                 break;
3057         }
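        /*
         * Illustrative sketch (not part of the original file): from a tracer's
         * point of view, the PEEKUSR/POKEUSR 'addr' is a register index from
         * <asm/ptrace.h> scaled by sizeof(long).  'pid' is a placeholder and
         * PTRACE_PEEKUSER/POKEUSER are the glibc spellings of the requests
         * handled above.
         *
         *	long r3 = ptrace(PTRACE_PEEKUSER, pid, PT_R3 * sizeof(long), 0);
         *	long f0 = ptrace(PTRACE_PEEKUSER, pid, PT_FPR0 * sizeof(long), 0);
         *	ptrace(PTRACE_POKEUSER, pid, PT_R3 * sizeof(long), 42UL);
         */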
3058
3059         case PPC_PTRACE_GETHWDBGINFO: {
3060                 struct ppc_debug_info dbginfo;
3061
3062                 dbginfo.version = 1;
3063 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3064                 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3065                 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3066                 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3067                 dbginfo.data_bp_alignment = 4;
3068                 dbginfo.sizeof_condition = 4;
3069                 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3070                                    PPC_DEBUG_FEATURE_INSN_BP_MASK;
3071 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3072                 dbginfo.features |=
3073                                    PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3074                                    PPC_DEBUG_FEATURE_DATA_BP_MASK;
3075 #endif
3076 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3077                 dbginfo.num_instruction_bps = 0;
3078                 if (ppc_breakpoint_available())
3079                         dbginfo.num_data_bps = 1;
3080                 else
3081                         dbginfo.num_data_bps = 0;
3082                 dbginfo.num_condition_regs = 0;
3083 #ifdef CONFIG_PPC64
3084                 dbginfo.data_bp_alignment = 8;
3085 #else
3086                 dbginfo.data_bp_alignment = 4;
3087 #endif
3088                 dbginfo.sizeof_condition = 0;
3089 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3090                 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3091                 if (cpu_has_feature(CPU_FTR_DAWR))
3092                         dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3093 #else
3094                 dbginfo.features = 0;
3095 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3096 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3097
3098                 if (copy_to_user(datavp, &dbginfo,
3099                                  sizeof(struct ppc_debug_info)))
3100                         return -EFAULT;
3101                 return 0;
3102         }
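        /*
         * Illustrative sketch (not part of the original file): userspace would
         * typically probe the capabilities reported above before trying to set
         * any breakpoint; 'child_pid' is a placeholder.
         *
         *	struct ppc_debug_info dbginfo;
         *
         *	if (ptrace(PPC_PTRACE_GETHWDBGINFO, child_pid, NULL, &dbginfo) == 0)
         *		printf("%u data breakpoint(s)\n", dbginfo.num_data_bps);
         */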
3103
3104         case PPC_PTRACE_SETHWDEBUG: {
3105                 struct ppc_hw_breakpoint bp_info;
3106
3107                 if (copy_from_user(&bp_info, datavp,
3108                                    sizeof(struct ppc_hw_breakpoint)))
3109                         return -EFAULT;
3110                 return ppc_set_hwdebug(child, &bp_info);
3111         }
3112
3113         case PPC_PTRACE_DELHWDEBUG: {
3114                 ret = ppc_del_hwdebug(child, data);
3115                 break;
3116         }
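        /*
         * Illustrative sketch (not part of the original file): a typical
         * set-then-clear sequence for an exact-address write watchpoint.
         * 'child_pid' and 'addr_to_watch' are placeholders; the handle
         * returned by PPC_PTRACE_SETHWDEBUG is what PPC_PTRACE_DELHWDEBUG
         * expects in 'data'.
         *
         *	struct ppc_hw_breakpoint bp = {
         *		.version        = 1,
         *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
         *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
         *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
         *		.addr           = (__u64)addr_to_watch,
         *	};
         *	int handle = ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &bp);
         *
         *	if (handle > 0)
         *		ptrace(PPC_PTRACE_DELHWDEBUG, child_pid, 0, handle);
         */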
3117
3118         case PTRACE_GET_DEBUGREG: {
3119 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3120                 unsigned long dabr_fake;
3121 #endif
3122                 ret = -EINVAL;
3123                 /* We only support one DABR and no IABRs at the moment */
3124                 if (addr > 0)
3125                         break;
3126 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3127                 ret = put_user(child->thread.debug.dac1, datalp);
3128 #else
3129                 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3130                              (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3131                 ret = put_user(dabr_fake, datalp);
3132 #endif
3133                 break;
3134         }
3135
3136         case PTRACE_SET_DEBUGREG:
3137                 ret = ptrace_set_debugreg(child, addr, data);
3138                 break;
3139
3140 #ifdef CONFIG_PPC64
3141         case PTRACE_GETREGS64:
3142 #endif
3143         case PTRACE_GETREGS:    /* Get all pt_regs from the child. */
3144                 return copy_regset_to_user(child, &user_ppc_native_view,
3145                                            REGSET_GPR,
3146                                            0, sizeof(struct user_pt_regs),
3147                                            datavp);
3148
3149 #ifdef CONFIG_PPC64
3150         case PTRACE_SETREGS64:
3151 #endif
3152         case PTRACE_SETREGS:    /* Set all gp regs in the child. */
3153                 return copy_regset_from_user(child, &user_ppc_native_view,
3154                                              REGSET_GPR,
3155                                              0, sizeof(struct user_pt_regs),
3156                                              datavp);
3157
3158         case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3159                 return copy_regset_to_user(child, &user_ppc_native_view,
3160                                            REGSET_FPR,
3161                                            0, sizeof(elf_fpregset_t),
3162                                            datavp);
3163
3164         case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3165                 return copy_regset_from_user(child, &user_ppc_native_view,
3166                                              REGSET_FPR,
3167                                              0, sizeof(elf_fpregset_t),
3168                                              datavp);
3169
3170 #ifdef CONFIG_ALTIVEC
3171         case PTRACE_GETVRREGS:
3172                 return copy_regset_to_user(child, &user_ppc_native_view,
3173                                            REGSET_VMX,
3174                                            0, (33 * sizeof(vector128) +
3175                                                sizeof(u32)),
3176                                            datavp);
3177
3178         case PTRACE_SETVRREGS:
3179                 return copy_regset_from_user(child, &user_ppc_native_view,
3180                                              REGSET_VMX,
3181                                              0, (33 * sizeof(vector128) +
3182                                                  sizeof(u32)),
3183                                              datavp);
3184 #endif
3185 #ifdef CONFIG_VSX
3186         case PTRACE_GETVSRREGS:
3187                 return copy_regset_to_user(child, &user_ppc_native_view,
3188                                            REGSET_VSX,
3189                                            0, 32 * sizeof(double),
3190                                            datavp);
3191
3192         case PTRACE_SETVSRREGS:
3193                 return copy_regset_from_user(child, &user_ppc_native_view,
3194                                              REGSET_VSX,
3195                                              0, 32 * sizeof(double),
3196                                              datavp);
3197 #endif
3198 #ifdef CONFIG_SPE
3199         case PTRACE_GETEVRREGS:
3200                 /* Get the child spe register state. */
3201                 return copy_regset_to_user(child, &user_ppc_native_view,
3202                                            REGSET_SPE, 0, 35 * sizeof(u32),
3203                                            datavp);
3204
3205         case PTRACE_SETEVRREGS:
3206                 /* Set the child spe register state. */
3207                 return copy_regset_from_user(child, &user_ppc_native_view,
3208                                              REGSET_SPE, 0, 35 * sizeof(u32),
3209                                              datavp);
3210 #endif
3211
3212         default:
3213                 ret = ptrace_request(child, request, addr, data);
3214                 break;
3215         }
3216         return ret;
3217 }
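/*
 * Illustrative sketch (not part of the original file): the regset-backed
 * requests above move whole register blocks at once.  For example, a tracer
 * can read every GPR plus nip, msr, ctr, link, etc. in one call using the
 * uapi register layout; 'pid' is a placeholder.
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *
 *	struct pt_regs gregs;
 *
 *	ptrace(PTRACE_GETREGS, pid, 0, &gregs);
 *	printf("nip=%#lx r1=%#lx\n", gregs.nip, gregs.gpr[1]);
 */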
3218
3219 #ifdef CONFIG_SECCOMP
3220 static int do_seccomp(struct pt_regs *regs)
3221 {
3222         if (!test_thread_flag(TIF_SECCOMP))
3223                 return 0;
3224
3225         /*
3226          * The ABI we present to seccomp tracers is that r3 contains
3227          * the syscall return value and orig_gpr3 contains the first
3228          * syscall parameter. This is different to the ptrace ABI where
3229          * both r3 and orig_gpr3 contain the first syscall parameter.
3230          */
3231         regs->gpr[3] = -ENOSYS;
3232
3233         /*
3234          * We use the __ version here because we have already checked
3235          * TIF_SECCOMP. If this fails, there is nothing left to do, we
3236          * have already loaded -ENOSYS into r3, or seccomp has put
3237          * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
3238          */
3239         if (__secure_computing(NULL))
3240                 return -1;
3241
3242         /*
3243          * The syscall was allowed by seccomp, restore the register
3244          * state to what audit expects.
3245          * Note that we use orig_gpr3, which means a seccomp tracer can
3246          * modify the first syscall parameter (in orig_gpr3) and also
3247          * allow the syscall to proceed.
3248          */
3249         regs->gpr[3] = regs->orig_gpr3;
3250
3251         return 0;
3252 }
3253 #else
3254 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
3255 #endif /* CONFIG_SECCOMP */
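/*
 * Illustrative sketch (not part of the original file): because of the ABI
 * described above, a tracer handling a seccomp stop would read the first
 * syscall argument from orig_gpr3 rather than r3, since r3 already holds
 * -ENOSYS at that point; 'pid' is a placeholder.
 *
 *	unsigned long arg0 = ptrace(PTRACE_PEEKUSER, pid,
 *				    PT_ORIG_R3 * sizeof(long), 0);
 */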
3256
3257 /**
3258  * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3259  * @regs: the pt_regs of the task to trace (current)
3260  *
3261  * Performs various types of tracing on syscall entry. This includes seccomp,
3262  * ptrace, syscall tracepoints and audit.
3263  *
3264  * The pt_regs are potentially visible to userspace via ptrace, so their
3265  * contents are part of the ABI.
3266  *
3267  * One or more of the tracers may modify the contents of pt_regs, in particular
3268  * to modify arguments or even the syscall number itself.
3269  *
3270  * It's also possible that a tracer can choose to reject the system call. In
3271  * that case this function will return an illegal syscall number, and will put
3272  * an appropriate return value in regs->gpr[3].
3273  *
3274  * Return: the (possibly changed) syscall number.
3275  */
3276 long do_syscall_trace_enter(struct pt_regs *regs)
3277 {
3278         u32 flags;
3279
3280         user_exit();
3281
3282         flags = READ_ONCE(current_thread_info()->flags) &
3283                 (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
3284
3285         if (flags) {
3286                 int rc = tracehook_report_syscall_entry(regs);
3287
3288                 if (unlikely(flags & _TIF_SYSCALL_EMU)) {
3289                         /*
3290                          * A nonzero return code from
3291                          * tracehook_report_syscall_entry() tells us to prevent
3292                          * the syscall execution, but we are not going to
3293                          * execute it anyway.
3294                          *
3295                          * Returning -1 will skip the syscall execution. We want
3296                          * to avoid clobbering any registers, so we don't goto
3297                          * the skip label below.
3298                          */
3299                         return -1;
3300                 }
3301
3302                 if (rc) {
3303                         /*
3304                          * The tracer decided to abort the syscall. Note that
3305                          * the tracer may also just change regs->gpr[0] to an
3306                          * invalid syscall number, that is handled below on the
3307                          * exit path.
3308                          */
3309                         goto skip;
3310                 }
3311         }
3312
3313         /* Run seccomp after ptrace; allow it to set gpr[3]. */
3314         if (do_seccomp(regs))
3315                 return -1;
3316
3317         /* Avoid trace and audit when syscall is invalid. */
3318         if (regs->gpr[0] >= NR_syscalls)
3319                 goto skip;
3320
3321         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3322                 trace_sys_enter(regs, regs->gpr[0]);
3323
3324 #ifdef CONFIG_PPC64
3325         if (!is_32bit_task())
3326                 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
3327                                     regs->gpr[5], regs->gpr[6]);
3328         else
3329 #endif
3330                 audit_syscall_entry(regs->gpr[0],
3331                                     regs->gpr[3] & 0xffffffff,
3332                                     regs->gpr[4] & 0xffffffff,
3333                                     regs->gpr[5] & 0xffffffff,
3334                                     regs->gpr[6] & 0xffffffff);
3335
3336         /* Return the possibly modified but valid syscall number */
3337         return regs->gpr[0];
3338
3339 skip:
3340         /*
3341          * If we are aborting explicitly, or if the syscall number is
3342          * now invalid, set the return value to -ENOSYS.
3343          */
3344         regs->gpr[3] = -ENOSYS;
3345         return -1;
3346 }
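/*
 * Illustrative sketch (not part of the original file): one way a ptracer can
 * reject a syscall, as described in the kernel-doc above, is to overwrite r0
 * with an invalid syscall number at the syscall-entry stop; the NR_syscalls
 * check above then takes the skip path and the tracee sees -ENOSYS.  'pid'
 * is a placeholder.
 *
 *	ptrace(PTRACE_POKEUSER, pid, PT_R0 * sizeof(long), (unsigned long)-1);
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);
 */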
3347
3348 void do_syscall_trace_leave(struct pt_regs *regs)
3349 {
3350         int step;
3351
3352         audit_syscall_exit(regs);
3353
3354         if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3355                 trace_sys_exit(regs, regs->result);
3356
3357         step = test_thread_flag(TIF_SINGLESTEP);
3358         if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3359                 tracehook_report_syscall_exit(regs, step);
3360
3361         user_enter();
3362 }
3363
3364 void __init pt_regs_check(void)
3365 {
3366         BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
3367                      offsetof(struct user_pt_regs, gpr));
3368         BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
3369                      offsetof(struct user_pt_regs, nip));
3370         BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
3371                      offsetof(struct user_pt_regs, msr));
3374         BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
3375                      offsetof(struct user_pt_regs, orig_gpr3));
3376         BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
3377                      offsetof(struct user_pt_regs, ctr));
3378         BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
3379                      offsetof(struct user_pt_regs, link));
3380         BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
3381                      offsetof(struct user_pt_regs, xer));
3382         BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
3383                      offsetof(struct user_pt_regs, ccr));
3384 #ifdef __powerpc64__
3385         BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
3386                      offsetof(struct user_pt_regs, softe));
3387 #else
3388         BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
3389                      offsetof(struct user_pt_regs, mq));
3390 #endif
3391         BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
3392                      offsetof(struct user_pt_regs, trap));
3393         BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
3394                      offsetof(struct user_pt_regs, dar));
3395         BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
3396                      offsetof(struct user_pt_regs, dsisr));
3397         BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
3398                      offsetof(struct user_pt_regs, result));
3399
3400         BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
3401 }