arch/arm64/kernel/ptrace.c
1 /*
2  * Based on arch/arm/kernel/ptrace.c
3  *
4  * By Ross Biro 1/23/92
5  * edited by Linus Torvalds
6  * ARM modifications Copyright (C) 2000 Russell King
7  * Copyright (C) 2012 ARM Ltd.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/sched/task_stack.h>
27 #include <linux/mm.h>
28 #include <linux/smp.h>
29 #include <linux/ptrace.h>
30 #include <linux/user.h>
31 #include <linux/seccomp.h>
32 #include <linux/security.h>
33 #include <linux/init.h>
34 #include <linux/signal.h>
35 #include <linux/string.h>
36 #include <linux/uaccess.h>
37 #include <linux/perf_event.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/regset.h>
40 #include <linux/tracehook.h>
41 #include <linux/elf.h>
42
43 #include <asm/compat.h>
44 #include <asm/cpufeature.h>
45 #include <asm/debug-monitors.h>
46 #include <asm/pgtable.h>
47 #include <asm/stacktrace.h>
48 #include <asm/syscall.h>
49 #include <asm/traps.h>
50 #include <asm/system_misc.h>
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/syscalls.h>
54
55 struct pt_regs_offset {
56         const char *name;
57         int offset;
58 };
59
60 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
61 #define REG_OFFSET_END {.name = NULL, .offset = 0}
62 #define GPR_OFFSET_NAME(r) \
63         {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
64
65 static const struct pt_regs_offset regoffset_table[] = {
66         GPR_OFFSET_NAME(0),
67         GPR_OFFSET_NAME(1),
68         GPR_OFFSET_NAME(2),
69         GPR_OFFSET_NAME(3),
70         GPR_OFFSET_NAME(4),
71         GPR_OFFSET_NAME(5),
72         GPR_OFFSET_NAME(6),
73         GPR_OFFSET_NAME(7),
74         GPR_OFFSET_NAME(8),
75         GPR_OFFSET_NAME(9),
76         GPR_OFFSET_NAME(10),
77         GPR_OFFSET_NAME(11),
78         GPR_OFFSET_NAME(12),
79         GPR_OFFSET_NAME(13),
80         GPR_OFFSET_NAME(14),
81         GPR_OFFSET_NAME(15),
82         GPR_OFFSET_NAME(16),
83         GPR_OFFSET_NAME(17),
84         GPR_OFFSET_NAME(18),
85         GPR_OFFSET_NAME(19),
86         GPR_OFFSET_NAME(20),
87         GPR_OFFSET_NAME(21),
88         GPR_OFFSET_NAME(22),
89         GPR_OFFSET_NAME(23),
90         GPR_OFFSET_NAME(24),
91         GPR_OFFSET_NAME(25),
92         GPR_OFFSET_NAME(26),
93         GPR_OFFSET_NAME(27),
94         GPR_OFFSET_NAME(28),
95         GPR_OFFSET_NAME(29),
96         GPR_OFFSET_NAME(30),
97         {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
98         REG_OFFSET_NAME(sp),
99         REG_OFFSET_NAME(pc),
100         REG_OFFSET_NAME(pstate),
101         REG_OFFSET_END,
102 };
103
104 /**
105  * regs_query_register_offset() - query register offset from its name
106  * @name:       the name of a register
107  *
108  * regs_query_register_offset() returns the offset of a register in struct
109  * pt_regs from its name. If the name is invalid, this returns -EINVAL.
110  */
111 int regs_query_register_offset(const char *name)
112 {
113         const struct pt_regs_offset *roff;
114
115         for (roff = regoffset_table; roff->name != NULL; roff++)
116                 if (!strcmp(roff->name, name))
117                         return roff->offset;
118         return -EINVAL;
119 }
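
/*
 * Example (illustrative, not part of the original source): with the table
 * above, regs_query_register_offset("x0") returns
 * offsetof(struct pt_regs, regs[0]), "lr" resolves to the same offset as
 * "x30", and an unknown name such as "x31" yields -EINVAL.
 */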
120
121 /**
122  * regs_within_kernel_stack() - check the address in the stack
123  * @regs:      pt_regs which contains kernel stack pointer.
124  * @addr:      address which is checked.
125  *
126  * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s).
127  * If @addr is within the kernel stack, it returns true. If not, returns false.
128  */
129 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
130 {
131         return ((addr & ~(THREAD_SIZE - 1))  ==
132                 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
133                 on_irq_stack(addr);
134 }
135
136 /**
137  * regs_get_kernel_stack_nth() - get Nth entry of the stack
138  * @regs:       pt_regs which contains kernel stack pointer.
139  * @n:          stack entry number.
140  *
141  * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
142  * specified by @regs. If the @n th entry is NOT in the kernel stack,
143  * this returns 0.
144  */
145 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
146 {
147         unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
148
149         addr += n;
150         if (regs_within_kernel_stack(regs, (unsigned long)addr))
151                 return *addr;
152         else
153                 return 0;
154 }
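
/*
 * Example (illustrative): regs_get_kernel_stack_nth(regs, 0) reads the word
 * at the kernel stack pointer itself, n = 1 the next word up, and so on;
 * once the computed address leaves the task or IRQ stack the function
 * returns 0 instead of dereferencing it.
 */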
155
156 /*
157  * TODO: does not yet catch signals sent when the child dies
158  * in exit.c or in signal.c.
159  */
160
161 /*
162  * Called by kernel/ptrace.c when detaching.
163  */
164 void ptrace_disable(struct task_struct *child)
165 {
166         /*
167          * This would be better off in core code, but PTRACE_DETACH has
168          * grown its fair share of arch-specific warts and changing it
169          * is likely to cause regressions on obscure architectures.
170          */
171         user_disable_single_step(child);
172 }
173
174 #ifdef CONFIG_HAVE_HW_BREAKPOINT
175 /*
176  * Handle hitting a HW-breakpoint.
177  */
178 static void ptrace_hbptriggered(struct perf_event *bp,
179                                 struct perf_sample_data *data,
180                                 struct pt_regs *regs)
181 {
182         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
183         siginfo_t info = {
184                 .si_signo       = SIGTRAP,
185                 .si_errno       = 0,
186                 .si_code        = TRAP_HWBKPT,
187                 .si_addr        = (void __user *)(bkpt->trigger),
188         };
189
190 #ifdef CONFIG_COMPAT
191         int i;
192
193         if (!is_compat_task())
194                 goto send_sig;
195
196         for (i = 0; i < ARM_MAX_BRP; ++i) {
197                 if (current->thread.debug.hbp_break[i] == bp) {
198                         info.si_errno = (i << 1) + 1;
199                         break;
200                 }
201         }
202
203         for (i = 0; i < ARM_MAX_WRP; ++i) {
204                 if (current->thread.debug.hbp_watch[i] == bp) {
205                         info.si_errno = -((i << 1) + 1);
206                         break;
207                 }
208         }
209
210 send_sig:
211 #endif
212         force_sig_info(SIGTRAP, &info, current);
213 }
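
/*
 * Note (added for clarity): for a compat (AArch32) tracee the code above
 * encodes the triggering slot in si_errno -- breakpoint slot i reports
 * (i << 1) + 1 and watchpoint slot i reports -((i << 1) + 1), mirroring the
 * positive/negative register numbering used by the compat ptrace hbp
 * interface further down in this file. For native tasks si_errno stays 0.
 */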
214
215 /*
216  * Unregister breakpoints from this task and reset the pointers in
217  * the thread_struct.
218  */
219 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
220 {
221         int i;
222         struct thread_struct *t = &tsk->thread;
223
224         for (i = 0; i < ARM_MAX_BRP; i++) {
225                 if (t->debug.hbp_break[i]) {
226                         unregister_hw_breakpoint(t->debug.hbp_break[i]);
227                         t->debug.hbp_break[i] = NULL;
228                 }
229         }
230
231         for (i = 0; i < ARM_MAX_WRP; i++) {
232                 if (t->debug.hbp_watch[i]) {
233                         unregister_hw_breakpoint(t->debug.hbp_watch[i]);
234                         t->debug.hbp_watch[i] = NULL;
235                 }
236         }
237 }
238
239 void ptrace_hw_copy_thread(struct task_struct *tsk)
240 {
241         memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
242 }
243
244 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
245                                                struct task_struct *tsk,
246                                                unsigned long idx)
247 {
248         struct perf_event *bp = ERR_PTR(-EINVAL);
249
250         switch (note_type) {
251         case NT_ARM_HW_BREAK:
252                 if (idx < ARM_MAX_BRP)
253                         bp = tsk->thread.debug.hbp_break[idx];
254                 break;
255         case NT_ARM_HW_WATCH:
256                 if (idx < ARM_MAX_WRP)
257                         bp = tsk->thread.debug.hbp_watch[idx];
258                 break;
259         }
260
261         return bp;
262 }
263
264 static int ptrace_hbp_set_event(unsigned int note_type,
265                                 struct task_struct *tsk,
266                                 unsigned long idx,
267                                 struct perf_event *bp)
268 {
269         int err = -EINVAL;
270
271         switch (note_type) {
272         case NT_ARM_HW_BREAK:
273                 if (idx < ARM_MAX_BRP) {
274                         tsk->thread.debug.hbp_break[idx] = bp;
275                         err = 0;
276                 }
277                 break;
278         case NT_ARM_HW_WATCH:
279                 if (idx < ARM_MAX_WRP) {
280                         tsk->thread.debug.hbp_watch[idx] = bp;
281                         err = 0;
282                 }
283                 break;
284         }
285
286         return err;
287 }
288
289 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
290                                             struct task_struct *tsk,
291                                             unsigned long idx)
292 {
293         struct perf_event *bp;
294         struct perf_event_attr attr;
295         int err, type;
296
297         switch (note_type) {
298         case NT_ARM_HW_BREAK:
299                 type = HW_BREAKPOINT_X;
300                 break;
301         case NT_ARM_HW_WATCH:
302                 type = HW_BREAKPOINT_RW;
303                 break;
304         default:
305                 return ERR_PTR(-EINVAL);
306         }
307
308         ptrace_breakpoint_init(&attr);
309
310         /*
311          * Initialise fields to sane defaults
312          * (i.e. values that will pass validation).
313          */
314         attr.bp_addr    = 0;
315         attr.bp_len     = HW_BREAKPOINT_LEN_4;
316         attr.bp_type    = type;
317         attr.disabled   = 1;
318
319         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
320         if (IS_ERR(bp))
321                 return bp;
322
323         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
324         if (err)
325                 return ERR_PTR(err);
326
327         return bp;
328 }
329
330 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
331                                      struct arch_hw_breakpoint_ctrl ctrl,
332                                      struct perf_event_attr *attr)
333 {
334         int err, len, type, offset, disabled = !ctrl.enabled;
335
336         attr->disabled = disabled;
337         if (disabled)
338                 return 0;
339
340         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
341         if (err)
342                 return err;
343
344         switch (note_type) {
345         case NT_ARM_HW_BREAK:
346                 if ((type & HW_BREAKPOINT_X) != type)
347                         return -EINVAL;
348                 break;
349         case NT_ARM_HW_WATCH:
350                 if ((type & HW_BREAKPOINT_RW) != type)
351                         return -EINVAL;
352                 break;
353         default:
354                 return -EINVAL;
355         }
356
357         attr->bp_len    = len;
358         attr->bp_type   = type;
359         attr->bp_addr   += offset;
360
361         return 0;
362 }
363
364 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
365 {
366         u8 num;
367         u32 reg = 0;
368
369         switch (note_type) {
370         case NT_ARM_HW_BREAK:
371                 num = hw_breakpoint_slots(TYPE_INST);
372                 break;
373         case NT_ARM_HW_WATCH:
374                 num = hw_breakpoint_slots(TYPE_DATA);
375                 break;
376         default:
377                 return -EINVAL;
378         }
379
380         reg |= debug_monitors_arch();
381         reg <<= 8;
382         reg |= num;
383
384         *info = reg;
385         return 0;
386 }
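
/*
 * Note (added for clarity): the resource word built above packs the debug
 * architecture version from debug_monitors_arch() into bits [15:8] and the
 * number of available slots into bits [7:0].
 */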
387
388 static int ptrace_hbp_get_ctrl(unsigned int note_type,
389                                struct task_struct *tsk,
390                                unsigned long idx,
391                                u32 *ctrl)
392 {
393         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
394
395         if (IS_ERR(bp))
396                 return PTR_ERR(bp);
397
398         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
399         return 0;
400 }
401
402 static int ptrace_hbp_get_addr(unsigned int note_type,
403                                struct task_struct *tsk,
404                                unsigned long idx,
405                                u64 *addr)
406 {
407         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
408
409         if (IS_ERR(bp))
410                 return PTR_ERR(bp);
411
412         *addr = bp ? counter_arch_bp(bp)->address : 0;
413         return 0;
414 }
415
416 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
417                                                         struct task_struct *tsk,
418                                                         unsigned long idx)
419 {
420         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
421
422         if (!bp)
423                 bp = ptrace_hbp_create(note_type, tsk, idx);
424
425         return bp;
426 }
427
428 static int ptrace_hbp_set_ctrl(unsigned int note_type,
429                                struct task_struct *tsk,
430                                unsigned long idx,
431                                u32 uctrl)
432 {
433         int err;
434         struct perf_event *bp;
435         struct perf_event_attr attr;
436         struct arch_hw_breakpoint_ctrl ctrl;
437
438         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
439         if (IS_ERR(bp)) {
440                 err = PTR_ERR(bp);
441                 return err;
442         }
443
444         attr = bp->attr;
445         decode_ctrl_reg(uctrl, &ctrl);
446         err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
447         if (err)
448                 return err;
449
450         return modify_user_hw_breakpoint(bp, &attr);
451 }
452
453 static int ptrace_hbp_set_addr(unsigned int note_type,
454                                struct task_struct *tsk,
455                                unsigned long idx,
456                                u64 addr)
457 {
458         int err;
459         struct perf_event *bp;
460         struct perf_event_attr attr;
461
462         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
463         if (IS_ERR(bp)) {
464                 err = PTR_ERR(bp);
465                 return err;
466         }
467
468         attr = bp->attr;
469         attr.bp_addr = addr;
470         err = modify_user_hw_breakpoint(bp, &attr);
471         return err;
472 }
473
474 #define PTRACE_HBP_ADDR_SZ      sizeof(u64)
475 #define PTRACE_HBP_CTRL_SZ      sizeof(u32)
476 #define PTRACE_HBP_PAD_SZ       sizeof(u32)
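
/*
 * Layout note (added for clarity): hw_break_get()/hw_break_set() below copy
 * the NT_ARM_HW_BREAK / NT_ARM_HW_WATCH regset as a u32 resource-info word,
 * a u32 pad, and then one { u64 addr; u32 ctrl; u32 pad; } triple per slot,
 * matching struct user_hwdebug_state.
 */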
477
478 static int hw_break_get(struct task_struct *target,
479                         const struct user_regset *regset,
480                         unsigned int pos, unsigned int count,
481                         void *kbuf, void __user *ubuf)
482 {
483         unsigned int note_type = regset->core_note_type;
484         int ret, idx = 0, offset, limit;
485         u32 info, ctrl;
486         u64 addr;
487
488         /* Resource info */
489         ret = ptrace_hbp_get_resource_info(note_type, &info);
490         if (ret)
491                 return ret;
492
493         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
494                                   sizeof(info));
495         if (ret)
496                 return ret;
497
498         /* Pad */
499         offset = offsetof(struct user_hwdebug_state, pad);
500         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
501                                        offset + PTRACE_HBP_PAD_SZ);
502         if (ret)
503                 return ret;
504
505         /* (address, ctrl) registers */
506         offset = offsetof(struct user_hwdebug_state, dbg_regs);
507         limit = regset->n * regset->size;
508         while (count && offset < limit) {
509                 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
510                 if (ret)
511                         return ret;
512                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
513                                           offset, offset + PTRACE_HBP_ADDR_SZ);
514                 if (ret)
515                         return ret;
516                 offset += PTRACE_HBP_ADDR_SZ;
517
518                 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
519                 if (ret)
520                         return ret;
521                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
522                                           offset, offset + PTRACE_HBP_CTRL_SZ);
523                 if (ret)
524                         return ret;
525                 offset += PTRACE_HBP_CTRL_SZ;
526
527                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
528                                                offset,
529                                                offset + PTRACE_HBP_PAD_SZ);
530                 if (ret)
531                         return ret;
532                 offset += PTRACE_HBP_PAD_SZ;
533                 idx++;
534         }
535
536         return 0;
537 }
538
539 static int hw_break_set(struct task_struct *target,
540                         const struct user_regset *regset,
541                         unsigned int pos, unsigned int count,
542                         const void *kbuf, const void __user *ubuf)
543 {
544         unsigned int note_type = regset->core_note_type;
545         int ret, idx = 0, offset, limit;
546         u32 ctrl;
547         u64 addr;
548
549         /* Resource info and pad */
550         offset = offsetof(struct user_hwdebug_state, dbg_regs);
551         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
552         if (ret)
553                 return ret;
554
555         /* (address, ctrl) registers */
556         limit = regset->n * regset->size;
557         while (count && offset < limit) {
558                 if (count < PTRACE_HBP_ADDR_SZ)
559                         return -EINVAL;
560                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
561                                          offset, offset + PTRACE_HBP_ADDR_SZ);
562                 if (ret)
563                         return ret;
564                 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
565                 if (ret)
566                         return ret;
567                 offset += PTRACE_HBP_ADDR_SZ;
568
569                 if (!count)
570                         break;
571                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
572                                          offset, offset + PTRACE_HBP_CTRL_SZ);
573                 if (ret)
574                         return ret;
575                 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
576                 if (ret)
577                         return ret;
578                 offset += PTRACE_HBP_CTRL_SZ;
579
580                 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
581                                                 offset,
582                                                 offset + PTRACE_HBP_PAD_SZ);
583                 if (ret)
584                         return ret;
585                 offset += PTRACE_HBP_PAD_SZ;
586                 idx++;
587         }
588
589         return 0;
590 }
591 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
592
593 static int gpr_get(struct task_struct *target,
594                    const struct user_regset *regset,
595                    unsigned int pos, unsigned int count,
596                    void *kbuf, void __user *ubuf)
597 {
598         struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
599         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
600 }
601
602 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
603                    unsigned int pos, unsigned int count,
604                    const void *kbuf, const void __user *ubuf)
605 {
606         int ret;
607         struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
608
609         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
610         if (ret)
611                 return ret;
612
613         if (!valid_user_regs(&newregs, target))
614                 return -EINVAL;
615
616         task_pt_regs(target)->user_regs = newregs;
617         return 0;
618 }
619
620 /*
621  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
622  */
623 static int __fpr_get(struct task_struct *target,
624                      const struct user_regset *regset,
625                      unsigned int pos, unsigned int count,
626                      void *kbuf, void __user *ubuf, unsigned int start_pos)
627 {
628         struct user_fpsimd_state *uregs;
629
630         sve_sync_to_fpsimd(target);
631
632         uregs = &target->thread.fpsimd_state.user_fpsimd;
633
634         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
635                                    start_pos, start_pos + sizeof(*uregs));
636 }
637
638 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
639                    unsigned int pos, unsigned int count,
640                    void *kbuf, void __user *ubuf)
641 {
642         if (target == current)
643                 fpsimd_preserve_current_state();
644
645         return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
646 }
647
648 static int __fpr_set(struct task_struct *target,
649                      const struct user_regset *regset,
650                      unsigned int pos, unsigned int count,
651                      const void *kbuf, const void __user *ubuf,
652                      unsigned int start_pos)
653 {
654         int ret;
655         struct user_fpsimd_state newstate;
656
657         /*
658          * Ensure target->thread.fpsimd_state is up to date, so that a
659          * short copyin can't resurrect stale data.
660          */
661         sve_sync_to_fpsimd(target);
662
663         newstate = target->thread.fpsimd_state.user_fpsimd;
664
665         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
666                                  start_pos, start_pos + sizeof(newstate));
667         if (ret)
668                 return ret;
669
670         target->thread.fpsimd_state.user_fpsimd = newstate;
671
672         return ret;
673 }
674
675 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
676                    unsigned int pos, unsigned int count,
677                    const void *kbuf, const void __user *ubuf)
678 {
679         int ret;
680
681         ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
682         if (ret)
683                 return ret;
684
685         sve_sync_from_fpsimd_zeropad(target);
686         fpsimd_flush_task_state(target);
687
688         return ret;
689 }
690
691 static int tls_get(struct task_struct *target, const struct user_regset *regset,
692                    unsigned int pos, unsigned int count,
693                    void *kbuf, void __user *ubuf)
694 {
695         unsigned long *tls = &target->thread.tp_value;
696
697         if (target == current)
698                 tls_preserve_current_state();
699
700         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
701 }
702
703 static int tls_set(struct task_struct *target, const struct user_regset *regset,
704                    unsigned int pos, unsigned int count,
705                    const void *kbuf, const void __user *ubuf)
706 {
707         int ret;
708         unsigned long tls = target->thread.tp_value;
709
710         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
711         if (ret)
712                 return ret;
713
714         target->thread.tp_value = tls;
715         return ret;
716 }
717
718 static int system_call_get(struct task_struct *target,
719                            const struct user_regset *regset,
720                            unsigned int pos, unsigned int count,
721                            void *kbuf, void __user *ubuf)
722 {
723         int syscallno = task_pt_regs(target)->syscallno;
724
725         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
726                                    &syscallno, 0, -1);
727 }
728
729 static int system_call_set(struct task_struct *target,
730                            const struct user_regset *regset,
731                            unsigned int pos, unsigned int count,
732                            const void *kbuf, const void __user *ubuf)
733 {
734         int syscallno = task_pt_regs(target)->syscallno;
735         int ret;
736
737         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
738         if (ret)
739                 return ret;
740
741         task_pt_regs(target)->syscallno = syscallno;
742         return ret;
743 }
744
745 #ifdef CONFIG_ARM64_SVE
746
747 static void sve_init_header_from_task(struct user_sve_header *header,
748                                       struct task_struct *target)
749 {
750         unsigned int vq;
751
752         memset(header, 0, sizeof(*header));
753
754         header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
755                 SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
756         if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
757                 header->flags |= SVE_PT_VL_INHERIT;
758
759         header->vl = target->thread.sve_vl;
760         vq = sve_vq_from_vl(header->vl);
761
762         header->max_vl = sve_max_vl;
763         if (WARN_ON(!sve_vl_valid(sve_max_vl)))
764                 header->max_vl = header->vl;
765
766         header->size = SVE_PT_SIZE(vq, header->flags);
767         header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
768                                       SVE_PT_REGS_SVE);
769 }
770
771 static unsigned int sve_size_from_header(struct user_sve_header const *header)
772 {
773         return ALIGN(header->size, SVE_VQ_BYTES);
774 }
775
776 static unsigned int sve_get_size(struct task_struct *target,
777                                  const struct user_regset *regset)
778 {
779         struct user_sve_header header;
780
781         if (!system_supports_sve())
782                 return 0;
783
784         sve_init_header_from_task(&header, target);
785         return sve_size_from_header(&header);
786 }
787
788 static int sve_get(struct task_struct *target,
789                    const struct user_regset *regset,
790                    unsigned int pos, unsigned int count,
791                    void *kbuf, void __user *ubuf)
792 {
793         int ret;
794         struct user_sve_header header;
795         unsigned int vq;
796         unsigned long start, end;
797
798         if (!system_supports_sve())
799                 return -EINVAL;
800
801         /* Header */
802         sve_init_header_from_task(&header, target);
803         vq = sve_vq_from_vl(header.vl);
804
805         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
806                                   0, sizeof(header));
807         if (ret)
808                 return ret;
809
810         if (target == current)
811                 fpsimd_preserve_current_state();
812
813         /* Registers: FPSIMD-only case */
814
815         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
816         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
817                 return __fpr_get(target, regset, pos, count, kbuf, ubuf,
818                                  SVE_PT_FPSIMD_OFFSET);
819
820         /* Otherwise: full SVE case */
821
822         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
823         start = SVE_PT_SVE_OFFSET;
824         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
825         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
826                                   target->thread.sve_state,
827                                   start, end);
828         if (ret)
829                 return ret;
830
831         start = end;
832         end = SVE_PT_SVE_FPSR_OFFSET(vq);
833         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
834                                        start, end);
835         if (ret)
836                 return ret;
837
838         /*
839          * Copy fpsr and fpcr, which must follow contiguously in
840          * struct fpsimd_state:
841          */
842         start = end;
843         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
844         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
845                                   &target->thread.fpsimd_state.fpsr,
846                                   start, end);
847         if (ret)
848                 return ret;
849
850         start = end;
851         end = sve_size_from_header(&header);
852         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
853                                         start, end);
854 }
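
/*
 * Layout note (added for clarity): the NT_ARM_SVE dump produced above is a
 * struct user_sve_header followed either by the FPSIMD view at
 * SVE_PT_FPSIMD_OFFSET (when the header flags select SVE_PT_REGS_FPSIMD) or
 * by the full SVE view at SVE_PT_SVE_OFFSET: Z/P/FFR data copied from
 * target->thread.sve_state, then fpsr and fpcr, zero-padded out to
 * sve_size_from_header().
 */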
855
856 static int sve_set(struct task_struct *target,
857                    const struct user_regset *regset,
858                    unsigned int pos, unsigned int count,
859                    const void *kbuf, const void __user *ubuf)
860 {
861         int ret;
862         struct user_sve_header header;
863         unsigned int vq;
864         unsigned long start, end;
865
866         if (!system_supports_sve())
867                 return -EINVAL;
868
869         /* Header */
870         if (count < sizeof(header))
871                 return -EINVAL;
872         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
873                                  0, sizeof(header));
874         if (ret)
875                 goto out;
876
877         /*
878          * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
879          * sve_set_vector_length(), which will also validate them for us:
880          */
881         ret = sve_set_vector_length(target, header.vl,
882                 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
883         if (ret)
884                 goto out;
885
886         /* Actual VL set may be less than the user asked for: */
887         vq = sve_vq_from_vl(target->thread.sve_vl);
888
889         /* Registers: FPSIMD-only case */
890
891         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
892         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
893                 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
894                                 SVE_PT_FPSIMD_OFFSET);
895                 clear_tsk_thread_flag(target, TIF_SVE);
896                 goto out;
897         }
898
899         /* Otherwise: full SVE case */
900
901         /*
902          * If setting a different VL from the requested VL and there is
903          * register data, the data layout will be wrong: don't even
904          * try to set the registers in this case.
905          */
906         if (count && vq != sve_vq_from_vl(header.vl)) {
907                 ret = -EIO;
908                 goto out;
909         }
910
911         sve_alloc(target);
912
913         /*
914          * Ensure target->thread.sve_state is up to date with target's
915          * FPSIMD regs, so that a short copyin leaves trailing registers
916          * unmodified.
917          */
918         fpsimd_sync_to_sve(target);
919         set_tsk_thread_flag(target, TIF_SVE);
920
921         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
922         start = SVE_PT_SVE_OFFSET;
923         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
924         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
925                                  target->thread.sve_state,
926                                  start, end);
927         if (ret)
928                 goto out;
929
930         start = end;
931         end = SVE_PT_SVE_FPSR_OFFSET(vq);
932         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
933                                         start, end);
934         if (ret)
935                 goto out;
936
937         /*
938          * Copy fpsr and fpcr, which must follow contiguously in
939          * struct fpsimd_state:
940          */
941         start = end;
942         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
943         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
944                                  &target->thread.fpsimd_state.fpsr,
945                                  start, end);
946
947 out:
948         fpsimd_flush_task_state(target);
949         return ret;
950 }
951
952 #endif /* CONFIG_ARM64_SVE */
953
954 enum aarch64_regset {
955         REGSET_GPR,
956         REGSET_FPR,
957         REGSET_TLS,
958 #ifdef CONFIG_HAVE_HW_BREAKPOINT
959         REGSET_HW_BREAK,
960         REGSET_HW_WATCH,
961 #endif
962         REGSET_SYSTEM_CALL,
963 #ifdef CONFIG_ARM64_SVE
964         REGSET_SVE,
965 #endif
966 };
967
968 static const struct user_regset aarch64_regsets[] = {
969         [REGSET_GPR] = {
970                 .core_note_type = NT_PRSTATUS,
971                 .n = sizeof(struct user_pt_regs) / sizeof(u64),
972                 .size = sizeof(u64),
973                 .align = sizeof(u64),
974                 .get = gpr_get,
975                 .set = gpr_set
976         },
977         [REGSET_FPR] = {
978                 .core_note_type = NT_PRFPREG,
979                 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
980                 /*
981                  * We pretend we have 32-bit registers because the fpsr and
982                  * fpcr are 32 bits wide.
983                  */
984                 .size = sizeof(u32),
985                 .align = sizeof(u32),
986                 .get = fpr_get,
987                 .set = fpr_set
988         },
989         [REGSET_TLS] = {
990                 .core_note_type = NT_ARM_TLS,
991                 .n = 1,
992                 .size = sizeof(void *),
993                 .align = sizeof(void *),
994                 .get = tls_get,
995                 .set = tls_set,
996         },
997 #ifdef CONFIG_HAVE_HW_BREAKPOINT
998         [REGSET_HW_BREAK] = {
999                 .core_note_type = NT_ARM_HW_BREAK,
1000                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1001                 .size = sizeof(u32),
1002                 .align = sizeof(u32),
1003                 .get = hw_break_get,
1004                 .set = hw_break_set,
1005         },
1006         [REGSET_HW_WATCH] = {
1007                 .core_note_type = NT_ARM_HW_WATCH,
1008                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1009                 .size = sizeof(u32),
1010                 .align = sizeof(u32),
1011                 .get = hw_break_get,
1012                 .set = hw_break_set,
1013         },
1014 #endif
1015         [REGSET_SYSTEM_CALL] = {
1016                 .core_note_type = NT_ARM_SYSTEM_CALL,
1017                 .n = 1,
1018                 .size = sizeof(int),
1019                 .align = sizeof(int),
1020                 .get = system_call_get,
1021                 .set = system_call_set,
1022         },
1023 #ifdef CONFIG_ARM64_SVE
1024         [REGSET_SVE] = { /* Scalable Vector Extension */
1025                 .core_note_type = NT_ARM_SVE,
1026                 .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
1027                                   SVE_VQ_BYTES),
1028                 .size = SVE_VQ_BYTES,
1029                 .align = SVE_VQ_BYTES,
1030                 .get = sve_get,
1031                 .set = sve_set,
1032                 .get_size = sve_get_size,
1033         },
1034 #endif
1035 };
1036
1037 static const struct user_regset_view user_aarch64_view = {
1038         .name = "aarch64", .e_machine = EM_AARCH64,
1039         .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
1040 };
1041
1042 #ifdef CONFIG_COMPAT
1043 #include <linux/compat.h>
1044
1045 enum compat_regset {
1046         REGSET_COMPAT_GPR,
1047         REGSET_COMPAT_VFP,
1048 };
1049
1050 static int compat_gpr_get(struct task_struct *target,
1051                           const struct user_regset *regset,
1052                           unsigned int pos, unsigned int count,
1053                           void *kbuf, void __user *ubuf)
1054 {
1055         int ret = 0;
1056         unsigned int i, start, num_regs;
1057
1058         /* Calculate the number of AArch32 registers contained in count */
1059         num_regs = count / regset->size;
1060
1061         /* Convert pos into a register number */
1062         start = pos / regset->size;
1063
1064         if (start + num_regs > regset->n)
1065                 return -EIO;
1066
1067         for (i = 0; i < num_regs; ++i) {
1068                 unsigned int idx = start + i;
1069                 compat_ulong_t reg;
1070
1071                 switch (idx) {
1072                 case 15:
1073                         reg = task_pt_regs(target)->pc;
1074                         break;
1075                 case 16:
1076                         reg = task_pt_regs(target)->pstate;
1077                         break;
1078                 case 17:
1079                         reg = task_pt_regs(target)->orig_x0;
1080                         break;
1081                 default:
1082                         reg = task_pt_regs(target)->regs[idx];
1083                 }
1084
1085                 if (kbuf) {
1086                         memcpy(kbuf, &reg, sizeof(reg));
1087                         kbuf += sizeof(reg);
1088                 } else {
1089                         ret = copy_to_user(ubuf, &reg, sizeof(reg));
1090                         if (ret) {
1091                                 ret = -EFAULT;
1092                                 break;
1093                         }
1094
1095                         ubuf += sizeof(reg);
1096                 }
1097         }
1098
1099         return ret;
1100 }
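
/*
 * Note (added for clarity): the compat GPR view exposes COMPAT_ELF_NGREG
 * words; indices 0-14 read regs[0..14] directly, index 15 is the PC,
 * index 16 the pstate (presented to AArch32 debuggers as the CPSR) and
 * index 17 orig_x0.
 */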
1101
1102 static int compat_gpr_set(struct task_struct *target,
1103                           const struct user_regset *regset,
1104                           unsigned int pos, unsigned int count,
1105                           const void *kbuf, const void __user *ubuf)
1106 {
1107         struct pt_regs newregs;
1108         int ret = 0;
1109         unsigned int i, start, num_regs;
1110
1111         /* Calculate the number of AArch32 registers contained in count */
1112         num_regs = count / regset->size;
1113
1114         /* Convert pos into a register number */
1115         start = pos / regset->size;
1116
1117         if (start + num_regs > regset->n)
1118                 return -EIO;
1119
1120         newregs = *task_pt_regs(target);
1121
1122         for (i = 0; i < num_regs; ++i) {
1123                 unsigned int idx = start + i;
1124                 compat_ulong_t reg;
1125
1126                 if (kbuf) {
1127                         memcpy(&reg, kbuf, sizeof(reg));
1128                         kbuf += sizeof(reg);
1129                 } else {
1130                         ret = copy_from_user(&reg, ubuf, sizeof(reg));
1131                         if (ret) {
1132                                 ret = -EFAULT;
1133                                 break;
1134                         }
1135
1136                         ubuf += sizeof(reg);
1137                 }
1138
1139                 switch (idx) {
1140                 case 15:
1141                         newregs.pc = reg;
1142                         break;
1143                 case 16:
1144                         newregs.pstate = reg;
1145                         break;
1146                 case 17:
1147                         newregs.orig_x0 = reg;
1148                         break;
1149                 default:
1150                         newregs.regs[idx] = reg;
1151                 }
1152
1153         }
1154
1155         if (valid_user_regs(&newregs.user_regs, target))
1156                 *task_pt_regs(target) = newregs;
1157         else
1158                 ret = -EINVAL;
1159
1160         return ret;
1161 }
1162
1163 static int compat_vfp_get(struct task_struct *target,
1164                           const struct user_regset *regset,
1165                           unsigned int pos, unsigned int count,
1166                           void *kbuf, void __user *ubuf)
1167 {
1168         struct user_fpsimd_state *uregs;
1169         compat_ulong_t fpscr;
1170         int ret, vregs_end_pos;
1171
1172         uregs = &target->thread.fpsimd_state.user_fpsimd;
1173
1174         if (target == current)
1175                 fpsimd_preserve_current_state();
1176
1177         /*
1178          * The VFP registers are packed into the fpsimd_state, so they all sit
1179          * nicely together for us. We just need to create the fpscr separately.
1180          */
1181         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1182         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
1183                                   0, vregs_end_pos);
1184
1185         if (count && !ret) {
1186                 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1187                         (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1188
1189                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
1190                                           vregs_end_pos, VFP_STATE_SIZE);
1191         }
1192
1193         return ret;
1194 }
1195
1196 static int compat_vfp_set(struct task_struct *target,
1197                           const struct user_regset *regset,
1198                           unsigned int pos, unsigned int count,
1199                           const void *kbuf, const void __user *ubuf)
1200 {
1201         struct user_fpsimd_state *uregs;
1202         compat_ulong_t fpscr;
1203         int ret, vregs_end_pos;
1204
1205         uregs = &target->thread.fpsimd_state.user_fpsimd;
1206
1207         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1208         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1209                                  vregs_end_pos);
1210
1211         if (count && !ret) {
1212                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1213                                          vregs_end_pos, VFP_STATE_SIZE);
1214                 if (!ret) {
1215                         uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1216                         uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1217                 }
1218         }
1219
1220         fpsimd_flush_task_state(target);
1221         return ret;
1222 }
1223
1224 static int compat_tls_get(struct task_struct *target,
1225                           const struct user_regset *regset, unsigned int pos,
1226                           unsigned int count, void *kbuf, void __user *ubuf)
1227 {
1228         compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
1229         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1230 }
1231
1232 static int compat_tls_set(struct task_struct *target,
1233                           const struct user_regset *regset, unsigned int pos,
1234                           unsigned int count, const void *kbuf,
1235                           const void __user *ubuf)
1236 {
1237         int ret;
1238         compat_ulong_t tls = target->thread.tp_value;
1239
1240         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1241         if (ret)
1242                 return ret;
1243
1244         target->thread.tp_value = tls;
1245         return ret;
1246 }
1247
1248 static const struct user_regset aarch32_regsets[] = {
1249         [REGSET_COMPAT_GPR] = {
1250                 .core_note_type = NT_PRSTATUS,
1251                 .n = COMPAT_ELF_NGREG,
1252                 .size = sizeof(compat_elf_greg_t),
1253                 .align = sizeof(compat_elf_greg_t),
1254                 .get = compat_gpr_get,
1255                 .set = compat_gpr_set
1256         },
1257         [REGSET_COMPAT_VFP] = {
1258                 .core_note_type = NT_ARM_VFP,
1259                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1260                 .size = sizeof(compat_ulong_t),
1261                 .align = sizeof(compat_ulong_t),
1262                 .get = compat_vfp_get,
1263                 .set = compat_vfp_set
1264         },
1265 };
1266
1267 static const struct user_regset_view user_aarch32_view = {
1268         .name = "aarch32", .e_machine = EM_ARM,
1269         .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
1270 };
1271
1272 static const struct user_regset aarch32_ptrace_regsets[] = {
1273         [REGSET_GPR] = {
1274                 .core_note_type = NT_PRSTATUS,
1275                 .n = COMPAT_ELF_NGREG,
1276                 .size = sizeof(compat_elf_greg_t),
1277                 .align = sizeof(compat_elf_greg_t),
1278                 .get = compat_gpr_get,
1279                 .set = compat_gpr_set
1280         },
1281         [REGSET_FPR] = {
1282                 .core_note_type = NT_ARM_VFP,
1283                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1284                 .size = sizeof(compat_ulong_t),
1285                 .align = sizeof(compat_ulong_t),
1286                 .get = compat_vfp_get,
1287                 .set = compat_vfp_set
1288         },
1289         [REGSET_TLS] = {
1290                 .core_note_type = NT_ARM_TLS,
1291                 .n = 1,
1292                 .size = sizeof(compat_ulong_t),
1293                 .align = sizeof(compat_ulong_t),
1294                 .get = compat_tls_get,
1295                 .set = compat_tls_set,
1296         },
1297 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1298         [REGSET_HW_BREAK] = {
1299                 .core_note_type = NT_ARM_HW_BREAK,
1300                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1301                 .size = sizeof(u32),
1302                 .align = sizeof(u32),
1303                 .get = hw_break_get,
1304                 .set = hw_break_set,
1305         },
1306         [REGSET_HW_WATCH] = {
1307                 .core_note_type = NT_ARM_HW_WATCH,
1308                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1309                 .size = sizeof(u32),
1310                 .align = sizeof(u32),
1311                 .get = hw_break_get,
1312                 .set = hw_break_set,
1313         },
1314 #endif
1315         [REGSET_SYSTEM_CALL] = {
1316                 .core_note_type = NT_ARM_SYSTEM_CALL,
1317                 .n = 1,
1318                 .size = sizeof(int),
1319                 .align = sizeof(int),
1320                 .get = system_call_get,
1321                 .set = system_call_set,
1322         },
1323 };
1324
1325 static const struct user_regset_view user_aarch32_ptrace_view = {
1326         .name = "aarch32", .e_machine = EM_ARM,
1327         .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
1328 };
1329
1330 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1331                                    compat_ulong_t __user *ret)
1332 {
1333         compat_ulong_t tmp;
1334
1335         if (off & 3)
1336                 return -EIO;
1337
1338         if (off == COMPAT_PT_TEXT_ADDR)
1339                 tmp = tsk->mm->start_code;
1340         else if (off == COMPAT_PT_DATA_ADDR)
1341                 tmp = tsk->mm->start_data;
1342         else if (off == COMPAT_PT_TEXT_END_ADDR)
1343                 tmp = tsk->mm->end_code;
1344         else if (off < sizeof(compat_elf_gregset_t))
1345                 return copy_regset_to_user(tsk, &user_aarch32_view,
1346                                            REGSET_COMPAT_GPR, off,
1347                                            sizeof(compat_ulong_t), ret);
1348         else if (off >= COMPAT_USER_SZ)
1349                 return -EIO;
1350         else
1351                 tmp = 0;
1352
1353         return put_user(tmp, ret);
1354 }
1355
1356 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1357                                     compat_ulong_t val)
1358 {
1359         int ret;
1360         mm_segment_t old_fs = get_fs();
1361
1362         if (off & 3 || off >= COMPAT_USER_SZ)
1363                 return -EIO;
1364
1365         if (off >= sizeof(compat_elf_gregset_t))
1366                 return 0;
1367
1368         set_fs(KERNEL_DS);
1369         ret = copy_regset_from_user(tsk, &user_aarch32_view,
1370                                     REGSET_COMPAT_GPR, off,
1371                                     sizeof(compat_ulong_t),
1372                                     &val);
1373         set_fs(old_fs);
1374
1375         return ret;
1376 }
1377
1378 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1379
1380 /*
1381  * Convert a virtual register number into an index for a thread_struct
1382  * breakpoint array. Breakpoints are identified using positive numbers
1383  * whilst watchpoints are negative. The registers are laid out as pairs
1384  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1385  * Register 0 is reserved for describing resource information.
1386  */
1387 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1388 {
1389         return (abs(num) - 1) >> 1;
1390 }
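
/*
 * Example (illustrative): register numbers 1 and 2 (a breakpoint's address
 * and control) both map to index 0, numbers 3 and 4 to index 1, and so on;
 * for watchpoints the same holds with negated numbers, so -1/-2 map to
 * index 0 of the watchpoint array.
 */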
1391
1392 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1393 {
1394         u8 num_brps, num_wrps, debug_arch, wp_len;
1395         u32 reg = 0;
1396
1397         num_brps        = hw_breakpoint_slots(TYPE_INST);
1398         num_wrps        = hw_breakpoint_slots(TYPE_DATA);
1399
1400         debug_arch      = debug_monitors_arch();
1401         wp_len          = 8;
1402         reg             |= debug_arch;
1403         reg             <<= 8;
1404         reg             |= wp_len;
1405         reg             <<= 8;
1406         reg             |= num_wrps;
1407         reg             <<= 8;
1408         reg             |= num_brps;
1409
1410         *kdata = reg;
1411         return 0;
1412 }
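
/*
 * Note (added for clarity): the resource word is packed here as
 * byte 3 = debug architecture, byte 2 = watchpoint length (hard-coded 8),
 * byte 1 = number of watchpoint slots, byte 0 = number of breakpoint slots.
 */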
1413
1414 static int compat_ptrace_hbp_get(unsigned int note_type,
1415                                  struct task_struct *tsk,
1416                                  compat_long_t num,
1417                                  u32 *kdata)
1418 {
1419         u64 addr = 0;
1420         u32 ctrl = 0;
1421
1422         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1423
1424         if (num & 1) {
1425                 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1426                 *kdata = (u32)addr;
1427         } else {
1428                 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1429                 *kdata = ctrl;
1430         }
1431
1432         return err;
1433 }
1434
1435 static int compat_ptrace_hbp_set(unsigned int note_type,
1436                                  struct task_struct *tsk,
1437                                  compat_long_t num,
1438                                  u32 *kdata)
1439 {
1440         u64 addr;
1441         u32 ctrl;
1442
1443         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1444
1445         if (num & 1) {
1446                 addr = *kdata;
1447                 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1448         } else {
1449                 ctrl = *kdata;
1450                 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1451         }
1452
1453         return err;
1454 }
1455
1456 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1457                                     compat_ulong_t __user *data)
1458 {
1459         int ret;
1460         u32 kdata;
1461         mm_segment_t old_fs = get_fs();
1462
1463         set_fs(KERNEL_DS);
1464         /* Watchpoint */
1465         if (num < 0) {
1466                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1467         /* Resource info */
1468         } else if (num == 0) {
1469                 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1470         /* Breakpoint */
1471         } else {
1472                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1473         }
1474         set_fs(old_fs);
1475
1476         if (!ret)
1477                 ret = put_user(kdata, data);
1478
1479         return ret;
1480 }
1481
1482 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1483                                     compat_ulong_t __user *data)
1484 {
1485         int ret;
1486         u32 kdata = 0;
1487         mm_segment_t old_fs = get_fs();
1488
1489         if (num == 0)
1490                 return 0;
1491
1492         ret = get_user(kdata, data);
1493         if (ret)
1494                 return ret;
1495
1496         set_fs(KERNEL_DS);
1497         if (num < 0)
1498                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1499         else
1500                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1501         set_fs(old_fs);
1502
1503         return ret;
1504 }
1505 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
1506
1507 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1508                         compat_ulong_t caddr, compat_ulong_t cdata)
1509 {
1510         unsigned long addr = caddr;
1511         unsigned long data = cdata;
1512         void __user *datap = compat_ptr(data);
1513         int ret;
1514
1515         switch (request) {
1516                 case PTRACE_PEEKUSR:
1517                         ret = compat_ptrace_read_user(child, addr, datap);
1518                         break;
1519
1520                 case PTRACE_POKEUSR:
1521                         ret = compat_ptrace_write_user(child, addr, data);
1522                         break;
1523
1524                 case COMPAT_PTRACE_GETREGS:
1525                         ret = copy_regset_to_user(child,
1526                                                   &user_aarch32_view,
1527                                                   REGSET_COMPAT_GPR,
1528                                                   0, sizeof(compat_elf_gregset_t),
1529                                                   datap);
1530                         break;
1531
1532                 case COMPAT_PTRACE_SETREGS:
1533                         ret = copy_regset_from_user(child,
1534                                                     &user_aarch32_view,
1535                                                     REGSET_COMPAT_GPR,
1536                                                     0, sizeof(compat_elf_gregset_t),
1537                                                     datap);
1538                         break;
1539
1540                 case COMPAT_PTRACE_GET_THREAD_AREA:
1541                         ret = put_user((compat_ulong_t)child->thread.tp_value,
1542                                        (compat_ulong_t __user *)datap);
1543                         break;
1544
1545                 case COMPAT_PTRACE_SET_SYSCALL:
1546                         task_pt_regs(child)->syscallno = data;
1547                         ret = 0;
1548                         break;
1549
1550                 case COMPAT_PTRACE_GETVFPREGS:
1551                         ret = copy_regset_to_user(child,
1552                                                   &user_aarch32_view,
1553                                                   REGSET_COMPAT_VFP,
1554                                                   0, VFP_STATE_SIZE,
1555                                                   datap);
1556                         break;
1557
1558                 case COMPAT_PTRACE_SETVFPREGS:
1559                         ret = copy_regset_from_user(child,
1560                                                     &user_aarch32_view,
1561                                                     REGSET_COMPAT_VFP,
1562                                                     0, VFP_STATE_SIZE,
1563                                                     datap);
1564                         break;
1565
1566 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1567                 case COMPAT_PTRACE_GETHBPREGS:
1568                         ret = compat_ptrace_gethbpregs(child, addr, datap);
1569                         break;
1570
1571                 case COMPAT_PTRACE_SETHBPREGS:
1572                         ret = compat_ptrace_sethbpregs(child, addr, datap);
1573                         break;
1574 #endif
1575
1576                 default:
1577                         ret = compat_ptrace_request(child, request, addr,
1578                                                     data);
1579                         break;
1580         }
1581
1582         return ret;
1583 }
1584 #endif /* CONFIG_COMPAT */
1585
1586 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1587 {
1588 #ifdef CONFIG_COMPAT
1589         /*
1590          * Core dumping of 32-bit tasks or compat ptrace requests must use the
1591          * user_aarch32_view compatible with arm32. Native ptrace requests on
1592          * 32-bit children use an extended user_aarch32_ptrace_view to allow
1593          * access to the TLS register.
1594          */
1595         if (is_compat_task())
1596                 return &user_aarch32_view;
1597         else if (is_compat_thread(task_thread_info(task)))
1598                 return &user_aarch32_ptrace_view;
1599 #endif
1600         return &user_aarch64_view;
1601 }
1602
1603 long arch_ptrace(struct task_struct *child, long request,
1604                  unsigned long addr, unsigned long data)
1605 {
1606         return ptrace_request(child, request, addr, data);
1607 }
1608
1609 enum ptrace_syscall_dir {
1610         PTRACE_SYSCALL_ENTER = 0,
1611         PTRACE_SYSCALL_EXIT,
1612 };
1613
1614 static void tracehook_report_syscall(struct pt_regs *regs,
1615                                      enum ptrace_syscall_dir dir)
1616 {
1617         int regno;
1618         unsigned long saved_reg;
1619
1620         /*
1621          * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1622          * used to denote syscall entry/exit:
1623          */
1624         regno = (is_compat_task() ? 12 : 7);
1625         saved_reg = regs->regs[regno];
1626         regs->regs[regno] = dir;
1627
1628         if (dir == PTRACE_SYSCALL_EXIT)
1629                 tracehook_report_syscall_exit(regs, 0);
1630         else if (tracehook_report_syscall_entry(regs))
1631                 forget_syscall(regs);
1632
1633         regs->regs[regno] = saved_reg;
1634 }
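
/*
 * Note (added for clarity): from the tracer's point of view, at a syscall
 * stop the scratch register (x7 natively, r12 for compat tasks) reads
 * PTRACE_SYSCALL_ENTER (0) on entry and PTRACE_SYSCALL_EXIT (1) on exit;
 * the original register value is restored before the tracee resumes.
 */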
1635
1636 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
1637 {
1638         if (test_thread_flag(TIF_SYSCALL_TRACE))
1639                 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1640
1641         /* Do the secure computing after ptrace; failures should be fast. */
1642         if (secure_computing(NULL) == -1)
1643                 return -1;
1644
1645         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1646                 trace_sys_enter(regs, regs->syscallno);
1647
1648         audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
1649                             regs->regs[2], regs->regs[3]);
1650
1651         return regs->syscallno;
1652 }
1653
1654 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
1655 {
1656         audit_syscall_exit(regs);
1657
1658         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1659                 trace_sys_exit(regs, regs_return_value(regs));
1660
1661         if (test_thread_flag(TIF_SYSCALL_TRACE))
1662                 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
1663 }
1664
1665 /*
1666  * Bits which are always architecturally RES0 per ARM DDI 0487A.h
1667  * Userspace cannot use these until they have an architectural meaning.
1668  * We also reserve IL for the kernel; SS is handled dynamically.
1669  */
1670 #define SPSR_EL1_AARCH64_RES0_BITS \
1671         (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
1672          GENMASK_ULL(5, 5))
1673 #define SPSR_EL1_AARCH32_RES0_BITS \
1674         (GENMASK_ULL(63, 32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20, 20))
1675
1676 static int valid_compat_regs(struct user_pt_regs *regs)
1677 {
1678         regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
1679
1680         if (!system_supports_mixed_endian_el0()) {
1681                 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1682                         regs->pstate |= COMPAT_PSR_E_BIT;
1683                 else
1684                         regs->pstate &= ~COMPAT_PSR_E_BIT;
1685         }
1686
1687         if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
1688             (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
1689             (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
1690             (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
1691                 return 1;
1692         }
1693
1694         /*
1695          * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1696          * arch/arm.
1697          */
1698         regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
1699                         COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
1700                         COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
1701                         COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
1702                         COMPAT_PSR_T_BIT;
1703         regs->pstate |= PSR_MODE32_BIT;
1704
1705         return 0;
1706 }
1707
1708 static int valid_native_regs(struct user_pt_regs *regs)
1709 {
1710         regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1711
1712         if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1713             (regs->pstate & PSR_D_BIT) == 0 &&
1714             (regs->pstate & PSR_A_BIT) == 0 &&
1715             (regs->pstate & PSR_I_BIT) == 0 &&
1716             (regs->pstate & PSR_F_BIT) == 0) {
1717                 return 1;
1718         }
1719
1720         /* Force PSR to a valid 64-bit EL0t */
1721         regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1722
1723         return 0;
1724 }
1725
1726 /*
1727  * Are the current registers suitable for user mode? (used to maintain
1728  * security in signal handlers)
1729  */
1730 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1731 {
1732         if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
1733                 regs->pstate &= ~DBG_SPSR_SS;
1734
1735         if (is_compat_thread(task_thread_info(task)))
1736                 return valid_compat_regs(regs);
1737         else
1738                 return valid_native_regs(regs);
1739 }