// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
17 #include <linux/kernel.h>
18 #include <linux/audit.h>
19 #include <linux/compiler.h>
20 #include <linux/context_tracking.h>
21 #include <linux/elf.h>
22 #include <linux/errno.h>
23 #include <linux/hw_breakpoint.h>
25 #include <linux/nospec.h>
26 #include <linux/ptrace.h>
27 #include <linux/regset.h>
28 #include <linux/sched.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/security.h>
31 #include <linux/smp.h>
32 #include <linux/stddef.h>
33 #include <linux/seccomp.h>
34 #include <linux/thread_info.h>
35 #include <linux/uaccess.h>
37 #include <asm/byteorder.h>
39 #include <asm/cpu-info.h>
41 #include <asm/loongarch.h>
43 #include <asm/pgtable.h>
44 #include <asm/processor.h>
45 #include <asm/ptrace.h>
47 #include <asm/syscall.h>
49 static void init_fp_ctx(struct task_struct *target)
51 /* The target already has context */
52 if (tsk_used_math(target))
55 /* Begin with data registers set to all 1s... */
56 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
57 set_stopped_child_used_math(target);
61 * Called by kernel/ptrace.c when detaching..
63 * Make sure single step bits etc are not set.
65 void ptrace_disable(struct task_struct *child)
67 /* Don't load the watchpoint registers for the ex-child. */
68 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
69 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
72 /* regset get/set implementations */
74 static int gpr_get(struct task_struct *target,
75 const struct user_regset *regset,
79 struct pt_regs *regs = task_pt_regs(target);
81 r = membuf_write(&to, ®s->regs, sizeof(u64) * GPR_NUM);
82 r = membuf_write(&to, ®s->orig_a0, sizeof(u64));
83 r = membuf_write(&to, ®s->csr_era, sizeof(u64));
84 r = membuf_write(&to, ®s->csr_badvaddr, sizeof(u64));
89 static int gpr_set(struct task_struct *target,
90 const struct user_regset *regset,
91 unsigned int pos, unsigned int count,
92 const void *kbuf, const void __user *ubuf)
95 int a0_start = sizeof(u64) * GPR_NUM;
96 int era_start = a0_start + sizeof(u64);
97 int badvaddr_start = era_start + sizeof(u64);
98 struct pt_regs *regs = task_pt_regs(target);
100 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
103 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
105 a0_start, a0_start + sizeof(u64));
106 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
108 era_start, era_start + sizeof(u64));
109 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
111 badvaddr_start, badvaddr_start + sizeof(u64));
118 * Get the general floating-point registers.
120 static int gfpr_get(struct task_struct *target, struct membuf *to)
122 return membuf_write(to, &target->thread.fpu.fpr,
123 sizeof(elf_fpreg_t) * NUM_FPU_REGS);
126 static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
131 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
132 for (i = 0; i < NUM_FPU_REGS; i++) {
133 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
134 r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
141 * Choose the appropriate helper for general registers, and then copy
142 * the FCC and FCSR registers separately.
144 static int fpr_get(struct task_struct *target,
145 const struct user_regset *regset,
150 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
151 r = gfpr_get(target, &to);
153 r = gfpr_get_simd(target, &to);
155 r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
156 r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
161 static int gfpr_set(struct task_struct *target,
162 unsigned int *pos, unsigned int *count,
163 const void **kbuf, const void __user **ubuf)
165 return user_regset_copyin(pos, count, kbuf, ubuf,
166 &target->thread.fpu.fpr,
167 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
170 static int gfpr_set_simd(struct task_struct *target,
171 unsigned int *pos, unsigned int *count,
172 const void **kbuf, const void __user **ubuf)
177 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
178 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
179 err = user_regset_copyin(pos, count, kbuf, ubuf,
180 &fpr_val, i * sizeof(elf_fpreg_t),
181 (i + 1) * sizeof(elf_fpreg_t));
184 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
191 * Choose the appropriate helper for general registers, and then copy
192 * the FCC register separately.
194 static int fpr_set(struct task_struct *target,
195 const struct user_regset *regset,
196 unsigned int pos, unsigned int count,
197 const void *kbuf, const void __user *ubuf)
199 const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
200 const int fcsr_start = fcc_start + sizeof(u64);
203 BUG_ON(count % sizeof(elf_fpreg_t));
204 if (pos + count > sizeof(elf_fpregset_t))
209 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
210 err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
212 err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
216 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
217 &target->thread.fpu.fcc, fcc_start,
218 fcc_start + sizeof(u64));
219 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
220 &target->thread.fpu.fcsr, fcsr_start,
221 fcsr_start + sizeof(u32));
226 static int cfg_get(struct task_struct *target,
227 const struct user_regset *regset,
234 while (to.left > 0) {
235 cfg_val = read_cpucfg(i++);
236 r = membuf_write(&to, &cfg_val, sizeof(u32));
243 * CFG registers are read-only.
245 static int cfg_set(struct task_struct *target,
246 const struct user_regset *regset,
247 unsigned int pos, unsigned int count,
248 const void *kbuf, const void __user *ubuf)
253 #ifdef CONFIG_HAVE_HW_BREAKPOINT
256 * Handle hitting a HW-breakpoint.
258 static void ptrace_hbptriggered(struct perf_event *bp,
259 struct perf_sample_data *data,
260 struct pt_regs *regs)
263 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
265 for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
266 if (current->thread.hbp_break[i] == bp)
269 for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
270 if (current->thread.hbp_watch[i] == bp)
273 force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
276 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
277 struct task_struct *tsk,
280 struct perf_event *bp;
283 case NT_LOONGARCH_HW_BREAK:
284 if (idx >= LOONGARCH_MAX_BRP)
285 return ERR_PTR(-EINVAL);
286 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
287 bp = tsk->thread.hbp_break[idx];
289 case NT_LOONGARCH_HW_WATCH:
290 if (idx >= LOONGARCH_MAX_WRP)
291 return ERR_PTR(-EINVAL);
292 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
293 bp = tsk->thread.hbp_watch[idx];
300 static int ptrace_hbp_set_event(unsigned int note_type,
301 struct task_struct *tsk,
303 struct perf_event *bp)
306 case NT_LOONGARCH_HW_BREAK:
307 if (idx >= LOONGARCH_MAX_BRP)
309 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
310 tsk->thread.hbp_break[idx] = bp;
312 case NT_LOONGARCH_HW_WATCH:
313 if (idx >= LOONGARCH_MAX_WRP)
315 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
316 tsk->thread.hbp_watch[idx] = bp;
323 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
324 struct task_struct *tsk,
328 struct perf_event *bp;
329 struct perf_event_attr attr;
332 case NT_LOONGARCH_HW_BREAK:
333 type = HW_BREAKPOINT_X;
335 case NT_LOONGARCH_HW_WATCH:
336 type = HW_BREAKPOINT_RW;
339 return ERR_PTR(-EINVAL);
342 ptrace_breakpoint_init(&attr);
345 * Initialise fields to sane defaults
346 * (i.e. values that will pass validation).
349 attr.bp_len = HW_BREAKPOINT_LEN_4;
353 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
357 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
364 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
365 struct arch_hw_breakpoint_ctrl ctrl,
366 struct perf_event_attr *attr)
368 int err, len, type, offset;
370 err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
375 case NT_LOONGARCH_HW_BREAK:
376 if ((type & HW_BREAKPOINT_X) != type)
379 case NT_LOONGARCH_HW_WATCH:
380 if ((type & HW_BREAKPOINT_RW) != type)
388 attr->bp_type = type;
389 attr->bp_addr += offset;
394 static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
400 case NT_LOONGARCH_HW_BREAK:
401 num = hw_breakpoint_slots(TYPE_INST);
403 case NT_LOONGARCH_HW_WATCH:
404 num = hw_breakpoint_slots(TYPE_DATA);
/*
 * Return the event for slot @idx, creating it on first use. May return
 * an ERR_PTR from either lookup or creation.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
427 static int ptrace_hbp_get_ctrl(unsigned int note_type,
428 struct task_struct *tsk,
429 unsigned long idx, u32 *ctrl)
431 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
436 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
441 static int ptrace_hbp_get_mask(unsigned int note_type,
442 struct task_struct *tsk,
443 unsigned long idx, u64 *mask)
445 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
450 *mask = bp ? counter_arch_bp(bp)->mask : 0;
455 static int ptrace_hbp_get_addr(unsigned int note_type,
456 struct task_struct *tsk,
457 unsigned long idx, u64 *addr)
459 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
464 *addr = bp ? counter_arch_bp(bp)->address : 0;
469 static int ptrace_hbp_set_ctrl(unsigned int note_type,
470 struct task_struct *tsk,
471 unsigned long idx, u32 uctrl)
474 struct perf_event *bp;
475 struct perf_event_attr attr;
476 struct arch_hw_breakpoint_ctrl ctrl;
478 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
483 decode_ctrl_reg(uctrl, &ctrl);
484 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
488 return modify_user_hw_breakpoint(bp, &attr);
491 static int ptrace_hbp_set_mask(unsigned int note_type,
492 struct task_struct *tsk,
493 unsigned long idx, u64 mask)
495 struct perf_event *bp;
496 struct perf_event_attr attr;
497 struct arch_hw_breakpoint *info;
499 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
504 info = counter_arch_bp(bp);
507 return modify_user_hw_breakpoint(bp, &attr);
510 static int ptrace_hbp_set_addr(unsigned int note_type,
511 struct task_struct *tsk,
512 unsigned long idx, u64 addr)
514 struct perf_event *bp;
515 struct perf_event_attr attr;
517 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
524 return modify_user_hw_breakpoint(bp, &attr);
527 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
528 #define PTRACE_HBP_MASK_SZ sizeof(u64)
529 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
530 #define PTRACE_HBP_PAD_SZ sizeof(u32)
532 static int hw_break_get(struct task_struct *target,
533 const struct user_regset *regset,
540 unsigned int note_type = regset->core_note_type;
543 ret = ptrace_hbp_get_resource_info(note_type, &info);
547 membuf_write(&to, &info, sizeof(info));
549 /* (address, mask, ctrl) registers */
551 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
555 ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
559 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
563 membuf_store(&to, addr);
564 membuf_store(&to, mask);
565 membuf_store(&to, ctrl);
566 membuf_zero(&to, sizeof(u32));
573 static int hw_break_set(struct task_struct *target,
574 const struct user_regset *regset,
575 unsigned int pos, unsigned int count,
576 const void *kbuf, const void __user *ubuf)
580 int ret, idx = 0, offset, limit;
581 unsigned int note_type = regset->core_note_type;
584 offset = offsetof(struct user_watch_state, dbg_regs);
585 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
587 /* (address, mask, ctrl) registers */
588 limit = regset->n * regset->size;
589 while (count && offset < limit) {
590 if (count < PTRACE_HBP_ADDR_SZ)
593 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
594 offset, offset + PTRACE_HBP_ADDR_SZ);
598 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
601 offset += PTRACE_HBP_ADDR_SZ;
606 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
607 offset, offset + PTRACE_HBP_MASK_SZ);
611 ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
614 offset += PTRACE_HBP_MASK_SZ;
616 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
617 offset, offset + PTRACE_HBP_CTRL_SZ);
621 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
624 offset += PTRACE_HBP_CTRL_SZ;
626 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
627 offset, offset + PTRACE_HBP_PAD_SZ);
628 offset += PTRACE_HBP_PAD_SZ;
/* Maps a register name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
646 static const struct pt_regs_offset regoffset_table[] = {
647 REG_OFFSET_NAME(r0, regs[0]),
648 REG_OFFSET_NAME(r1, regs[1]),
649 REG_OFFSET_NAME(r2, regs[2]),
650 REG_OFFSET_NAME(r3, regs[3]),
651 REG_OFFSET_NAME(r4, regs[4]),
652 REG_OFFSET_NAME(r5, regs[5]),
653 REG_OFFSET_NAME(r6, regs[6]),
654 REG_OFFSET_NAME(r7, regs[7]),
655 REG_OFFSET_NAME(r8, regs[8]),
656 REG_OFFSET_NAME(r9, regs[9]),
657 REG_OFFSET_NAME(r10, regs[10]),
658 REG_OFFSET_NAME(r11, regs[11]),
659 REG_OFFSET_NAME(r12, regs[12]),
660 REG_OFFSET_NAME(r13, regs[13]),
661 REG_OFFSET_NAME(r14, regs[14]),
662 REG_OFFSET_NAME(r15, regs[15]),
663 REG_OFFSET_NAME(r16, regs[16]),
664 REG_OFFSET_NAME(r17, regs[17]),
665 REG_OFFSET_NAME(r18, regs[18]),
666 REG_OFFSET_NAME(r19, regs[19]),
667 REG_OFFSET_NAME(r20, regs[20]),
668 REG_OFFSET_NAME(r21, regs[21]),
669 REG_OFFSET_NAME(r22, regs[22]),
670 REG_OFFSET_NAME(r23, regs[23]),
671 REG_OFFSET_NAME(r24, regs[24]),
672 REG_OFFSET_NAME(r25, regs[25]),
673 REG_OFFSET_NAME(r26, regs[26]),
674 REG_OFFSET_NAME(r27, regs[27]),
675 REG_OFFSET_NAME(r28, regs[28]),
676 REG_OFFSET_NAME(r29, regs[29]),
677 REG_OFFSET_NAME(r30, regs[30]),
678 REG_OFFSET_NAME(r31, regs[31]),
679 REG_OFFSET_NAME(orig_a0, orig_a0),
680 REG_OFFSET_NAME(csr_era, csr_era),
681 REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
682 REG_OFFSET_NAME(csr_crmd, csr_crmd),
683 REG_OFFSET_NAME(csr_prmd, csr_prmd),
684 REG_OFFSET_NAME(csr_euen, csr_euen),
685 REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
686 REG_OFFSET_NAME(csr_estat, csr_estat),
691 * regs_query_register_offset() - query register offset from its name
692 * @name: the name of a register
694 * regs_query_register_offset() returns the offset of a register in struct
695 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
697 int regs_query_register_offset(const char *name)
699 const struct pt_regs_offset *roff;
701 for (roff = regoffset_table; roff->name != NULL; roff++)
702 if (!strcmp(roff->name, name))
/* Indices into loongarch64_regsets[]. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
717 static const struct user_regset loongarch64_regsets[] = {
719 .core_note_type = NT_PRSTATUS,
721 .size = sizeof(elf_greg_t),
722 .align = sizeof(elf_greg_t),
723 .regset_get = gpr_get,
727 .core_note_type = NT_PRFPREG,
729 .size = sizeof(elf_fpreg_t),
730 .align = sizeof(elf_fpreg_t),
731 .regset_get = fpr_get,
735 .core_note_type = NT_LOONGARCH_CPUCFG,
738 .align = sizeof(u32),
739 .regset_get = cfg_get,
742 #ifdef CONFIG_HAVE_HW_BREAKPOINT
743 [REGSET_HW_BREAK] = {
744 .core_note_type = NT_LOONGARCH_HW_BREAK,
745 .n = sizeof(struct user_watch_state) / sizeof(u32),
747 .align = sizeof(u32),
748 .regset_get = hw_break_get,
751 [REGSET_HW_WATCH] = {
752 .core_note_type = NT_LOONGARCH_HW_WATCH,
753 .n = sizeof(struct user_watch_state) / sizeof(u32),
755 .align = sizeof(u32),
756 .regset_get = hw_break_get,
762 static const struct user_regset_view user_loongarch64_view = {
763 .name = "loongarch64",
764 .e_machine = ELF_ARCH,
765 .regsets = loongarch64_regsets,
766 .n = ARRAY_SIZE(loongarch64_regsets),
770 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
772 return &user_loongarch64_view;
775 static inline int read_user(struct task_struct *target, unsigned long addr,
776 unsigned long __user *data)
778 unsigned long tmp = 0;
782 tmp = task_pt_regs(target)->regs[addr];
785 tmp = task_pt_regs(target)->orig_a0;
788 tmp = task_pt_regs(target)->csr_era;
791 tmp = task_pt_regs(target)->csr_badvaddr;
797 return put_user(tmp, data);
800 static inline int write_user(struct task_struct *target, unsigned long addr,
805 task_pt_regs(target)->regs[addr] = data;
808 task_pt_regs(target)->orig_a0 = data;
811 task_pt_regs(target)->csr_era = data;
814 task_pt_regs(target)->csr_badvaddr = data;
823 long arch_ptrace(struct task_struct *child, long request,
824 unsigned long addr, unsigned long data)
827 unsigned long __user *datap = (void __user *) data;
831 ret = read_user(child, addr, datap);
835 ret = write_user(child, addr, data);
839 ret = ptrace_request(child, request, addr, data);
846 #ifdef CONFIG_HAVE_HW_BREAKPOINT
847 static void ptrace_triggered(struct perf_event *bp,
848 struct perf_sample_data *data, struct pt_regs *regs)
850 struct perf_event_attr attr;
853 attr.disabled = true;
854 modify_user_hw_breakpoint(bp, &attr);
857 static int set_single_step(struct task_struct *tsk, unsigned long addr)
859 struct perf_event *bp;
860 struct perf_event_attr attr;
861 struct arch_hw_breakpoint *info;
862 struct thread_struct *thread = &tsk->thread;
864 bp = thread->hbp_break[0];
866 ptrace_breakpoint_init(&attr);
869 attr.bp_len = HW_BREAKPOINT_LEN_8;
870 attr.bp_type = HW_BREAKPOINT_X;
872 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
877 thread->hbp_break[0] = bp;
884 /* Reenable breakpoint */
885 attr.disabled = false;
886 err = modify_user_hw_breakpoint(bp, &attr);
890 csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
892 info = counter_arch_bp(bp);
893 info->mask = TASK_SIZE - 1;
899 void user_enable_single_step(struct task_struct *task)
901 struct thread_info *ti = task_thread_info(task);
903 set_single_step(task, task_pt_regs(task)->csr_era);
904 task->thread.single_step = task_pt_regs(task)->csr_era;
905 set_ti_thread_flag(ti, TIF_SINGLESTEP);
908 void user_disable_single_step(struct task_struct *task)
910 clear_tsk_thread_flag(task, TIF_SINGLESTEP);