arch/x86/kernel/perf_regs.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

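/*
 * Map each PERF_REG_X86_* index to the offset of the corresponding
 * field in struct pt_regs.  Entries that have no pt_regs counterpart
 * (the segment registers on 64-bit) are filled with -1.
 */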
static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
        PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
        PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
        PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
        PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
        PT_REGS_OFFSET(PERF_REG_X86_SI, si),
        PT_REGS_OFFSET(PERF_REG_X86_DI, di),
        PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
        PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
        PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
        PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
        PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
        PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
        PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
        PT_REGS_OFFSET(PERF_REG_X86_ES, es),
        PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
        PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
        /*
         * The pt_regs struct does not store
         * ds, es, fs, gs in 64 bit mode.
         */
        (unsigned int) -1,
        (unsigned int) -1,
        (unsigned int) -1,
        (unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
        PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
        PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
        PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
        PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
        PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
        PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
        PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
        PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};

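/*
 * Return the value of register @idx for the sampled @regs.  XMM
 * registers are not part of pt_regs; they are fetched from the
 * surrounding struct x86_perf_regs, if the PMU provided them.
 */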
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
        struct x86_perf_regs *perf_regs;

        if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
                perf_regs = container_of(regs, struct x86_perf_regs, regs);
                if (!perf_regs->xmm_regs)
                        return 0;
                return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
        }

        if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
                return 0;

        return regs_get_register(regs, pt_regs_offset[idx]);
}

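/*
 * Bits between PERF_REG_X86_MAX and PERF_REG_X86_XMM0 do not correspond
 * to any register and must not be requested by user space.
 */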
#define PERF_REG_X86_RESERVED   (((1ULL << PERF_REG_X86_XMM0) - 1) & \
                                 ~((1ULL << PERF_REG_X86_MAX) - 1))

#ifdef CONFIG_X86_32
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
                       (1ULL << PERF_REG_X86_R9) | \
                       (1ULL << PERF_REG_X86_R10) | \
                       (1ULL << PERF_REG_X86_R11) | \
                       (1ULL << PERF_REG_X86_R12) | \
                       (1ULL << PERF_REG_X86_R13) | \
                       (1ULL << PERF_REG_X86_R14) | \
                       (1ULL << PERF_REG_X86_R15))

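/*
 * Validate the register mask that user space passed in
 * perf_event_attr::sample_regs_user or ::sample_regs_intr.  R8..R15
 * do not exist on 32-bit kernels, so requesting them is an error.
 */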
int perf_reg_validate(u64 mask)
{
        if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
                return -EINVAL;

        return 0;
}

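/* A 32-bit kernel can only ever report the 32-bit register ABI. */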
u64 perf_reg_abi(struct task_struct *task)
{
        return PERF_SAMPLE_REGS_ABI_32;
}

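/*
 * On 32-bit kernels the user register state is saved in pt_regs on
 * every kernel entry, so the registers at the top of the task stack
 * can be reported directly.
 */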
void perf_get_regs_user(struct perf_regs *regs_user,
                        struct pt_regs *regs,
                        struct pt_regs *regs_user_copy)
{
        regs_user->regs = task_pt_regs(current);
        regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
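/*
 * ds, es, fs and gs are not stored in pt_regs on 64-bit kernels (see
 * the offset table above), so sampling them is not supported.
 */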
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                       (1ULL << PERF_REG_X86_ES) | \
                       (1ULL << PERF_REG_X86_FS) | \
                       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
        if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
                return -EINVAL;

        return 0;
}

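/*
 * Compat (TIF_IA32) tasks run with the 32-bit register ABI; everything
 * else on a 64-bit kernel reports the 64-bit ABI.
 */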
u64 perf_reg_abi(struct task_struct *task)
{
        if (test_tsk_thread_flag(task, TIF_IA32))
                return PERF_SAMPLE_REGS_ABI_32;
        else
                return PERF_SAMPLE_REGS_ABI_64;
}

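/*
 * Build a copy of the user-mode registers for a sample.  We may be in
 * NMI context here, so only registers that are reliably saved by the
 * entry code are copied; the rest are reported as -1.
 */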
void perf_get_regs_user(struct perf_regs *regs_user,
                        struct pt_regs *regs,
                        struct pt_regs *regs_user_copy)
{
        struct pt_regs *user_regs = task_pt_regs(current);

        /*
         * If we're in an NMI that interrupted task_pt_regs setup, then
         * we can't sample user regs at all.  This check isn't really
         * sufficient, though, as we could be in an NMI inside an interrupt
         * that happened during task_pt_regs setup.
         */
        if (regs->sp > (unsigned long)&user_regs->r11 &&
            regs->sp <= (unsigned long)(user_regs + 1)) {
                regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
                regs_user->regs = NULL;
                return;
        }

        /*
         * These registers are always saved on 64-bit syscall entry.
         * On 32-bit entry points, they are saved too except r8..r11.
         */
        regs_user_copy->ip = user_regs->ip;
        regs_user_copy->ax = user_regs->ax;
        regs_user_copy->cx = user_regs->cx;
        regs_user_copy->dx = user_regs->dx;
        regs_user_copy->si = user_regs->si;
        regs_user_copy->di = user_regs->di;
        regs_user_copy->r8 = user_regs->r8;
        regs_user_copy->r9 = user_regs->r9;
        regs_user_copy->r10 = user_regs->r10;
        regs_user_copy->r11 = user_regs->r11;
        regs_user_copy->orig_ax = user_regs->orig_ax;
        regs_user_copy->flags = user_regs->flags;
        regs_user_copy->sp = user_regs->sp;
        regs_user_copy->cs = user_regs->cs;
        regs_user_copy->ss = user_regs->ss;
        /*
         * Store the user space frame-pointer value in the sample to
         * facilitate stack unwinding when the user space executable
         * was built with frame-pointer support enabled:
         */
        regs_user_copy->bp = user_regs->bp;

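        /*
         * Most system calls don't save these callee-saved registers,
         * so don't report them: -1 is stored instead.
         */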
        regs_user_copy->bx = -1;
        regs_user_copy->r12 = -1;
        regs_user_copy->r13 = -1;
        regs_user_copy->r14 = -1;
        regs_user_copy->r15 = -1;
        /*
         * For this to be at all useful, we need a reasonable guess for
         * the ABI.  Be careful: we're in NMI context and we're treating
         * current as the current task, so we must not look at any other
         * percpu state that might change across context switches.
         */
        regs_user->abi = user_64bit_mode(user_regs) ?
                PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

        regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */