arch/x86/kernel/unwind_orc.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

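/*
 * Warn about inconsistent ORC data.  Each call site prints its message at
 * most once, and the deferred printk variant is used because the unwinder
 * can be invoked from contexts where printing synchronously would not be
 * safe.
 */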
#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static DEFINE_MUTEX(sort_mutex);
int *cur_orc_ip_table = __start_orc_unwind_ip;
struct orc_entry *cur_orc_table = __start_orc_unwind;

unsigned int lookup_num_blocks;
bool orc_init;

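/*
 * Each .orc_unwind_ip entry is a 32-bit, self-relative offset: the text
 * address it describes is the entry's own address plus the stored value.
 * For example (addresses purely illustrative), an entry at
 * 0xffffffff82345678 containing -0x01345678 describes ip 0xffffffff81000000.
 * This keeps the IP table at 4 bytes per entry regardless of pointer size.
 */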
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

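/*
 * Example of the "rightmost match" semantics below: for a sorted IP table
 * covering { 0x10, 0x20, 0x20, 0x30 } (illustrative values), a lookup of
 * 0x25 returns the ORC entry paired with the second 0x20, i.e. the last
 * entry whose start address is <= the requested ip.
 */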
static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the static ftrace trampolines that are
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across an ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return address on
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

/* Fake frame pointer entry -- used as a fallback for generated code */
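/*
 * This describes the frame set up by a conventional "push %rbp; mov %rsp,
 * %rbp" prologue: the caller's stack pointer is RBP + 16 (skipping the
 * saved RBP and the return address), and the caller's RBP was saved at
 * RBP itself, i.e. 16 bytes below that recovered stack pointer.
 */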
static struct orc_entry orc_fp_entry = {
	.type		= ORC_TYPE_CALL,
	.sp_reg		= ORC_REG_BP,
	.sp_offset	= 16,
	.bp_reg		= ORC_REG_PREV_SP,
	.bp_offset	= -16,
	.end		= 0,
};

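/*
 * orc_lookup[] is a fast index into the (much larger) ORC tables: block i
 * covers the LOOKUP_BLOCK_SIZE bytes of text starting at
 * LOOKUP_START_IP + i * LOOKUP_BLOCK_SIZE, and orc_lookup[i] holds the index
 * of the ORC entry covering the block's first address.  A lookup therefore
 * only has to binary-search the small slice of entries from orc_lookup[idx]
 * through orc_lookup[idx + 1] instead of the whole table.
 */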
static struct orc_entry *orc_find(unsigned long ip)
{
	struct orc_entry *orc;

	if (!orc_init)
		return NULL;

	if (ip == 0)
		return &null_orc_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely(idx >= lookup_num_blocks - 1)) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

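/*
 * The .orc_unwind_ip values are self-relative, so moving an entry to a new
 * location changes what it refers to.  When sort() swaps two entries, the
 * stored offsets have to be re-biased by the distance between the slots
 * (delta) so that each still resolves to the same absolute text address.
 * The matching .orc_unwind entries are swapped alongside them to keep the
 * two tables parallel.
 */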
static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

#ifdef CONFIG_MODULES
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

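/*
 * Boot-time setup: sort the vmlinux ORC tables (the linker doesn't produce
 * a globally sorted table across object files) and then fill in
 * orc_lookup[], recording for each LOOKUP_BLOCK_SIZE block of text which
 * ORC entry covers its start.  On any inconsistency the unwinder is simply
 * left disabled (orc_init stays false) rather than risking bad unwinds
 * later.
 */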
void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table.  Disabling unwinder.\n");
		return;
	}

	/* Sort the .orc_unwind and .orc_unwind_ip tables: */
	sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
	     orc_sort_swap);

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks - 1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks - 1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

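/*
 * Return the location of the word that supplied state->ip: the ip slot of
 * the saved pt_regs when one was found, otherwise the return address one
 * word below state->sp (for ORC_TYPE_CALL frames, unwind_next_frame()
 * reads the return address at sp - sizeof(long) and then sets
 * state->sp = sp).
 */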
unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

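/*
 * A stack slot may only be dereferenced if it lies on the stack currently
 * being walked, or on another stack that get_stack_info() recognizes and
 * that hasn't been visited yet (state->stack_mask records visited stacks,
 * which keeps corrupt data from sending the unwinder around in circles).
 */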
static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

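/*
 * An IRET frame only contains the tail of pt_regs (ip, cs, flags, sp, ss).
 * Backing the pointer up by IRET_FRAME_OFFSET lets those fields be read
 * through a struct pt_regs even though the earlier register slots don't
 * actually exist on the stack.
 */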
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

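/*
 * Advance one frame.  The ORC entry for the current ip describes how to get
 * from this frame to its caller: which register (plus offset) holds the
 * previous stack pointer, what kind of frame sits there (a plain return
 * address, a full pt_regs, or an iret-style partial pt_regs), and where the
 * previous frame pointer was stored.  Each step re-validates the addresses
 * it reads so corrupt stacks produce an error instead of a crash.
 */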
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * Decrement call return addresses by one so they work for sibling
	 * calls and calls to noreturn functions.
	 */
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R10 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r10;
		break;

	case ORC_REG_R13:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg R13 at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->r13;
		break;

	case ORC_REG_DI:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DI at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->di;
		break;

	case ORC_REG_DX:
		if (!state->regs || !state->full_regs) {
			orc_warn("missing regs for base reg DX at ip %pB\n",
				 (void *)state->ip);
			goto err;
		}
		sp = state->regs->dx;
		break;

	default:
		orc_warn("unknown SP base reg %d for ip %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn("can't dereference iret registers at %p for ip %pB\n",
				 (void *)sp, (void *)orig_ip);
			goto err;
		}

		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (state->regs && state->full_regs)
			state->bp = state->regs->bp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn("stack going in the wrong direction? ip=%pB\n",
			 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

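/*
 * Set up the initial unwind state from one of three starting points: the
 * caller-supplied pt_regs, the current registers (when unwinding the task
 * that is running this code), or the switch frame saved at task->thread.sp
 * for a sleeping task.  The caller's first_frame / regs->sp then tells us
 * how far to fast-forward before reporting frames.
 */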
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU.  This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto done;

	if (regs) {
		if (user_mode(regs))
			goto done;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp;
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack.  It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at.  Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp <= (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

done:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return;
}
EXPORT_SYMBOL_GPL(__unwind_start);