// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/probes/kprobes/core.c
 *
 * Kprobes on ARM
 *
 * Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2006, 2007 Motorola Inc.
 *
 * Nicolas Pitre <nico@marvell.com>
 * Copyright (C) 2007 Marvell Ltd.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/stringify.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/traps.h>
#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#include <asm/sections.h>

#include "../decode-arm.h"
#include "../decode-thumb.h"
#include "core.h"

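/*
 * The lesser of MAX_STACK_SIZE and the space remaining between 'addr'
 * and the top of the current kernel stack (THREAD_START_SP).
 */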
#define MIN_STACK_SIZE(addr)                            \
        min((unsigned long)MAX_STACK_SIZE,              \
            (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

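/*
 * Make a freshly written range of instructions visible to instruction
 * fetch, e.g. after copying a probed instruction into its slot.
 */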
#define flush_insns(addr, size)                         \
        flush_icache_range((unsigned long)(addr),       \
                           (unsigned long)(addr) +      \
                           (size))

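/* Per-CPU state: the kprobe currently being handled and its control block. */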
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);


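/*
 * Prepare a probe: decode the instruction at p->addr using the matching
 * ARM/Thumb16/Thumb32 decode tables and, if the instruction must be
 * executed out of line, copy it into an instruction slot.
 */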
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        kprobe_opcode_t insn;
        kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
        unsigned long addr = (unsigned long)p->addr;
        bool thumb;
        kprobe_decode_insn_t *decode_insn;
        const union decode_action *actions;
        int is;
        const struct decode_checker **checkers;

#ifdef CONFIG_THUMB2_KERNEL
        thumb = true;
        addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
        insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
        if (is_wide_instruction(insn)) {
                u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
                insn = __opcode_thumb32_compose(insn, inst2);
                decode_insn = thumb32_probes_decode_insn;
                actions = kprobes_t32_actions;
                checkers = kprobes_t32_checkers;
        } else {
                decode_insn = thumb16_probes_decode_insn;
                actions = kprobes_t16_actions;
                checkers = kprobes_t16_checkers;
        }
#else /* !CONFIG_THUMB2_KERNEL */
        thumb = false;
        if (addr & 0x3)
                return -EINVAL;
        insn = __mem_to_opcode_arm(*p->addr);
        decode_insn = arm_probes_decode_insn;
        actions = kprobes_arm_actions;
        checkers = kprobes_arm_checkers;
#endif

        p->opcode = insn;
        p->ainsn.insn = tmp_insn;

        switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
        case INSN_REJECTED:     /* not supported */
                return -EINVAL;

        case INSN_GOOD:         /* instruction uses slot */
                p->ainsn.insn = get_insn_slot();
                if (!p->ainsn.insn)
                        return -ENOMEM;
                for (is = 0; is < MAX_INSN_SIZE; ++is)
                        p->ainsn.insn[is] = tmp_insn[is];
                flush_insns(p->ainsn.insn,
                                sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
                p->ainsn.insn_fn = (probes_insn_fn_t *)
                                        ((uintptr_t)p->ainsn.insn | thumb);
                break;

        case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
                p->ainsn.insn = NULL;
                break;
        }

        /*
         * Never instrument an insn like 'str r0, [sp, +/-r1]'. Insns like
         * 'str r0, [sp, #-68]' must also be prohibited.
         * See __und_svc.
         */
        if ((p->ainsn.stack_space < 0) ||
                        (p->ainsn.stack_space > MAX_STACK_SIZE))
                return -EINVAL;

        return 0;
}

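/*
 * Arm the probe by patching a breakpoint over the original instruction.
 * For ARM, the breakpoint inherits the probed instruction's condition
 * field, so the exception is only taken when the original instruction
 * would actually have executed.
 */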
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        unsigned int brkp;
        void *addr;

        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
                /* Remove any Thumb flag */
                addr = (void *)((uintptr_t)p->addr & ~1);

                if (is_wide_instruction(p->opcode))
                        brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
                else
                        brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
        } else {
                kprobe_opcode_t insn = p->opcode;

                addr = p->addr;
                brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;

                if (insn >= 0xe0000000)
                        brkp |= 0xe0000000;  /* Unconditional instruction */
                else
                        brkp |= insn & 0xf0000000;  /* Copy condition from insn */
        }

        patch_text(addr, brkp);
}

/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
struct patch {
        void *addr;
        unsigned int insn;
};

static int __kprobes_remove_breakpoint(void *data)
{
        struct patch *p = data;
        __patch_text(p->addr, p->insn);
        return 0;
}

void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
{
        struct patch p = {
                .addr = addr,
                .insn = insn,
        };
        stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
                                cpu_online_mask);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
                        p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                free_insn_slot(p->ainsn.insn, 0);
                p->ainsn.insn = NULL;
        }
}

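/*
 * Support for probe reentry: when a probe is hit from within another
 * probe's handler, the interrupted kprobe context is saved here and
 * restored once the nested probe has been single-stepped.
 */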
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

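/*
 * The probed instruction's condition check failed, so it would not have
 * executed: just step the PC (and, on Thumb, the IT state) past it.
 */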
static void __kprobes
singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_THUMB2_KERNEL
        regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
        if (is_wide_instruction(p->opcode))
                regs->ARM_pc += 4;
        else
                regs->ARM_pc += 2;
#else
        regs->ARM_pc += 4;
#endif
}

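/*
 * Execute the probed instruction out of line, via the emulation or slot
 * routine that the decoder installed in ainsn.insn_singlestep.
 */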
static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}

/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete.  The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur;
        struct kprobe_ctlblk *kcb;

        kcb = get_kprobe_ctlblk();
        cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
        /*
         * First look for a probe which was registered using an address with
         * bit 0 set; this is the usual situation for pointers to Thumb code.
         * If not found, fall back to looking for one with bit 0 clear.
         */
        p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
        if (!p)
                p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
        p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

        if (p) {
                if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
                        /*
                         * Probe hit but the conditional execution check
                         * failed, so just skip the instruction and continue
                         * as if nothing had happened. In this case we can
                         * skip the recursion check too.
                         */
                        singlestep_skip(p, regs);
                } else if (cur) {
                        /* Kprobe is pending, so we're recursing. */
                        switch (kcb->kprobe_status) {
                        case KPROBE_HIT_ACTIVE:
                        case KPROBE_HIT_SSDONE:
                        case KPROBE_HIT_SS:
                                /* A pre- or post-handler probe got us here. */
                                kprobes_inc_nmissed_count(p);
                                save_previous_kprobe(kcb);
                                set_current_kprobe(p);
                                kcb->kprobe_status = KPROBE_REENTER;
                                singlestep(p, regs, kcb);
                                restore_previous_kprobe(kcb);
                                break;
                        case KPROBE_REENTER:
                                /* A nested probe was hit in FIQ; this is a bug. */
                                pr_warn("Unrecoverable kprobe detected.\n");
                                dump_kprobe(p);
                                /* fall through */
                        default:
                                /* impossible cases */
                                BUG();
                        }
                } else {
                        /* Probe hit and conditional execution check ok. */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing. If we have a
                         * pre-handler and it returned non-zero, it has
                         * modified the execution path, so there is no need
                         * for single-stepping; just reset the current kprobe
                         * and exit.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                kcb->kprobe_status = KPROBE_HIT_SS;
                                singlestep(p, regs, kcb);
                                if (p->post_handler) {
                                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                        p->post_handler(p, regs, 0);
                                }
                        }
                        reset_current_kprobe();
                }
        } else {
                /*
                 * The probe was removed and a race is in progress.
                 * There is nothing we can do about it.  Let's restart
                 * the instruction.  By the time we can restart, the
                 * real instruction will be there.
                 */
        }
}

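/*
 * Entry point from the undefined-instruction hooks registered in
 * arch_init_kprobes() below. IRQs are disabled across kprobe_handler(),
 * as its contract requires.
 */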
static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
        unsigned long flags;
        local_irq_save(flags);
        kprobe_handler(regs);
        local_irq_restore(flags);
        return 0;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe and the PC to point back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->ARM_pc = (long)cur->addr;
                if (kcb->kprobe_status == KPROBE_REENTER) {
                        restore_previous_kprobe(kcb);
                } else {
                        reset_current_kprobe();
                }
                break;

        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting; the
                 * npre/npostfault counts could also be used to account
                 * for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because an instruction in the pre/post
                 * handler caused a page fault. This can happen when a
                 * handler accesses user space via copy_from_user(),
                 * get_user(), etc. Let the user-specified fault handler
                 * try to fix it.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
                        return 1;
                break;

        default:
                break;
        }

        return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        /*
         * notify_die() is currently never called on ARM,
         * so this callback is currently empty.
         */
        return NOTIFY_DONE;
}

/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11 to the user return-handler. This is
 * not a complete pt_regs structure, but it should be more than
 * sufficient for kretprobe handlers, which are normally interested
 * only in r0 anyway.
 */
void __naked __kprobes kretprobe_trampoline(void)
{
        __asm__ __volatile__ (
                "stmdb  sp!, {r0 - r11}         \n\t"   /* build the pt_regs view */
                "mov    r0, sp                  \n\t"
                "bl     trampoline_handler      \n\t"   /* returns the real return address */
                "mov    lr, r0                  \n\t"   /* resume at the real caller */
                "ldmia  sp!, {r0 - r11}         \n\t"
#ifdef CONFIG_THUMB2_KERNEL
                "bx     lr                      \n\t"
#else
                "mov    pc, lr                  \n\t"
#endif
                : : : "memory");
}

/* Called from kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task, either because multiple functions in the call path have
         * return probes installed on them, or because more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __this_cpu_write(current_kprobe, NULL);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }

        return (void *)orig_ret_address;
}

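/*
 * Called at function entry when a kretprobe fires: remember the real
 * return address and divert lr to the trampoline.
 */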
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;

        /* Replace the return addr with trampoline addr. */
        regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}

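/*
 * ARM plants no kprobe on the trampoline itself, so there is never a
 * trampoline probe to report here.
 */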
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

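/*
 * The breakpoints planted by arch_arm_kprobe() are undefined
 * instructions, so probe hits arrive as undefined-instruction
 * exceptions. These hooks route them (in SVC mode only) to
 * kprobe_trap_handler().
 */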
#ifdef CONFIG_THUMB2_KERNEL

static struct undef_hook kprobes_thumb16_break_hook = {
        .instr_mask     = 0xffff,
        .instr_val      = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = kprobe_trap_handler,
};

static struct undef_hook kprobes_thumb32_break_hook = {
        .instr_mask     = 0xffffffff,
        .instr_val      = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = kprobe_trap_handler,
};

#else  /* !CONFIG_THUMB2_KERNEL */

static struct undef_hook kprobes_arm_break_hook = {
        .instr_mask     = 0x0fffffff,
        .instr_val      = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */

int __init arch_init_kprobes(void)
{
        arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
        register_undef_hook(&kprobes_thumb16_break_hook);
        register_undef_hook(&kprobes_thumb32_break_hook);
#else
        register_undef_hook(&kprobes_arm_break_hook);
#endif
        return 0;
}

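/*
 * Refuse to place probes in code the kprobes machinery itself depends
 * on: exception entry, IRQ entry, identity-mapped text, and anything
 * marked __kprobes.
 */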
bool arch_within_kprobe_blacklist(unsigned long addr)
{
        void *a = (void *)addr;

        return __in_irqentry_text(addr) ||
               in_entry_text(addr) ||
               in_idmap_text(addr) ||
               memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
}