/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
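/*
 * Patch a single instruction in kernel text. aarch64_insn_patch_text()
 * synchronizes against other CPUs (via stop_machine()), so the swap is
 * safe on live text.
 */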
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = addr;
	insns[0] = opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}
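/*
 * A probed instruction that cannot be simulated is executed "out of line"
 * (xol): the original instruction is copied into a per-probe slot, the
 * slot is single-stepped, and the PC is afterwards restored to the
 * instruction following the probe point.
 */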
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	patch_text(p->ainsn.api.insn, p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * Needs restoring of return address after stepping xol.
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed xol. No need to adjust the PC */
	p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}
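/*
 * arm_kprobe_decode_insn() classifies the probed instruction as
 * INSN_REJECTED (cannot be probed), INSN_GOOD_NO_SLOT (must be simulated,
 * e.g. PC-relative instructions), or INSN_GOOD (safe to step out of line).
 */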
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
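/*
 * Slot pages are allocated executable and immediately made read-only;
 * instructions are written into a slot via patch_text() above rather
 * than through this mapping directly.
 */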
void *alloc_insn_page(void)
{
	void *page;

	page = vmalloc_exec(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page, 1);

	return page;
}
/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}
/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
/*
 * When PSTATE.D is set (masked), software step exceptions can not be
 * triggered anywhere.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set while entering into any exception
 * mode, however software clears it for any normal (non-debug-exception)
 * mode in the exception entry. Therefore, when we are entering the kprobe
 * breakpoint handler from any normal mode, the SPSR.D bit is already
 * cleared; it is set only when we enter from a debug exception mode.
 * Since we always need to generate a single step exception after a kprobe
 * breakpoint exception, we must clear the bit unconditionally once we are
 * sure that the current breakpoint exception is for a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}
/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single-stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}
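/*
 * Step the probed instruction from its xol slot, or simulate it in
 * software. @reenter is non-zero when the probe was hit from within
 * another kprobe's handler; the outer kprobe's state is then saved and
 * restored around the nested step.
 */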
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}
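/*
 * Runs once the probed instruction has been stepped or simulated: restore
 * the PC for non-branching instructions, pop any outer kprobe context and
 * invoke the user post_handler.
 */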
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * post_handler can hit a breakpoint and single step
		 * again, so we enable D-flag for recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path and there is no need to
			 * single step. Just reset the current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and can step thru
			 * before return; keep the PSTATE D-flag enabled until
			 * pre_handler returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}
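/*
 * A step exception belongs to kprobes only if a step is pending and the
 * PC matches the address recorded when the step was armed (the slot
 * address plus one instruction).
 */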
static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}
static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}
static struct step_hook kprobes_step_hook = {
	.fn = kprobe_single_step_handler,
};
static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}
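/*
 * Arming a probe patches the target with a BRK instruction carrying the
 * kprobes immediate (BRK64_ESR_KPROBES). The trap is routed to
 * kprobe_breakpoint_handler() via the break hook below, registered in
 * arch_init_kprobes().
 */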
static struct break_hook kprobes_break_hook = {
	.imm = BRK64_ESR_KPROBES,
	.fn = kprobe_breakpoint_handler,
};
/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
					(unsigned long)__exception_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
					(unsigned long)__idmap_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}
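/*
 * Kretprobe support: arch_prepare_kretprobe() below replaces a probed
 * function's return address (x30) with kretprobe_trampoline, so the
 * function "returns" into the trampoline, which in turn calls
 * trampoline_probe_handler() to run the handlers and recover the real
 * return address.
 */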
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_step_hook(&kprobes_step_hook);

	return 0;
}
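/*
 * Example (illustrative only, not part of this file): a minimal module
 * exercising this arch support. The probed symbol is an assumption of
 * the example.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_open",
 *	};
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre: pc=%llx\n", regs->pc);
 *		return 0;	// 0: proceed to single-step/simulate
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		kp.pre_handler = handler_pre;
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 */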