// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
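
/*
 * The jump-optimized probe overwrites RELATIVEJUMP_SIZE bytes at the probed
 * address, so a read of any address inside that window may see the jump
 * instead of the original instruction.  This helper rebuilds the original
 * bytes into @buf from the copy saved in op->optinsn.copied_insn.
 */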
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimizing */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes may have been
	 * overwritten by the jump destination address. In that case the
	 * original bytes must be recovered from op->optinsn.copied_insn.
	 */
	if (probe_kernel_read(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* REX.W prefix */
	*addr++ = 0xbf;		/* movabs $val, %rdi */
#else
	*addr++ = 0xb8;		/* mov $val, %eax */
#endif
	*(unsigned long *)addr = val;
}
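
/*
 * The trampoline template below is copied into each optprobe buffer.  It
 * builds a pt_regs frame on the stack; the ASM_NOP5 placeholders at
 * optprobe_template_val and optprobe_template_call are later rewritten by
 * synthesize_set_arg1() and synthesize_relcall() to load the per-probe
 * optimized_kprobe pointer and to call optimized_callback().
 */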
87 ".pushsection .rodata\n"
88 "optprobe_template_func:\n"
89 ".global optprobe_template_entry\n"
90 "optprobe_template_entry:\n"
92 /* We don't bother saving the ss register */
97 ".global optprobe_template_val\n"
98 "optprobe_template_val:\n"
101 ".global optprobe_template_call\n"
102 "optprobe_template_call:\n"
104 /* Move flags to rsp */
105 " movq 144(%rsp), %rdx\n"
106 " movq %rdx, 152(%rsp)\n"
108 /* Skip flags entry */
111 #else /* CONFIG_X86_32 */
115 ".global optprobe_template_val\n"
116 "optprobe_template_val:\n"
118 ".global optprobe_template_call\n"
119 "optprobe_template_call:\n"
122 " addl $4, %esp\n" /* skip cs */
125 ".global optprobe_template_end\n"
126 "optprobe_template_end:\n"
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)
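/* On x86, kprobe_opcode_t is u8, so INT3_SIZE is one byte (the 0xcc opcode). */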

/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
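
/*
 * Copy and relocate instructions from @src until at least RELATIVEJUMP_SIZE
 * bytes are covered.  Fails if any copied instruction cannot be executed
 * out of line ("boosted") or if the range is reserved by ftrace,
 * alternatives or jump labels.
 */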
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

/* Check whether insn is an indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	/* Opcode 0xff with ModRM.reg 4 (jmp) or 5 (ljmp): (reg & 6) == 4 matches both */
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
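
/*
 * __indirect_thunk_start/end are linker symbols bracketing the
 * __x86_indirect_thunk_* retpoline thunks; a direct jump into that
 * region is effectively an indirect jump.
 */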
static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
	 * older gcc may still emit indirect jumps, so this check is added
	 * on top of the indirect-jump check above instead of replacing it.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}

/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and register setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr < (unsigned long)__entry_text_end)) ||
	    ((paddr >= (unsigned long)__irqentry_text_start) &&
	     (paddr < (unsigned long)__irqentry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code jumps into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem puts a breakpoint */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check if the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check if the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
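
/*
 * Resulting out-of-line buffer layout (a sketch):
 *
 *   [ template: save regs, mov $op -> arg1, call optimized_callback,
 *     restore regs ]                              <- TMPL_END_IDX
 *   [ copied instructions from op->kp.addr ]      <- op->optinsn.size bytes
 *   [ jmp back to op->kp.addr + op->optinsn.size ]
 */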
/*
 * Copy replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += RELATIVEJUMP_SIZE;

	/* We have to use text_poke() for the instruction buffer because it is RO */
	text_poke(slot, buf, len);
	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}

/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}

/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 to the first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}

/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must hold kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
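
/*
 * Called from the int3 kprobe handler: if this probe has an optimized
 * buffer, detour execution into the copied instructions (past the
 * template) instead of single-stepping the original instruction.
 */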
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);