ARM: 8678/1: ftrace: Adds support for CONFIG_DYNAMIC_FTRACE_WITH_REGS
author    Abel Vesa <abelvesa@linux.com>
Fri, 26 May 2017 20:49:47 +0000 (21:49 +0100)
committer Russell King <rmk+kernel@armlinux.org.uk>
Sun, 18 Jun 2017 21:25:16 +0000 (22:25 +0100)
The DYNAMIC_FTRACE_WITH_REGS configuration makes it possible for an
ftrace operation to specify whether registers need to be saved/restored
by the ftrace handler. This is needed by kgraft and possibly other
ftrace-based tools, and the ARM architecture currently lacks this
feature. It would also be the first step towards supporting the
"Kprobes-on-ftrace" optimization on ARM.

This patch introduces a new ftrace handler that stores the registers
on the stack before calling the next stage. The registers are restored
from the stack before going back to the instrumented function.
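
To illustrate why the full pt_regs matters here (again, not part of
this patch): because ftrace_regs_caller restores the saved registers,
including the saved PC, before returning, a handler can rewrite
regs->ARM_pc and have execution resume in a different function, which
is the mechanism live-patching tools such as kgraft rely on. A hedged
sketch; my_fixed_function is hypothetical, and an ops whose handler
changes the saved PC is also expected to set FTRACE_OPS_FL_IPMODIFY:

    #include <linux/ftrace.h>
    #include <linux/ptrace.h>

    /* Hypothetical replacement function; in practice it must have the
     * same signature as the function it replaces (void used here only
     * for brevity). */
    extern void my_fixed_function(void);

    /* Rewriting the saved PC makes the trampoline resume execution in
     * the replacement function; the replacement then returns straight
     * to the original caller via the restored LR. */
    static void my_redirect_handler(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op, struct pt_regs *regs)
    {
            regs->ARM_pc = (unsigned long)my_fixed_function;
    }

Such a handler would be plugged into an ftrace_ops like the one above,
with FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY in .flags.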

A side effect of this patch is that it enables support for
ftrace_modify_call(), since it defines ARCH_SUPPORTS_FTRACE_OPS for the
ARM architecture.

Signed-off-by: Abel Vesa <abelvesa@linux.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
arch/arm/Kconfig
arch/arm/include/asm/ftrace.h
arch/arm/kernel/entry-ftrace.S
arch/arm/kernel/ftrace.c

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c1a35f1583872d2ce39db5c1cacce28be48ccb1..730d456e2843d8fa14071867655d9a059dd5467d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -56,6 +56,7 @@ config ARM
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_EXIT_THREAD
        select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 22b73112b75f2070e440068184f9655cff781afe..f379881d5cc3feba90ebda6a873fb4fb950a1cdf 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,10 @@
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR            ((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5dd549e3b102d4a77f493e1549e02d..efcd9f25a14bfbc13eeea1108a7d47f73d168604 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
 2:     mcount_exit
 .endm
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+       sub     sp, sp, #8      @ space for PC and CPSR OLD_R0,
+                               @ OLD_R0 will overwrite previous LR
+
+       add     ip, sp, #12     @ move in IP the value of SP as it was
+                               @ before the push {lr} of the mcount mechanism
+
+       str     lr, [sp, #0]    @ store LR instead of PC
+
+       ldr     lr, [sp, #8]    @ get previous LR
+
+       str     r0, [sp, #8]    @ write r0 as OLD_R0 over previous LR
+
+       stmdb   sp!, {ip, lr}
+       stmdb   sp!, {r0-r11, lr}
+
+       @ stack content at this point:
+       @ 0  4          48   52       56            60   64    68       72
+       @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+
+       mov r3, sp                              @ struct pt_regs*
+
+       ldr r2, =function_trace_op
+       ldr r2, [r2]                            @ pointer to the current
+                                               @ function tracing op
+
+       ldr     r1, [sp, #S_LR]                 @ lr of instrumented func
+
+       ldr     lr, [sp, #S_PC]                 @ get LR
+
+       mcount_adjust_addr      r0, lr          @ instrumented function
+
+       .globl ftrace_regs_call
+ftrace_regs_call:
+       bl      ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       .globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+       mov     r0, r0
+#endif
+
+       @ pop saved regs
+       ldmia   sp!, {r0-r12}                   @ restore r0 through r12
+       ldr     ip, [sp, #8]                    @ restore PC
+       ldr     lr, [sp, #4]                    @ restore LR
+       ldr     sp, [sp, #0]                    @ restore SP
+       mov     pc, ip                          @ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+       sub     r0, fp, #4              @ lr of instrumented routine (parent)
+
+       @ called from __ftrace_regs_caller
+       ldr     r1, [sp, #S_PC]         @ instrumented routine (func)
+       mcount_adjust_addr      r1, r1
+
+       mov     r2, fp                  @ frame pointer
+       bl      prepare_ftrace_return
+
+       @ pop registers saved in ftrace_regs_caller
+       ldmia   sp!, {r0-r12}                   @ restore r0 through r12
+       ldr     ip, [sp, #8]                    @ restore PC
+       ldr     lr, [sp, #4]                    @ restore LR
+       ldr     sp, [sp, #0]                    @ restore SP
+       mov     pc, ip                          @ return
+
+.endm
+#endif
+#endif
+
 .macro __ftrace_caller suffix
        mcount_enter
 
        mcount_get_lr   r1                      @ lr of instrumented func
        mcount_adjust_addr      r0, lr          @ instrumented function
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+       ldr r2, =function_trace_op
+       ldr r2, [r2]                            @ pointer to the current
+                                               @ function tracing op
+       mov r3, #0                              @ regs is NULL
+#endif
+
        .globl ftrace_call\suffix
 ftrace_call\suffix:
        bl      ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
        __ftrace_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+       __ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
        __ftrace_graph_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+       __ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
 #endif
 
 .purgem mcount_enter
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 833c991075a18decd0fc64f06d1b188bb5c19f37..5617932a83dfaab416a05e429642b2d19b0a4438 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 
        ret = ftrace_modify_code(pc, 0, new, false);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+       if (!ret) {
+               pc = (unsigned long)&ftrace_regs_call;
+               new = ftrace_call_replace(pc, (unsigned long)func);
+
+               ret = ftrace_modify_code(pc, 0, new, false);
+       }
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
        if (!ret) {
                pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        unsigned long ip = rec->ip;
 
        old = ftrace_nop_replace(rec);
+
+       new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+       return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+                               unsigned long addr)
+{
+       unsigned long new, old;
+       unsigned long ip = rec->ip;
+
+       old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
        new = ftrace_call_replace(ip, adjust_address(rec, addr));
 
        return ftrace_modify_code(rec->ip, old, new, true);
 }
 
+#endif
+
 int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 extern unsigned long ftrace_graph_call;
 extern unsigned long ftrace_graph_call_old;
 extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);
 
 static int __ftrace_modify_caller(unsigned long *callsite,
                                  void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
                                     ftrace_graph_caller,
                                     enable);
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+       if (!ret)
+               ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+                                    ftrace_graph_regs_caller,
+                                    enable);
+#endif
+
+
 #ifdef CONFIG_OLD_MCOUNT
        if (!ret)
                ret = __ftrace_modify_caller(&ftrace_graph_call_old,