s390/ftrace: optimize function graph caller code
author: Heiko Carstens <heiko.carstens@de.ibm.com>
Fri, 15 Aug 2014 10:33:46 +0000 (12:33 +0200)
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 9 Sep 2014 06:53:28 +0000 (08:53 +0200)
When the function graph tracer is disabled, we can skip three additional
instructions. So let's just do this.

So if function tracing is enabled but function graph tracing is
runtime disabled, we get away with a single unconditional branch.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/ftrace.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/mcount64.S

index bf246dae1367333c109f7f28a60789c2f97deb84..7b8e456d76c9ee05004b1bbf722394d17aad346e 100644 (file)
@@ -4,6 +4,7 @@
 #ifndef __ASSEMBLY__
 
 extern void _mcount(void);
+extern char ftrace_graph_caller_end;
 
 struct dyn_arch_ftrace { };
 
index 54d6493c4a561b050ad5e47bb21b6a041b6d496f..de55efa5b64e255914c1c9e9c95629eb8680f768 100644 (file)
@@ -170,6 +170,29 @@ out:
  * directly after the instructions. To enable the call we calculate
  * the original offset to prepare_ftrace_return and put it back.
  */
+
+#ifdef CONFIG_64BIT
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       static unsigned short offset = 0x0002;
+
+       return probe_kernel_write((void *) ftrace_graph_caller + 2,
+                                 &offset, sizeof(offset));
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       unsigned short offset;
+
+       offset = ((void *) &ftrace_graph_caller_end -
+                 (void *) ftrace_graph_caller) / 2;
+       return probe_kernel_write((void *) ftrace_graph_caller + 2,
+                                 &offset, sizeof(offset));
+}
+
+#else /* CONFIG_64BIT */
+
 int ftrace_enable_ftrace_graph_caller(void)
 {
        unsigned short offset;
@@ -188,5 +211,6 @@ int ftrace_disable_ftrace_graph_caller(void)
                                  &offset, sizeof(offset));
 }
 
+#endif /* CONFIG_64BIT */
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index c67a8bf0fd9afa48f4d906cffc7cc5e2594ebf07..5b33c83adde9d2ea78ccb8d174e3d85ab56a632b 100644 (file)
@@ -32,14 +32,17 @@ ENTRY(ftrace_caller)
        lg      %r14,0(%r14)
        basr    %r14,%r14
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+# The j instruction gets runtime patched to a nop instruction.
+# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
+#      j       .+4
+ENTRY(ftrace_graph_caller)
+       j       ftrace_graph_caller_end
        lg      %r2,168(%r15)
        lg      %r3,272(%r15)
-ENTRY(ftrace_graph_caller)
-# The bras instruction gets runtime patched to call prepare_ftrace_return.
-# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
-#      bras    %r14,prepare_ftrace_return
-       bras    %r14,0f
-0:     stg     %r2,168(%r15)
+       brasl   %r14,prepare_ftrace_return
+       stg     %r2,168(%r15)
+ftrace_graph_caller_end:
+       .globl  ftrace_graph_caller_end
 #endif
        aghi    %r15,160
        lmg     %r2,%r5,32(%r15)
This page took 0.025985 seconds and 5 git commands to generate.