cpuidle: Move trace_cpu_idle() into generic code
author		Peter Zijlstra <peterz@infradead.org>
		Wed, 12 Aug 2020 10:27:10 +0000 (12:27 +0200)
committer	Peter Zijlstra <peterz@infradead.org>
		Wed, 26 Aug 2020 10:41:54 +0000 (12:41 +0200)
Remove trace_cpu_idle() from the arch_cpu_idle() implementations and
put it in the generic code, right before disabling RCU. Gets rid of
more trace_*_rcuidle() users.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Marco Elver <elver@google.com>
Link: https://lkml.kernel.org/r/20200821085348.428433395@infradead.org
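Below is a minimal C sketch of the resulting generic idle path, reconstructed from the kernel/sched/idle.c hunk at the end of this patch (simplified, not the verbatim upstream code): the tracepoints now fire in default_idle_call() while RCU is still watching, bracketing the rcu_idle_enter()/rcu_idle_exit() section that calls into the architecture code.

	/*
	 * Sketch of default_idle_call() after this change: trace_cpu_idle()
	 * runs before rcu_idle_enter() and after rcu_idle_exit(), so the
	 * plain tracepoint can be used instead of trace_cpu_idle_rcuidle().
	 */
	void __cpuidle default_idle_call(void)
	{
		if (current_clr_polling_and_test()) {
			local_irq_enable();
		} else {
			trace_cpu_idle(1, smp_processor_id());	/* RCU still watching */
			stop_critical_timings();
			rcu_idle_enter();			/* RCU stops watching */
			arch_cpu_idle();			/* arch idle code no longer traces */
			rcu_idle_exit();			/* RCU watching again */
			start_critical_timings();
			trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
		}
	}

Since the trace_*_rcuidle() variants exist only so that tracepoints can be issued from RCU-idle context, hoisting the events above rcu_idle_enter() lets the architectures drop their trace_cpu_idle_rcuidle() calls entirely, as the per-file hunks below show.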
arch/arm/mach-omap2/pm34xx.c
arch/arm64/kernel/process.c
arch/s390/kernel/idle.c
arch/x86/kernel/process.c
kernel/sched/idle.c

diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 6df395fff971b08f5b24cc67dec547d710ab9845..f5dfddf492e210979564f29c0cfa80b9f2850f0a 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -298,11 +298,7 @@ static void omap3_pm_idle(void)
        if (omap_irq_pending())
                return;
 
-       trace_cpu_idle_rcuidle(1, smp_processor_id());
-
        omap_sram_idle();
-
-       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index b63ce4c54cfe9c3804db3814cdb5b26b6aac38e5..f1804496b93508caad6174a635dbe91e98dfeedf 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
-       trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
-       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 88bb42ca50084769c155acdcb3c1876fac07a287..c73f50649e7e41092c741635f5eed5f52afbd9e5 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -33,14 +33,13 @@ void enabled_wait(void)
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        clear_cpu_flag(CIF_NOHZ_DELAY);
 
-       trace_cpu_idle_rcuidle(1, smp_processor_id());
        local_irq_save(flags);
        /* Call the assembler magic in entry.S */
        psw_idle(idle, psw_mask);
        local_irq_restore(flags);
-       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 
        /* Account time spent with enabled wait psw loaded as idle time. */
+       /* XXX seqcount has tracepoints that require RCU */
        write_seqcount_begin(&idle->seqcount);
        idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
        idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 994d8393f2f7b078b7e5f4c0111d06e01ccae994..13ce616cc7afb7af965fde21772978bee8e07a58 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -684,9 +684,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-       trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
-       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -792,7 +790,6 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static __cpuidle void mwait_idle(void)
 {
        if (!current_set_polling_and_test()) {
-               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
@@ -804,7 +801,6 @@ static __cpuidle void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
-               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ea3a0989dc4cf7e674fbbac83874157f0fd1c386..f324dc36fc43de36949d7a0bce547bd3add7c12b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -91,11 +91,14 @@ void __cpuidle default_idle_call(void)
        if (current_clr_polling_and_test()) {
                local_irq_enable();
        } else {
+
+               trace_cpu_idle(1, smp_processor_id());
                stop_critical_timings();
                rcu_idle_enter();
                arch_cpu_idle();
                rcu_idle_exit();
                start_critical_timings();
+               trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        }
 }