tick/nohz: Only wake up a single target cpu when kicking a task
authorFrederic Weisbecker <frederic@kernel.org>
Wed, 12 May 2021 23:29:20 +0000 (01:29 +0200)
committerIngo Molnar <mingo@kernel.org>
Thu, 13 May 2021 12:21:22 +0000 (14:21 +0200)
When adding a tick dependency to a task, it's necessary to
wake up the CPU where the task resides so that it re-evaluates
its tick dependencies.

However, the current code wakes up all nohz_full CPUs, which
is unnecessary.

Switch to waking up only the task's CPU, relying on the ordering of
writes to task->cpu and task->tick_dep_mask: either the kicker
observes the task's new CPU, or the task observes the new dependency
bit when it schedules in on that CPU.
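
For illustration only, the pairing can be sketched as a minimal
userspace analogue using C11 atomics. The names below (kick_side(),
switch_side() and the two variables) are illustrative stand-ins, not
kernel APIs, and the sequential main() merely exercises the two sides:

    /* Each side does STORE; full barrier; LOAD of the other variable,
     * so at least one side is guaranteed to observe the other's store. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int task_cpu = 0;        /* stand-in for p->cpu */
    static _Atomic int tick_dep_mask = 0;   /* stand-in for p->tick_dep_mask */

    /* Kicker: set the dependency bit (seq_cst RMW acts as a full barrier),
     * then read the task's CPU. Mirrors atomic_fetch_or() + task_cpu(). */
    static int kick_side(void)
    {
            atomic_fetch_or(&tick_dep_mask, 1);    /* STORE mask + full barrier */
            return atomic_load(&task_cpu);         /* LOAD cpu: which CPU to kick */
    }

    /* Scheduler: publish the new CPU (set_task_cpu()), full barrier (the
     * kernel's smp_mb__after_spin_lock()), then read the mask as
     * tick_nohz_task_switch() would. */
    static int switch_side(int new_cpu)
    {
            atomic_store(&task_cpu, new_cpu);      /* STORE cpu */
            atomic_thread_fence(memory_order_seq_cst);
            return atomic_load(&tick_dep_mask);    /* LOAD mask */
    }

    int main(void)
    {
            /* Run sequentially here just to show the two sides; under a real
             * race, sequential consistency rules out both loads missing the
             * other side's store. */
            int cpu_seen = kick_side();
            int mask_seen = switch_side(1);
            printf("kicker saw cpu=%d, switcher saw mask=%d\n",
                   cpu_seen, mask_seen);
            return 0;
    }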

[ mingo: Minor readability edit. ]

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210512232924.150322-7-frederic@kernel.org
kernel/time/tick-sched.c

index 89ec0abcd62b3ed8c6c8555eebd1d4a64ee965de..b90ca6635ea4c6775ec71a2c4a0bc6595be9a9b0 100644 (file)
@@ -322,6 +322,31 @@ void tick_nohz_full_kick_cpu(int cpu)
        irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 }
 
+static void tick_nohz_kick_task(struct task_struct *tsk)
+{
+       int cpu = task_cpu(tsk);
+
+       /*
+        * If the task concurrently migrates to another CPU,
+        * we guarantee it sees the new tick dependency upon
+        * schedule.
+        *
+        *
+        * set_task_cpu(p, cpu);
+        *   STORE p->cpu = @cpu
+        * __schedule() (switch to task 'p')
+        *   LOCK rq->lock
+        *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
+        *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
+        *      LOAD p->tick_dep_mask           LOAD p->cpu
+        */
+
+       preempt_disable();
+       if (cpu_online(cpu))
+               tick_nohz_full_kick_cpu(cpu);
+       preempt_enable();
+}
+
 /*
  * Kick all full dynticks CPUs in order to force these to re-evaluate
  * their dependency on the tick and restart it if necessary.
@@ -404,19 +429,8 @@ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
  */
 void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-       if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) {
-               if (tsk == current) {
-                       preempt_disable();
-                       tick_nohz_full_kick();
-                       preempt_enable();
-               } else {
-                       /*
-                        * Some future tick_nohz_full_kick_task()
-                        * should optimize this.
-                        */
-                       tick_nohz_full_kick_all();
-               }
-       }
+       if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
+               tick_nohz_kick_task(tsk);
 }
 EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);