sched: move around resched_task()
author Ingo Molnar <mingo@elte.hu>
Mon, 9 Jul 2007 16:51:59 +0000 (18:51 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 9 Jul 2007 16:51:59 +0000 (18:51 +0200)
move resched_task()/resched_cpu() into the 'public interfaces'
section of sched.c, for use by kernel/sched_fair/rt/idletask.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c

index 53c0ee742f690ea15407cf21ff7087fbcb1b6328..e642bfa61fe3c220de7c0e3f5f890f20082454de 100644
@@ -617,6 +617,58 @@ static inline struct rq *this_rq_lock(void)
        return rq;
 }
 
+/*
+ * resched_task - mark a task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+#ifdef CONFIG_SMP
+
+#ifndef tsk_is_polling
+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
+#endif
+
+static void resched_task(struct task_struct *p)
+{
+       int cpu;
+
+       assert_spin_locked(&task_rq(p)->lock);
+
+       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+               return;
+
+       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+
+       cpu = task_cpu(p);
+       if (cpu == smp_processor_id())
+               return;
+
+       /* NEED_RESCHED must be visible before we test polling */
+       smp_mb();
+       if (!tsk_is_polling(p))
+               smp_send_reschedule(cpu);
+}
+
+static void resched_cpu(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       if (!spin_trylock_irqsave(&rq->lock, flags))
+               return;
+       resched_task(cpu_curr(cpu));
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+#else
+static inline void resched_task(struct task_struct *p)
+{
+       assert_spin_locked(&task_rq(p)->lock);
+       set_tsk_need_resched(p);
+}
+#endif
+
 #include "sched_stats.h"
 
 /*
@@ -953,58 +1005,6 @@ static void deactivate_task(struct task_struct *p, struct rq *rq)
        p->array = NULL;
 }
 
-/*
- * resched_task - mark a task 'to be rescheduled now'.
- *
- * On UP this means the setting of the need_resched flag, on SMP it
- * might also involve a cross-CPU call to trigger the scheduler on
- * the target CPU.
- */
-#ifdef CONFIG_SMP
-
-#ifndef tsk_is_polling
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-#endif
-
-static void resched_task(struct task_struct *p)
-{
-       int cpu;
-
-       assert_spin_locked(&task_rq(p)->lock);
-
-       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
-               return;
-
-       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
-
-       cpu = task_cpu(p);
-       if (cpu == smp_processor_id())
-               return;
-
-       /* NEED_RESCHED must be visible before we test polling */
-       smp_mb();
-       if (!tsk_is_polling(p))
-               smp_send_reschedule(cpu);
-}
-
-static void resched_cpu(int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long flags;
-
-       if (!spin_trylock_irqsave(&rq->lock, flags))
-               return;
-       resched_task(cpu_curr(cpu));
-       spin_unlock_irqrestore(&rq->lock, flags);
-}
-#else
-static inline void resched_task(struct task_struct *p)
-{
-       assert_spin_locked(&task_rq(p)->lock);
-       set_tsk_need_resched(p);
-}
-#endif
-
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
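
For context, a minimal sketch of how one of the per-class files named in the commit message (e.g. kernel/sched_fair.c, which sched.c includes after this point) could call resched_task() once it is defined in the 'public interfaces' section. The helper name check_preempt_example() and the plain priority comparison are illustrative assumptions, not code from this commit; the only requirement taken from the patch is that the caller holds the runqueue lock.

/*
 * Illustrative only: a hypothetical preemption check in a per-class
 * scheduling file. Assumes rq->lock is held by the caller, which is
 * what the assert_spin_locked() in resched_task() expects.
 */
static void check_preempt_example(struct rq *rq, struct task_struct *p)
{
	/* If the newly woken task has higher priority (lower value),
	 * ask the currently running task to reschedule. */
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}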