sched: don't allow rt_runtime_us to be zero for groups having rt tasks
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee89b52ec511f70361c4f7fd586d976..52b98675acb2b04d51bb9130ba6f195a5a200dad 100644
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
        return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
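Relaxing the export from EXPORT_SYMBOL_GPL to EXPORT_SYMBOL makes task_nice() callable from any loadable module, not only GPL-compatible ones. A minimal sketch of an out-of-tree module using it (hypothetical module, written against the 2.6.25-era API):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static int __init nice_demo_init(void)
{
	/* task_nice() converts the caller's static priority back to a nice value */
	printk(KERN_INFO "nice_demo: current nice value is %d\n",
	       task_nice(current));
	return 0;
}

static void __exit nice_demo_exit(void)
{
}

module_init(nice_demo_init);
module_exit(nice_demo_exit);
/* any module license works now that the symbol is a plain EXPORT_SYMBOL */
MODULE_LICENSE("Dual BSD/GPL");
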
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
        time_slice = 0;
        if (p->policy == SCHED_RR) {
                time_slice = DEF_TIMESLICE;
-       } else {
+       } else if (p->policy != SCHED_FIFO) {
                struct sched_entity *se = &p->se;
                unsigned long flags;
                struct rq *rq;
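The old else branch computed a CFS slice even for SCHED_FIFO tasks, which have no timeslice at all; with the else-if, SCHED_FIFO now falls through with time_slice left at 0. The effect is visible from user space through sched_rr_get_interval(2), roughly as in this sketch (requires privileges to switch to a real-time policy):

#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	struct timespec ts;

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* with this fix a SCHED_FIFO task reports a zero interval
	 * instead of a CFS-derived slice */
	printf("interval: %ld s %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
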
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
 
        set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       if (tsk->sched_class->moved_group)
+               tsk->sched_class->moved_group(tsk);
+#endif
+
        if (on_rq) {
                if (unlikely(running))
                        tsk->sched_class->set_curr_task(rq);
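The new moved_group hook gives a scheduling class a chance to react when a task is migrated to a different task group. With CONFIG_FAIR_GROUP_SCHED the fair class can use it to re-place the entity on the new group's cfs_rq; a sketch of such a callback, along the lines of what sched_fair.c provides (task_cfs_rq, update_curr and place_entity are assumed from that file, this is not a verbatim copy):

#ifdef CONFIG_FAIR_GROUP_SCHED
/* called from sched_move_task() after set_task_rq() has switched groups,
 * so task_cfs_rq() already points at the new group's runqueue */
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);			/* sync the new queue's clock */
	place_entity(cfs_rq, &p->se, 1);	/* re-anchor the vruntime there */
}
#endif
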
@@ -7721,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
        if (runtime == RUNTIME_INF)
                return 1ULL << 16;
 
-       runtime *= (1ULL << 16);
-       div64_64(runtime, period);
-       return runtime;
+       return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
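The old to_ratio() body was buggy: div64_64() returns its quotient rather than dividing in place, and that return value was discarded, so the function handed back runtime << 16 instead of the ratio. The rewrite returns the 16.16 fixed-point ratio directly. A small user-space model of the conversion (RUNTIME_INF is modelled here as ~0ULL):

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF	((uint64_t)~0ULL)

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 16;

	return (runtime << 16) / period;	/* div64_64() in the kernel */
}

int main(void)
{
	/* 950 ms of runtime per 1 s period is 0.95 of a CPU,
	 * i.e. about 62259 in 16.16 fixed point (0.95 * 65536) */
	printf("%llu\n",
	       (unsigned long long)to_ratio(1000000000ULL, 950000000ULL));
	return 0;
}
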
@@ -7747,25 +7750,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
        return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+       struct task_struct *g, *p;
+       do_each_thread(g, p) {
+               if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+                       return 1;
+       } while_each_thread(g, p);
+       return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
        u64 rt_runtime, rt_period;
        int err = 0;
 
-       rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+       rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
        rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
        if (rt_runtime_us == -1)
-               rt_runtime = rt_period;
+               rt_runtime = RUNTIME_INF;
 
        mutex_lock(&rt_constraints_mutex);
+       read_lock(&tasklist_lock);
+       if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+               err = -EBUSY;
+               goto unlock;
+       }
        if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
                err = -EINVAL;
                goto unlock;
        }
-       if (rt_runtime_us == -1)
-               rt_runtime = RUNTIME_INF;
        tg->rt_runtime = rt_runtime;
  unlock:
+       read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
 
        return err;
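
The -EBUSY path is the user-visible change named in the subject line: a group's rt_runtime_us can no longer be set to zero while the group still contains real-time tasks, since a zero budget would starve them. A rough user-space illustration (the cgroup mount point and group name are made up, and the write is assumed to reach sched_group_set_rt_runtime() through the cpu controller's rt_runtime_us file):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical hierarchy: a cpu cgroup mounted at /cgroup with a
	 * group "rtgroup" that already holds a SCHED_FIFO/SCHED_RR task */
	int fd = open("/cgroup/cpu/rtgroup/cpu.rt_runtime_us", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* try to drop the group's rt budget to zero */
	if (write(fd, "0", 1) < 0)
		/* with this patch the kernel refuses with EBUSY instead of
		 * silently starving the group's real-time tasks */
		fprintf(stderr, "write: %s (errno=%d)\n", strerror(errno), errno);

	close(fd);
	return 0;
}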