sched: fix sched_rt.c:join/leave_domain
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

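/* Is any runqueue in this rq's root domain overloaded with RT tasks? */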
static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

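/*
 * A runqueue is "overloaded" when it has more than one runnable RT task
 * and at least one of them can migrate to another CPU.
 */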
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                rt_set_overload(rq);
                rq->rt.overloaded = 1;
        } else {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

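/*
 * Per-runqueue RT bookkeeping on enqueue/dequeue: the number of runnable
 * RT tasks, the highest queued RT priority and the number of tasks that
 * may migrate to other CPUs.
 */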
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
        if (p->prio < rq->rt.highest_prio)
                rq->rt.highest_prio = p->prio;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory++;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
        if (rq->rt.rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(p->prio < rq->rt.highest_prio);
                if (p->prio == rq->rt.highest_prio) {
                        /* recalculate */
                        array = &rq->rt.active;
                        rq->rt.highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rq->highest_prio alone */
        } else
                rq->rt.highest_prio = MAX_RT_PRIO;
        if (p->nr_cpus_allowed > 1)
                rq->rt.rt_nr_migratory--;

        update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

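/*
 * sched_yield() for RT tasks: requeue the current task at the tail of
 * its priority queue.
 */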
static void
yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if
         * the RT task is of higher priority than the current RT task.
         * RT tasks behave differently than other tasks. If
         * one gets preempted, we try to push it off to another queue.
         * So trying to keep a preempting RT task on the same
         * cache hot CPU will force the running RT task to
         * a cold CPU, wasting all the cache for the lower
         * RT task in hopes of saving some cache for an RT task
         * that is just being woken and probably will have
         * a cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

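/*
 * Pick the highest-priority queued RT task, or return NULL if no RT task
 * is runnable on this rq.
 */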
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

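/* Account the outgoing task's runtime and clear its exec_start stamp. */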
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

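/*
 * Is 'p' a candidate for migration?  It must not be running, it must be
 * allowed on the destination cpu (if one was given) and it must be
 * allowed on more than one cpu.
 */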
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        if (likely(rq->rt.rt_nr_running < 2))
                return NULL;

        idx = sched_find_first_bit(array->bitmap);
        if (unlikely(idx >= MAX_RT_PRIO)) {
                WARN_ON(1); /* rt_nr_running is bad */
                return NULL;
        }

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        next = list_entry(queue->next, struct task_struct, run_list);
        if (unlikely(pick_rt_task(rq, next, cpu)))
                goto out;

        if (queue->next->next != queue) {
                /* same prio task */
                next = list_entry(queue->next->next, struct task_struct,
                                  run_list);
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

 retry:
        /* slower, but more flexible */
        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
        if (unlikely(idx >= MAX_RT_PRIO))
                return NULL;

        queue = array->queue + idx;
        BUG_ON(list_empty(queue));

        list_for_each_entry(next, queue, run_list) {
                if (pick_rt_task(rq, next, cpu))
                        goto out;
        }

        goto retry;

 out:
        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

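/*
 * Find the cpus in the task's affinity whose runqueues run at the lowest
 * RT priority the task could still preempt.  The candidates are left in
 * *lowest_mask; the return value is the number of candidates (0 if none).
 */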
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int       lowest_prio = -1;
        int       lowest_cpu  = -1;
        int       count       = 0;
        int       cpu;

        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *lowest_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        /*
                         * if we already found a low RT queue
                         * and now we found this non-rt queue
                         * clear the mask and set our bit.
                         * Otherwise just return the queue as is
                         * and the count==1 will cause the algorithm
                         * to use the first bit found.
                         */
                        if (lowest_cpu != -1) {
                                cpus_clear(*lowest_mask);
                                cpu_set(rq->cpu, *lowest_mask);
                        }
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                lowest_cpu = cpu;
                                count = 0;
                        }
                        count++;
                } else
                        cpu_clear(cpu, *lowest_mask);
        }

        /*
         * Clear out all the set bits that represent
         * runqueues that were of higher prio than
         * the lowest_prio.
         */
        if (lowest_cpu > 0) {
                /*
                 * Perhaps we could add another cpumask op to
                 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
                 * Then that could be optimized to use memset and such.
                 */
                for_each_cpu_mask(cpu, *lowest_mask) {
                        if (cpu >= lowest_cpu)
                                break;
                        cpu_clear(cpu, *lowest_mask);
                }
        }

        return count;
}

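/*
 * Prefer this_cpu if it is in the mask; otherwise take the first cpu in
 * the mask, or return -1 if the mask is empty.
 */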
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

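/*
 * Find the best cpu to push 'task' to: among the lowest-priority
 * candidates, prefer the task's last cpu, then cpus topologically close
 * to it.  Returns -1 if there is no suitable cpu.
 */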
static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
        int count    = find_lowest_cpus(task, lowest_mask);

        if (!count)
                return -1; /* No targets found */

        /*
         * There is no sense in performing an optimal search if only one
         * target is found.
         */
        if (count == 1)
                return first_cpu(*lowest_mask);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {

                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that next_task has slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

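/*
 * Pull RT tasks from overloaded runqueues in this rq's root domain if
 * they would preempt what this cpu is about to run.  Returns nonzero if
 * the set of runnable RT tasks on this_rq may have changed.
 */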
static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
                        /*
                         * It is possible that overlapping cpusets
                         * will miss clearing a non-overloaded runqueue.
                         * Clear it now.
                         */
                        if (double_lock_balance(this_rq, src_rq)) {
                                /* unlocked our runqueue lock */
                                struct task_struct *old_next = next;

                                next = pick_next_task_rt(this_rq);
                                if (next != old_next)
                                        ret = 1;
                        }
                        if (likely(src_rq->rt.rt_nr_running <= 1)) {
                                /*
                                 * Small chance that this_rq->curr changed
                                 * but it's really harmless here.
                                 */
                                rt_clear_overload(this_rq);
                        } else {
                                /*
                                 * Heh, the src_rq is now overloaded, since
                                 * we already have the src_rq lock, go straight
                                 * to pulling tasks from it.
                                 */
                                goto try_pulling;
                        }
                        spin_unlock(&src_rq->lock);
                        continue;
                }

                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1) {
                        spin_unlock(&src_rq->lock);
                        continue;
                }

 try_pulling:
                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue or
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto out;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;

                }
 out:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void schedule_balance_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUS.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

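/*
 * A task was just woken on this rq: if it is an RT task that will not
 * run here immediately (its priority is no higher than the rq's current
 * highest) and the rq is overloaded, try to push RT tasks away.
 */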
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
        if (unlikely(rt_task(p)) &&
            !task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

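/*
 * An affinity change can alter whether a queued RT task counts as
 * migratable, so keep rt_nr_migratory and the overload state in sync.
 */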
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is queued AND whose allowed-cpu weight is changing.
         */
        if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
}

#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)   do { } while (0)
# define schedule_balance_rt(rq, prev)  do { } while (0)
# define wakeup_balance_rt(rq, p)       do { } while (0)
#endif /* CONFIG_SMP */

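/*
 * Per-tick housekeeping: account runtime and handle SCHED_RR round-robin
 * timeslices (SCHED_FIFO tasks have no timeslice).
 */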
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

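/* Reset the exec_start stamp of the (new) current task. */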
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};