/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

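/*
 * Keep this runqueue's overload state in sync: mark it overloaded in the
 * root domain when it has more than one runnable RT task and at least one
 * of them can migrate, and clear the mark otherwise.
 */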
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

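/*
 * With group scheduling, an rt_rq's runtime budget comes from its task
 * group; a NULL group means the runtime is unlimited.
 */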
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->tg->rt_runtime;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

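/*
 * Enqueue/dequeue the entity that represents this rt_rq in its parent,
 * used when the group is unthrottled or throttled. On enqueue, preempt
 * the current task if the group now holds a higher priority.
 */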
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

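/*
 * An entity counts as boosted when priority inheritance is in effect:
 * for a group, when any task below it is boosted; for a task, when its
 * priority differs from its normal (non-inherited) priority.
 */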
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#else

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (sysctl_sched_rt_runtime == -1)
                return RUNTIME_INF;

        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio;
#endif

        return rt_task_of(rt_se)->prio;
}

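/*
 * Account the rt_rq's consumed time against its runtime budget. When the
 * budget is exhausted, throttle the rt_rq (unless boosted tasks keep it
 * runnable) and tell the caller to reschedule.
 */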
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (rt_rq->rt_time > runtime) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rq->rt_throttled = 1;
                rt_rq->rt_throttled = 1;

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

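/*
 * Called when the RT period has expired: refresh the runtime accounting
 * of every leaf rt_rq on this runqueue and re-enqueue those that were
 * throttled but now fit within their budget again.
 */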
static void update_sched_rt_period(struct rq *rq)
{
        struct rt_rq *rt_rq;
        u64 period;

        while (rq->clock > rq->rt_period_expire) {
                period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
                rq->rt_period_expire += period;

                for_each_leaf_rt_rq(rt_rq, rq) {
                        u64 runtime = sched_rt_runtime(rt_rq);

                        rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                sched_rt_rq_enqueue(rt_rq);
                        }
                }

                rq->rt_throttled = 0;
        }
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        rt_rq->rt_time += delta_exec;
        if (sched_rt_runtime_exceeded(rt_rq))
                resched_task(curr);
}

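/*
 * Per-rt_rq bookkeeping for adding/removing an entity: the running count,
 * the cached highest priority, the migratory count and overload state,
 * and the number of boosted entities.
 */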
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */
        } else
                rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

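/*
 * Add or remove a single entity on its rt_rq's priority array, keeping
 * the bitmap of non-empty priority queues and the counters above in sync.
 * Entities that belong to a throttled group are not enqueued.
 */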
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);

        if (group_rq && rt_rq_throttled(group_rq))
                return;

        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 *      doesn't matter much for now, as h=2 for GROUP_SCHED.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
        struct sched_rt_entity *rt_se, *top_se;

        /*
         * dequeue all, top - down.
         */
        do {
                rt_se = &p->rt;
                top_se = NULL;
                for_each_sched_rt_entity(rt_se) {
                        if (on_rt_rq(rt_se))
                                top_se = rt_se;
                }
                if (top_se)
                        dequeue_rt_entity(top_se);
        } while (top_se);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        dequeue_rt_stack(p);

        /*
         * enqueue everybody, bottom - up.
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);

        inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        update_curr_rt(rq);

        dequeue_rt_stack(p);

        /*
         * re-enqueue all non-empty rt_rq entities.
         */
        for_each_sched_rt_entity(rt_se) {
                rt_rq = group_rt_rq(rt_se);
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }

        dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
        struct rt_prio_array *array = &rt_rq->active;

        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if the woken
         * RT task is of higher priority than the current RT task.
         * RT tasks behave differently from other tasks. If
         * one gets preempted, we try to push it off to another queue.
         * So trying to keep a preempting RT task on the same
         * cache hot CPU will force the running RT task to
         * a cold CPU. So we waste all the cache for the lower
         * RT task in hopes of saving some of an RT task
         * that is just being woken and probably will have
         * cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

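/*
 * Pick the first entity on the highest-priority non-empty queue of this
 * rt_rq. The caller must ensure the rt_rq is not empty.
 */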
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

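/*
 * Walk down the group hierarchy from the top-level rt_rq, following the
 * highest-priority entity at each level until a task is reached. Returns
 * NULL if nothing is runnable or the top-level rt_rq is throttled.
 */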
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

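/*
 * A task is a candidate for migration to @cpu if it is not currently
 * running, is allowed on that cpu (or cpu < 0 for "any"), and may run
 * on more than one cpu.
 */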
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
 next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

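/*
 * Build a mask of CPUs that could take @task: start from the CPUs that
 * are online in the root domain and allowed by the task's affinity, then
 * keep only those whose highest queued RT priority is below the task's,
 * preferring the lowest-priority runqueues. Returns the number of CPUs
 * found at that lowest priority; a return of 1 means the first set bit
 * can be used directly.
 */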
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int       lowest_prio = -1;
        int       lowest_cpu  = -1;
        int       count       = 0;
        int       cpu;

        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *lowest_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        /*
                         * If we already found a low-RT queue and now we
                         * find this non-RT queue, clear the mask and set
                         * our bit. Otherwise just return the queue as is
                         * and the count == 1 will cause the algorithm
                         * to use the first bit found.
                         */
                        if (lowest_cpu != -1) {
                                cpus_clear(*lowest_mask);
                                cpu_set(rq->cpu, *lowest_mask);
                        }
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                lowest_cpu = cpu;
                                count = 0;
                        }
                        count++;
                } else
                        cpu_clear(cpu, *lowest_mask);
        }

        /*
         * Clear out all the set bits that represent
         * runqueues that were of higher prio than
         * the lowest_prio.
         */
        if (lowest_cpu > 0) {
                /*
                 * Perhaps we could add another cpumask op to
                 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
                 * Then that could be optimized to use memset and such.
                 */
                for_each_cpu_mask(cpu, *lowest_mask) {
                        if (cpu >= lowest_cpu)
                                break;
                        cpu_clear(cpu, *lowest_mask);
                }
        }

        return count;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

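/*
 * Find the best CPU to push @task to: among the candidates from
 * find_lowest_cpus(), prefer the task's current CPU (likely cache-hot),
 * then a CPU close to it in the sched-domain topology, and finally any
 * remaining candidate. Returns -1 if no suitable CPU exists.
 */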
static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
        int count    = find_lowest_cpus(task, lowest_mask);

        if (!count)
                return -1; /* No targets found */

        /*
         * There is no sense in performing an optimal search if only one
         * target is found.
         */
        if (count == 1)
                return first_cpu(*lowest_mask);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {

                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task might
 *       be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT task */
        while (push_rt_task(rq))
                ;
}

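/*
 * Pull RT tasks to this runqueue from the overloaded runqueues in the
 * root domain, but only tasks that would preempt whatever this runqueue
 * is about to run. Returns nonzero if the caller should re-pick its
 * next task.
 */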
static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)
                        goto skip;

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue or
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;

                }
 skip:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUs.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

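/*
 * A task woke up on this runqueue but will not run right away (its
 * priority is not above the highest queued); if we are overloaded, try
 * to push RT tasks off to other CPUs instead.
 */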
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            (p->prio >= rq->rt.highest_prio) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

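/*
 * The affinity of an RT task is changing: keep the runqueue's count of
 * migratory RT tasks (and its overload state) in sync with the new mask.
 */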
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed    = *new_mask;
        p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)
                pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (!running) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
                    rq != task_rq(p))
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
{
        if (running) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule.
                 */
                if (p->prio > rq->rt.highest_prio)
                        resched_task(p);
#else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
                        resched_task(p);
#endif /* CONFIG_SMP */
        } else {
                /*
                 * This task is not running, but if it is
                 * greater than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

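/*
 * Enforce RLIMIT_RTTIME: count the scheduler ticks this task has used
 * and, once the soft limit is exceeded, set it_sched_expires so the
 * rlimit can be acted upon.
 */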
static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        if (!p->signal)
                return;

        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
}

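/*
 * Scheduler tick for an RT task: update runtime accounting, run the
 * RLIMIT_RTTIME watchdog, and handle SCHED_RR timeslices (round-robin to
 * the tail of the queue when the slice runs out).
 */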
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
};