1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #include "sched.h"
7
8 #include <linux/slab.h>
9
10 int sched_rr_timeslice = RR_TIMESLICE;
11
12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
13
14 struct rt_bandwidth def_rt_bandwidth;
15
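/*
 * Periodic replenishment timer for an rt_bandwidth: forward the timer by
 * however many whole periods have elapsed and hand that overrun count to
 * do_sched_rt_period_timer(), which refills runtime and unthrottles
 * runqueues. The timer is stopped once a replenishment pass reports idle.
 */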
16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
17 {
18         struct rt_bandwidth *rt_b =
19                 container_of(timer, struct rt_bandwidth, rt_period_timer);
20         ktime_t now;
21         int overrun;
22         int idle = 0;
23
24         for (;;) {
25                 now = hrtimer_cb_get_time(timer);
26                 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
27
28                 if (!overrun)
29                         break;
30
31                 idle = do_sched_rt_period_timer(rt_b, overrun);
32         }
33
34         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
35 }
36
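/*
 * Set up an rt_bandwidth control block: record the period and runtime
 * budget and initialize (but do not arm) the replenishment hrtimer.
 */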
37 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
38 {
39         rt_b->rt_period = ns_to_ktime(period);
40         rt_b->rt_runtime = runtime;
41
42         raw_spin_lock_init(&rt_b->rt_runtime_lock);
43
44         hrtimer_init(&rt_b->rt_period_timer,
45                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
46         rt_b->rt_period_timer.function = sched_rt_period_timer;
47 }
48
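/*
 * Arm the replenishment timer for this bandwidth block, unless RT
 * throttling is disabled, runtime is infinite, or the timer already runs.
 */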
49 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
50 {
51         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
52                 return;
53
54         if (hrtimer_active(&rt_b->rt_period_timer))
55                 return;
56
57         raw_spin_lock(&rt_b->rt_runtime_lock);
58         start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
59         raw_spin_unlock(&rt_b->rt_runtime_lock);
60 }
61
62 void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
63 {
64         struct rt_prio_array *array;
65         int i;
66
67         array = &rt_rq->active;
68         for (i = 0; i < MAX_RT_PRIO; i++) {
69                 INIT_LIST_HEAD(array->queue + i);
70                 __clear_bit(i, array->bitmap);
71         }
72         /* delimiter for bitsearch: */
73         __set_bit(MAX_RT_PRIO, array->bitmap);
74
75 #if defined CONFIG_SMP
76         rt_rq->highest_prio.curr = MAX_RT_PRIO;
77         rt_rq->highest_prio.next = MAX_RT_PRIO;
78         rt_rq->rt_nr_migratory = 0;
79         rt_rq->overloaded = 0;
80         plist_head_init(&rt_rq->pushable_tasks);
81 #endif
82
83         rt_rq->rt_time = 0;
84         rt_rq->rt_throttled = 0;
85         rt_rq->rt_runtime = 0;
86         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
87 }
88
89 #ifdef CONFIG_RT_GROUP_SCHED
90 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91 {
92         hrtimer_cancel(&rt_b->rt_period_timer);
93 }
94
95 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
97 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98 {
99 #ifdef CONFIG_SCHED_DEBUG
100         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101 #endif
102         return container_of(rt_se, struct task_struct, rt);
103 }
104
105 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106 {
107         return rt_rq->rq;
108 }
109
110 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111 {
112         return rt_se->rt_rq;
113 }
114
115 void free_rt_sched_group(struct task_group *tg)
116 {
117         int i;
118
119         if (tg->rt_se)
120                 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122         for_each_possible_cpu(i) {
123                 if (tg->rt_rq)
124                         kfree(tg->rt_rq[i]);
125                 if (tg->rt_se)
126                         kfree(tg->rt_se[i]);
127         }
128
129         kfree(tg->rt_rq);
130         kfree(tg->rt_se);
131 }
132
133 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134                 struct sched_rt_entity *rt_se, int cpu,
135                 struct sched_rt_entity *parent)
136 {
137         struct rq *rq = cpu_rq(cpu);
138
139         rt_rq->highest_prio.curr = MAX_RT_PRIO;
140         rt_rq->rt_nr_boosted = 0;
141         rt_rq->rq = rq;
142         rt_rq->tg = tg;
143
144         tg->rt_rq[cpu] = rt_rq;
145         tg->rt_se[cpu] = rt_se;
146
147         if (!rt_se)
148                 return;
149
150         if (!parent)
151                 rt_se->rt_rq = &rq->rt;
152         else
153                 rt_se->rt_rq = parent->my_q;
154
155         rt_se->my_q = rt_rq;
156         rt_se->parent = parent;
157         INIT_LIST_HEAD(&rt_se->run_list);
158 }
159
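/*
 * Allocate the per-cpu rt_rq and sched_rt_entity arrays for a task group
 * and wire each entry into the parent's hierarchy. Returns 1 on success,
 * 0 on allocation failure.
 */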
160 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161 {
162         struct rt_rq *rt_rq;
163         struct sched_rt_entity *rt_se;
164         int i;
165
166         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167         if (!tg->rt_rq)
168                 goto err;
169         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170         if (!tg->rt_se)
171                 goto err;
172
173         init_rt_bandwidth(&tg->rt_bandwidth,
174                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176         for_each_possible_cpu(i) {
177                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178                                      GFP_KERNEL, cpu_to_node(i));
179                 if (!rt_rq)
180                         goto err;
181
182                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183                                      GFP_KERNEL, cpu_to_node(i));
184                 if (!rt_se)
185                         goto err_free_rq;
186
187                 init_rt_rq(rt_rq, cpu_rq(i));
188                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190         }
191
192         return 1;
193
194 err_free_rq:
195         kfree(rt_rq);
196 err:
197         return 0;
198 }
199
200 #else /* CONFIG_RT_GROUP_SCHED */
201
202 #define rt_entity_is_task(rt_se) (1)
203
204 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205 {
206         return container_of(rt_se, struct task_struct, rt);
207 }
208
209 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210 {
211         return container_of(rt_rq, struct rq, rt);
212 }
213
214 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215 {
216         struct task_struct *p = rt_task_of(rt_se);
217         struct rq *rq = task_rq(p);
218
219         return &rq->rt;
220 }
221
222 void free_rt_sched_group(struct task_group *tg) { }
223
224 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225 {
226         return 1;
227 }
228 #endif /* CONFIG_RT_GROUP_SCHED */
229
230 #ifdef CONFIG_SMP
231
232 static inline int rt_overloaded(struct rq *rq)
233 {
234         return atomic_read(&rq->rd->rto_count);
235 }
236
237 static inline void rt_set_overload(struct rq *rq)
238 {
239         if (!rq->online)
240                 return;
241
242         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
243         /*
244          * Make sure the mask is visible before we set
245          * the overload count. That is checked to determine
246          * if we should look at the mask. It would be a shame
247          * if we looked at the mask, but the mask was not
248          * updated yet.
249          */
250         wmb();
251         atomic_inc(&rq->rd->rto_count);
252 }
253
254 static inline void rt_clear_overload(struct rq *rq)
255 {
256         if (!rq->online)
257                 return;
258
259         /* the order here really doesn't matter */
260         atomic_dec(&rq->rd->rto_count);
261         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
262 }
263
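/*
 * An rt_rq counts as "overloaded" when it has more than one runnable RT
 * task and at least one of them may migrate; propagate that state to the
 * root domain so other CPUs know there is work to pull.
 */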
264 static void update_rt_migration(struct rt_rq *rt_rq)
265 {
266         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
267                 if (!rt_rq->overloaded) {
268                         rt_set_overload(rq_of_rt_rq(rt_rq));
269                         rt_rq->overloaded = 1;
270                 }
271         } else if (rt_rq->overloaded) {
272                 rt_clear_overload(rq_of_rt_rq(rt_rq));
273                 rt_rq->overloaded = 0;
274         }
275 }
276
277 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
278 {
279         struct task_struct *p;
280
281         if (!rt_entity_is_task(rt_se))
282                 return;
283
284         p = rt_task_of(rt_se);
285         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
286
287         rt_rq->rt_nr_total++;
288         if (p->nr_cpus_allowed > 1)
289                 rt_rq->rt_nr_migratory++;
290
291         update_rt_migration(rt_rq);
292 }
293
294 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
295 {
296         struct task_struct *p;
297
298         if (!rt_entity_is_task(rt_se))
299                 return;
300
301         p = rt_task_of(rt_se);
302         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
303
304         rt_rq->rt_nr_total--;
305         if (p->nr_cpus_allowed > 1)
306                 rt_rq->rt_nr_migratory--;
307
308         update_rt_migration(rt_rq);
309 }
310
311 static inline int has_pushable_tasks(struct rq *rq)
312 {
313         return !plist_head_empty(&rq->rt.pushable_tasks);
314 }
315
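/*
 * Pushable tasks are kept in a priority-sorted plist so the push/pull
 * logic can find the highest-priority task that may run elsewhere; the
 * next-highest priority is cached in rq->rt.highest_prio.next.
 */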
316 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
317 {
318         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
319         plist_node_init(&p->pushable_tasks, p->prio);
320         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
321
322         /* Update the highest prio pushable task */
323         if (p->prio < rq->rt.highest_prio.next)
324                 rq->rt.highest_prio.next = p->prio;
325 }
326
327 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
328 {
329         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
330
331         /* Update the new highest prio pushable task */
332         if (has_pushable_tasks(rq)) {
333                 p = plist_first_entry(&rq->rt.pushable_tasks,
334                                       struct task_struct, pushable_tasks);
335                 rq->rt.highest_prio.next = p->prio;
336         } else
337                 rq->rt.highest_prio.next = MAX_RT_PRIO;
338 }
339
340 #else
341
342 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
343 {
344 }
345
346 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347 {
348 }
349
350 static inline
351 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
352 {
353 }
354
355 static inline
356 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
357 {
358 }
359
360 #endif /* CONFIG_SMP */
361
362 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
363 {
364         return !list_empty(&rt_se->run_list);
365 }
366
367 #ifdef CONFIG_RT_GROUP_SCHED
368
369 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
370 {
371         if (!rt_rq->tg)
372                 return RUNTIME_INF;
373
374         return rt_rq->rt_runtime;
375 }
376
377 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
378 {
379         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
380 }
381
382 typedef struct task_group *rt_rq_iter_t;
383
384 static inline struct task_group *next_task_group(struct task_group *tg)
385 {
386         do {
387                 tg = list_entry_rcu(tg->list.next,
388                         typeof(struct task_group), list);
389         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
390
391         if (&tg->list == &task_groups)
392                 tg = NULL;
393
394         return tg;
395 }
396
397 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
398         for (iter = container_of(&task_groups, typeof(*iter), list);    \
399                 (iter = next_task_group(iter)) &&                       \
400                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
401
402 #define for_each_sched_rt_entity(rt_se) \
403         for (; rt_se; rt_se = rt_se->parent)
404
405 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
406 {
407         return rt_se->my_q;
408 }
409
410 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
411 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
412
413 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
414 {
415         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
416         struct sched_rt_entity *rt_se;
417
418         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
419
420         rt_se = rt_rq->tg->rt_se[cpu];
421
422         if (rt_rq->rt_nr_running) {
423                 if (rt_se && !on_rt_rq(rt_se))
424                         enqueue_rt_entity(rt_se, false);
425                 if (rt_rq->highest_prio.curr < curr->prio)
426                         resched_task(curr);
427         }
428 }
429
430 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
431 {
432         struct sched_rt_entity *rt_se;
433         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
434
435         rt_se = rt_rq->tg->rt_se[cpu];
436
437         if (rt_se && on_rt_rq(rt_se))
438                 dequeue_rt_entity(rt_se);
439 }
440
441 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
442 {
443         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
444 }
445
446 static int rt_se_boosted(struct sched_rt_entity *rt_se)
447 {
448         struct rt_rq *rt_rq = group_rt_rq(rt_se);
449         struct task_struct *p;
450
451         if (rt_rq)
452                 return !!rt_rq->rt_nr_boosted;
453
454         p = rt_task_of(rt_se);
455         return p->prio != p->normal_prio;
456 }
457
458 #ifdef CONFIG_SMP
459 static inline const struct cpumask *sched_rt_period_mask(void)
460 {
461         return this_rq()->rd->span;
462 }
463 #else
464 static inline const struct cpumask *sched_rt_period_mask(void)
465 {
466         return cpu_online_mask;
467 }
468 #endif
469
470 static inline
471 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
472 {
473         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
474 }
475
476 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
477 {
478         return &rt_rq->tg->rt_bandwidth;
479 }
480
481 #else /* !CONFIG_RT_GROUP_SCHED */
482
483 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
484 {
485         return rt_rq->rt_runtime;
486 }
487
488 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
489 {
490         return ktime_to_ns(def_rt_bandwidth.rt_period);
491 }
492
493 typedef struct rt_rq *rt_rq_iter_t;
494
495 #define for_each_rt_rq(rt_rq, iter, rq) \
496         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
497
498 #define for_each_sched_rt_entity(rt_se) \
499         for (; rt_se; rt_se = NULL)
500
501 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
502 {
503         return NULL;
504 }
505
506 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
507 {
508         if (rt_rq->rt_nr_running)
509                 resched_task(rq_of_rt_rq(rt_rq)->curr);
510 }
511
512 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
513 {
514 }
515
516 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
517 {
518         return rt_rq->rt_throttled;
519 }
520
521 static inline const struct cpumask *sched_rt_period_mask(void)
522 {
523         return cpu_online_mask;
524 }
525
526 static inline
527 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
528 {
529         return &cpu_rq(cpu)->rt;
530 }
531
532 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
533 {
534         return &def_rt_bandwidth;
535 }
536
537 #endif /* CONFIG_RT_GROUP_SCHED */
538
539 #ifdef CONFIG_SMP
540 /*
541  * We ran out of runtime, see if we can borrow some from our neighbours.
542  */
543 static int do_balance_runtime(struct rt_rq *rt_rq)
544 {
545         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
546         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
547         int i, weight, more = 0;
548         u64 rt_period;
549
550         weight = cpumask_weight(rd->span);
551
552         raw_spin_lock(&rt_b->rt_runtime_lock);
553         rt_period = ktime_to_ns(rt_b->rt_period);
554         for_each_cpu(i, rd->span) {
555                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
556                 s64 diff;
557
558                 if (iter == rt_rq)
559                         continue;
560
561                 raw_spin_lock(&iter->rt_runtime_lock);
562                 /*
563                  * Either all rqs have inf runtime and there's nothing to steal
564                  * or __disable_runtime() below sets a specific rq to inf to
565                  * indicate it's been disabled and disallow stealing.
566                  */
567                 if (iter->rt_runtime == RUNTIME_INF)
568                         goto next;
569
570                 /*
571                  * From runqueues with spare time, take 1/n part of their
572                  * spare time, but no more than our period.
573                  */
574                 diff = iter->rt_runtime - iter->rt_time;
575                 if (diff > 0) {
576                         diff = div_u64((u64)diff, weight);
577                         if (rt_rq->rt_runtime + diff > rt_period)
578                                 diff = rt_period - rt_rq->rt_runtime;
579                         iter->rt_runtime -= diff;
580                         rt_rq->rt_runtime += diff;
581                         more = 1;
582                         if (rt_rq->rt_runtime == rt_period) {
583                                 raw_spin_unlock(&iter->rt_runtime_lock);
584                                 break;
585                         }
586                 }
587 next:
588                 raw_spin_unlock(&iter->rt_runtime_lock);
589         }
590         raw_spin_unlock(&rt_b->rt_runtime_lock);
591
592         return more;
593 }
594
595 /*
596  * Ensure this RQ takes back all the runtime it lent to its neighbours.
597  */
598 static void __disable_runtime(struct rq *rq)
599 {
600         struct root_domain *rd = rq->rd;
601         rt_rq_iter_t iter;
602         struct rt_rq *rt_rq;
603
604         if (unlikely(!scheduler_running))
605                 return;
606
607         for_each_rt_rq(rt_rq, iter, rq) {
608                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
609                 s64 want;
610                 int i;
611
612                 raw_spin_lock(&rt_b->rt_runtime_lock);
613                 raw_spin_lock(&rt_rq->rt_runtime_lock);
614                 /*
615                  * Either we're all inf and nobody needs to borrow, or we're
616                  * already disabled and thus have nothing to do, or we have
617                  * exactly the right amount of runtime to take out.
618                  */
619                 if (rt_rq->rt_runtime == RUNTIME_INF ||
620                                 rt_rq->rt_runtime == rt_b->rt_runtime)
621                         goto balanced;
622                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
623
624                 /*
625                  * Calculate the difference between what we started out with
626                  * and what we currently have, that's the amount of runtime
627                  * we lent out and now have to reclaim.
628                  */
629                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
630
631                 /*
632                  * Greedy reclaim, take back as much as we can.
633                  */
634                 for_each_cpu(i, rd->span) {
635                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
636                         s64 diff;
637
638                         /*
639                          * Can't reclaim from ourselves or disabled runqueues.
640                          */
641                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
642                                 continue;
643
644                         raw_spin_lock(&iter->rt_runtime_lock);
645                         if (want > 0) {
646                                 diff = min_t(s64, iter->rt_runtime, want);
647                                 iter->rt_runtime -= diff;
648                                 want -= diff;
649                         } else {
650                                 iter->rt_runtime -= want;
651                                 want -= want;
652                         }
653                         raw_spin_unlock(&iter->rt_runtime_lock);
654
655                         if (!want)
656                                 break;
657                 }
658
659                 raw_spin_lock(&rt_rq->rt_runtime_lock);
660                 /*
661                  * We cannot be left wanting - that would mean some runtime
662                  * leaked out of the system.
663                  */
664                 BUG_ON(want);
665 balanced:
666                 /*
667                  * Disable all the borrow logic by pretending we have inf
668                  * runtime - in which case borrowing doesn't make sense.
669                  */
670                 rt_rq->rt_runtime = RUNTIME_INF;
671                 rt_rq->rt_throttled = 0;
672                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
673                 raw_spin_unlock(&rt_b->rt_runtime_lock);
674         }
675 }
676
677 static void __enable_runtime(struct rq *rq)
678 {
679         rt_rq_iter_t iter;
680         struct rt_rq *rt_rq;
681
682         if (unlikely(!scheduler_running))
683                 return;
684
685         /*
686          * Reset each runqueue's bandwidth settings
687          */
688         for_each_rt_rq(rt_rq, iter, rq) {
689                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
690
691                 raw_spin_lock(&rt_b->rt_runtime_lock);
692                 raw_spin_lock(&rt_rq->rt_runtime_lock);
693                 rt_rq->rt_runtime = rt_b->rt_runtime;
694                 rt_rq->rt_time = 0;
695                 rt_rq->rt_throttled = 0;
696                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
697                 raw_spin_unlock(&rt_b->rt_runtime_lock);
698         }
699 }
700
701 static int balance_runtime(struct rt_rq *rt_rq)
702 {
703         int more = 0;
704
705         if (!sched_feat(RT_RUNTIME_SHARE))
706                 return more;
707
708         if (rt_rq->rt_time > rt_rq->rt_runtime) {
709                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
710                 more = do_balance_runtime(rt_rq);
711                 raw_spin_lock(&rt_rq->rt_runtime_lock);
712         }
713
714         return more;
715 }
716 #else /* !CONFIG_SMP */
717 static inline int balance_runtime(struct rt_rq *rt_rq)
718 {
719         return 0;
720 }
721 #endif /* CONFIG_SMP */
722
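/*
 * Runs from the bandwidth timer once per elapsed period: charge off up to
 * 'overrun' periods worth of runtime from each rt_rq, unthrottle and
 * re-enqueue runqueues whose debt has been paid, and return 1 (idle) when
 * the timer no longer needs to run.
 */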
723 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
724 {
725         int i, idle = 1, throttled = 0;
726         const struct cpumask *span;
727
728         span = sched_rt_period_mask();
729 #ifdef CONFIG_RT_GROUP_SCHED
730         /*
731          * FIXME: isolated CPUs should really leave the root task group,
732          * whether they are isolcpus or were isolated via cpusets, lest
733          * the timer run on a CPU which does not service all runqueues,
734          * potentially leaving other CPUs indefinitely throttled.  If
735          * isolation is really required, the user will turn the throttle
736          * off to kill the perturbations it causes anyway.  Meanwhile,
737          * this maintains functionality for boot and/or troubleshooting.
738          */
739         if (rt_b == &root_task_group.rt_bandwidth)
740                 span = cpu_online_mask;
741 #endif
742         for_each_cpu(i, span) {
743                 int enqueue = 0;
744                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
745                 struct rq *rq = rq_of_rt_rq(rt_rq);
746
747                 raw_spin_lock(&rq->lock);
748                 if (rt_rq->rt_time) {
749                         u64 runtime;
750
751                         raw_spin_lock(&rt_rq->rt_runtime_lock);
752                         if (rt_rq->rt_throttled)
753                                 balance_runtime(rt_rq);
754                         runtime = rt_rq->rt_runtime;
755                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
756                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
757                                 rt_rq->rt_throttled = 0;
758                                 enqueue = 1;
759
760                                 /*
761                                  * Force a clock update if the CPU was idle,
762                                  * lest wakeup -> unthrottle time accumulate.
763                                  */
764                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
765                                         rq->skip_clock_update = -1;
766                         }
767                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
768                                 idle = 0;
769                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
770                 } else if (rt_rq->rt_nr_running) {
771                         idle = 0;
772                         if (!rt_rq_throttled(rt_rq))
773                                 enqueue = 1;
774                 }
775                 if (rt_rq->rt_throttled)
776                         throttled = 1;
777
778                 if (enqueue)
779                         sched_rt_rq_enqueue(rt_rq);
780                 raw_spin_unlock(&rq->lock);
781         }
782
783         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
784                 return 1;
785
786         return idle;
787 }
788
789 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
790 {
791 #ifdef CONFIG_RT_GROUP_SCHED
792         struct rt_rq *rt_rq = group_rt_rq(rt_se);
793
794         if (rt_rq)
795                 return rt_rq->highest_prio.curr;
796 #endif
797
798         return rt_task_of(rt_se)->prio;
799 }
800
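/*
 * Check whether this rt_rq has used up its runtime for the current period.
 * If so (and it actually has a budget), mark it throttled, dequeue it and
 * return 1 so the caller can resched; boosted groups are never throttled.
 */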
801 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
802 {
803         u64 runtime = sched_rt_runtime(rt_rq);
804
805         if (rt_rq->rt_throttled)
806                 return rt_rq_throttled(rt_rq);
807
808         if (runtime >= sched_rt_period(rt_rq))
809                 return 0;
810
811         balance_runtime(rt_rq);
812         runtime = sched_rt_runtime(rt_rq);
813         if (runtime == RUNTIME_INF)
814                 return 0;
815
816         if (rt_rq->rt_time > runtime) {
817                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
818
819                 /*
820                  * Don't actually throttle groups that have no runtime assigned
821                  * but accrue some time due to boosting.
822                  */
823                 if (likely(rt_b->rt_runtime)) {
824                         static bool once = false;
825
826                         rt_rq->rt_throttled = 1;
827
828                         if (!once) {
829                                 once = true;
830                                 printk_sched("sched: RT throttling activated\n");
831                         }
832                 } else {
833                         /*
834                          * In case we accrued some time anyway, make it go away;
835                          * replenishment is a joke, since it will replenish us
836                          * with exactly 0 ns.
837                          */
838                         rt_rq->rt_time = 0;
839                 }
840
841                 if (rt_rq_throttled(rt_rq)) {
842                         sched_rt_rq_dequeue(rt_rq);
843                         return 1;
844                 }
845         }
846
847         return 0;
848 }
849
850 /*
851  * Update the current task's runtime statistics. Skip current tasks that
852  * are not in our scheduling class.
853  */
854 static void update_curr_rt(struct rq *rq)
855 {
856         struct task_struct *curr = rq->curr;
857         struct sched_rt_entity *rt_se = &curr->rt;
858         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
859         u64 delta_exec;
860
861         if (curr->sched_class != &rt_sched_class)
862                 return;
863
864         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
865         if (unlikely((s64)delta_exec <= 0))
866                 return;
867
868         schedstat_set(curr->se.statistics.exec_max,
869                       max(curr->se.statistics.exec_max, delta_exec));
870
871         curr->se.sum_exec_runtime += delta_exec;
872         account_group_exec_runtime(curr, delta_exec);
873
874         curr->se.exec_start = rq_clock_task(rq);
875         cpuacct_charge(curr, delta_exec);
876
877         sched_rt_avg_update(rq, delta_exec);
878
879         if (!rt_bandwidth_enabled())
880                 return;
881
882         for_each_sched_rt_entity(rt_se) {
883                 rt_rq = rt_rq_of_se(rt_se);
884
885                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
886                         raw_spin_lock(&rt_rq->rt_runtime_lock);
887                         rt_rq->rt_time += delta_exec;
888                         if (sched_rt_runtime_exceeded(rt_rq))
889                                 resched_task(curr);
890                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
891                 }
892         }
893 }
894
895 #if defined CONFIG_SMP
896
897 static void
898 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
899 {
900         struct rq *rq = rq_of_rt_rq(rt_rq);
901
902         if (rq->online && prio < prev_prio)
903                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
904 }
905
906 static void
907 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
908 {
909         struct rq *rq = rq_of_rt_rq(rt_rq);
910
911         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
912                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
913 }
914
915 #else /* CONFIG_SMP */
916
917 static inline
918 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
919 static inline
920 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
921
922 #endif /* CONFIG_SMP */
923
924 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
925 static void
926 inc_rt_prio(struct rt_rq *rt_rq, int prio)
927 {
928         int prev_prio = rt_rq->highest_prio.curr;
929
930         if (prio < prev_prio)
931                 rt_rq->highest_prio.curr = prio;
932
933         inc_rt_prio_smp(rt_rq, prio, prev_prio);
934 }
935
936 static void
937 dec_rt_prio(struct rt_rq *rt_rq, int prio)
938 {
939         int prev_prio = rt_rq->highest_prio.curr;
940
941         if (rt_rq->rt_nr_running) {
942
943                 WARN_ON(prio < prev_prio);
944
945                 /*
946                  * This may have been our highest task, and therefore
947                  * we may have some recomputation to do
948                  */
949                 if (prio == prev_prio) {
950                         struct rt_prio_array *array = &rt_rq->active;
951
952                         rt_rq->highest_prio.curr =
953                                 sched_find_first_bit(array->bitmap);
954                 }
955
956         } else
957                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
958
959         dec_rt_prio_smp(rt_rq, prio, prev_prio);
960 }
961
962 #else
963
964 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
965 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
966
967 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
968
969 #ifdef CONFIG_RT_GROUP_SCHED
970
971 static void
972 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
973 {
974         if (rt_se_boosted(rt_se))
975                 rt_rq->rt_nr_boosted++;
976
977         if (rt_rq->tg)
978                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
979 }
980
981 static void
982 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
983 {
984         if (rt_se_boosted(rt_se))
985                 rt_rq->rt_nr_boosted--;
986
987         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
988 }
989
990 #else /* CONFIG_RT_GROUP_SCHED */
991
992 static void
993 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
994 {
995         start_rt_bandwidth(&def_rt_bandwidth);
996 }
997
998 static inline
999 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1000
1001 #endif /* CONFIG_RT_GROUP_SCHED */
1002
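/*
 * Accounting performed whenever an entity is added to or removed from an
 * rt_rq: nr_running, highest priority, migration and group/bandwidth state.
 */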
1003 static inline
1004 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1005 {
1006         int prio = rt_se_prio(rt_se);
1007
1008         WARN_ON(!rt_prio(prio));
1009         rt_rq->rt_nr_running++;
1010
1011         inc_rt_prio(rt_rq, prio);
1012         inc_rt_migration(rt_se, rt_rq);
1013         inc_rt_group(rt_se, rt_rq);
1014 }
1015
1016 static inline
1017 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1018 {
1019         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1020         WARN_ON(!rt_rq->rt_nr_running);
1021         rt_rq->rt_nr_running--;
1022
1023         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1024         dec_rt_migration(rt_se, rt_rq);
1025         dec_rt_group(rt_se, rt_rq);
1026 }
1027
1028 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1029 {
1030         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1031         struct rt_prio_array *array = &rt_rq->active;
1032         struct rt_rq *group_rq = group_rt_rq(rt_se);
1033         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1034
1035         /*
1036          * Don't enqueue the group if it's throttled, or when it's empty.
1037          * The latter is a consequence of the former when a child group
1038          * gets throttled and the current group doesn't have any other
1039          * active members.
1040          */
1041         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1042                 return;
1043
1044         if (head)
1045                 list_add(&rt_se->run_list, queue);
1046         else
1047                 list_add_tail(&rt_se->run_list, queue);
1048         __set_bit(rt_se_prio(rt_se), array->bitmap);
1049
1050         inc_rt_tasks(rt_se, rt_rq);
1051 }
1052
1053 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1054 {
1055         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1056         struct rt_prio_array *array = &rt_rq->active;
1057
1058         list_del_init(&rt_se->run_list);
1059         if (list_empty(array->queue + rt_se_prio(rt_se)))
1060                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1061
1062         dec_rt_tasks(rt_se, rt_rq);
1063 }
1064
1065 /*
1066  * Because the prio of an upper entry depends on the lower
1067  * entries, we must remove entries top - down.
1068  */
1069 static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1070 {
1071         struct sched_rt_entity *back = NULL;
1072
1073         for_each_sched_rt_entity(rt_se) {
1074                 rt_se->back = back;
1075                 back = rt_se;
1076         }
1077
1078         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1079                 if (on_rt_rq(rt_se))
1080                         __dequeue_rt_entity(rt_se);
1081         }
1082 }
1083
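/*
 * Because enqueueing an entity can change the priority of its parents,
 * first pop the whole hierarchy top-down and then re-insert it bottom-up.
 */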
1084 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1085 {
1086         dequeue_rt_stack(rt_se);
1087         for_each_sched_rt_entity(rt_se)
1088                 __enqueue_rt_entity(rt_se, head);
1089 }
1090
1091 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1092 {
1093         dequeue_rt_stack(rt_se);
1094
1095         for_each_sched_rt_entity(rt_se) {
1096                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1097
1098                 if (rt_rq && rt_rq->rt_nr_running)
1099                         __enqueue_rt_entity(rt_se, false);
1100         }
1101 }
1102
1103 /*
1104  * Adding/removing a task to/from a priority array:
1105  */
1106 static void
1107 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1108 {
1109         struct sched_rt_entity *rt_se = &p->rt;
1110
1111         if (flags & ENQUEUE_WAKEUP)
1112                 rt_se->timeout = 0;
1113
1114         enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1115
1116         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1117                 enqueue_pushable_task(rq, p);
1118
1119         inc_nr_running(rq);
1120 }
1121
1122 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1123 {
1124         struct sched_rt_entity *rt_se = &p->rt;
1125
1126         update_curr_rt(rq);
1127         dequeue_rt_entity(rt_se);
1128
1129         dequeue_pushable_task(rq, p);
1130
1131         dec_nr_running(rq);
1132 }
1133
1134 /*
1135  * Put the task at the head or the tail of the run list without the overhead
1136  * of a dequeue followed by an enqueue.
1137  */
1138 static void
1139 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1140 {
1141         if (on_rt_rq(rt_se)) {
1142                 struct rt_prio_array *array = &rt_rq->active;
1143                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1144
1145                 if (head)
1146                         list_move(&rt_se->run_list, queue);
1147                 else
1148                         list_move_tail(&rt_se->run_list, queue);
1149         }
1150 }
1151
1152 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1153 {
1154         struct sched_rt_entity *rt_se = &p->rt;
1155         struct rt_rq *rt_rq;
1156
1157         for_each_sched_rt_entity(rt_se) {
1158                 rt_rq = rt_rq_of_se(rt_se);
1159                 requeue_rt_entity(rt_rq, rt_se, head);
1160         }
1161 }
1162
1163 static void yield_task_rt(struct rq *rq)
1164 {
1165         requeue_task_rt(rq, rq->curr, 0);
1166 }
1167
1168 #ifdef CONFIG_SMP
1169 static int find_lowest_rq(struct task_struct *task);
1170
1171 static int
1172 select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1173 {
1174         struct task_struct *curr;
1175         struct rq *rq;
1176         int cpu;
1177
1178         cpu = task_cpu(p);
1179
1180         if (p->nr_cpus_allowed == 1)
1181                 goto out;
1182
1183         /* For anything but wake ups, just return the task_cpu */
1184         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1185                 goto out;
1186
1187         rq = cpu_rq(cpu);
1188
1189         rcu_read_lock();
1190         curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1191
1192         /*
1193          * If the current task on @p's runqueue is an RT task, then
1194          * try to see if we can wake this RT task up on another
1195          * runqueue. Otherwise simply start this RT task
1196          * on its current runqueue.
1197          *
1198          * We want to avoid overloading runqueues. If the woken
1199          * task is of higher priority, then it will stay on this CPU
1200          * and the lower prio task should be moved to another CPU.
1201          * Even though this will probably make the lower prio task
1202          * lose its cache, we do not want to bounce a higher task
1203          * around just because it gave up its CPU, perhaps for a
1204          * lock?
1205          *
1206          * For equal prio tasks, we just let the scheduler sort it out.
1207          *
1208          * Otherwise, just let it ride on the affined RQ and the
1209          * post-schedule router will push the preempted task away
1210          *
1211          * This test is optimistic, if we get it wrong the load-balancer
1212          * will have to sort it out.
1213          */
1214         if (curr && unlikely(rt_task(curr)) &&
1215             (curr->nr_cpus_allowed < 2 ||
1216              curr->prio <= p->prio) &&
1217             (p->nr_cpus_allowed > 1)) {
1218                 int target = find_lowest_rq(p);
1219
1220                 if (target != -1)
1221                         cpu = target;
1222         }
1223         rcu_read_unlock();
1224
1225 out:
1226         return cpu;
1227 }
1228
1229 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1230 {
1231         if (rq->curr->nr_cpus_allowed == 1)
1232                 return;
1233
1234         if (p->nr_cpus_allowed != 1
1235             && cpupri_find(&rq->rd->cpupri, p, NULL))
1236                 return;
1237
1238         if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1239                 return;
1240
1241         /*
1242          * There appear to be other cpus that can accept
1243          * current and none to run 'p', so let's reschedule
1244          * to try and push current away:
1245          */
1246         requeue_task_rt(rq, p, 1);
1247         resched_task(rq->curr);
1248 }
1249
1250 #endif /* CONFIG_SMP */
1251
1252 /*
1253  * Preempt the current task with a newly woken task if needed:
1254  */
1255 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1256 {
1257         if (p->prio < rq->curr->prio) {
1258                 resched_task(rq->curr);
1259                 return;
1260         }
1261
1262 #ifdef CONFIG_SMP
1263         /*
1264          * If:
1265          *
1266          * - the newly woken task is of equal priority to the current task
1267          * - the newly woken task is non-migratable while current is migratable
1268          * - current will be preempted on the next reschedule
1269          *
1270          * we should check to see if current can readily move to a different
1271          * cpu.  If so, we will reschedule to allow the push logic to try
1272          * to move current somewhere else, making room for our non-migratable
1273          * task.
1274          */
1275         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1276                 check_preempt_equal_prio(rq, p);
1277 #endif
1278 }
1279
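/*
 * Pick the first entity queued at the highest set priority of this rt_rq;
 * the caller descends through group runqueues until it reaches a task.
 */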
1280 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1281                                                    struct rt_rq *rt_rq)
1282 {
1283         struct rt_prio_array *array = &rt_rq->active;
1284         struct sched_rt_entity *next = NULL;
1285         struct list_head *queue;
1286         int idx;
1287
1288         idx = sched_find_first_bit(array->bitmap);
1289         BUG_ON(idx >= MAX_RT_PRIO);
1290
1291         queue = array->queue + idx;
1292         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1293
1294         return next;
1295 }
1296
1297 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1298 {
1299         struct sched_rt_entity *rt_se;
1300         struct task_struct *p;
1301         struct rt_rq *rt_rq;
1302
1303         rt_rq = &rq->rt;
1304
1305         if (!rt_rq->rt_nr_running)
1306                 return NULL;
1307
1308         if (rt_rq_throttled(rt_rq))
1309                 return NULL;
1310
1311         do {
1312                 rt_se = pick_next_rt_entity(rq, rt_rq);
1313                 BUG_ON(!rt_se);
1314                 rt_rq = group_rt_rq(rt_se);
1315         } while (rt_rq);
1316
1317         p = rt_task_of(rt_se);
1318         p->se.exec_start = rq_clock_task(rq);
1319
1320         return p;
1321 }
1322
1323 static struct task_struct *pick_next_task_rt(struct rq *rq)
1324 {
1325         struct task_struct *p = _pick_next_task_rt(rq);
1326
1327         /* The running task is never eligible for pushing */
1328         if (p)
1329                 dequeue_pushable_task(rq, p);
1330
1331 #ifdef CONFIG_SMP
1332         /*
1333          * We detect this state here so that we can avoid taking the RQ
1334          * lock again later if there is no need to push
1335          */
1336         rq->post_schedule = has_pushable_tasks(rq);
1337 #endif
1338
1339         return p;
1340 }
1341
1342 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1343 {
1344         update_curr_rt(rq);
1345
1346         /*
1347          * The previous task needs to be made eligible for pushing
1348          * if it is still active
1349          */
1350         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1351                 enqueue_pushable_task(rq, p);
1352 }
1353
1354 #ifdef CONFIG_SMP
1355
1356 /* Only try algorithms three times */
1357 #define RT_MAX_TRIES 3
1358
1359 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1360 {
1361         if (!task_running(rq, p) &&
1362             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1363                 return 1;
1364         return 0;
1365 }
1366
1367 /*
1368  * Return the highest-priority pushable task on this rq that is suitable to
1369  * be executed on the given cpu, or NULL if there is none.
1370  */
1371 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1372 {
1373         struct plist_head *head = &rq->rt.pushable_tasks;
1374         struct task_struct *p;
1375
1376         if (!has_pushable_tasks(rq))
1377                 return NULL;
1378
1379         plist_for_each_entry(p, head, pushable_tasks) {
1380                 if (pick_rt_task(rq, p, cpu))
1381                         return p;
1382         }
1383
1384         return NULL;
1385 }
1386
1387 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1388
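/*
 * Find a CPU whose runqueue runs only lower-priority work and on which
 * @task is allowed to run: prefer the task's previous CPU, then a CPU close
 * to it in the scheduler-domain topology, and finally any CPU from the
 * candidate mask. Returns -1 if no suitable CPU exists.
 */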
1389 static int find_lowest_rq(struct task_struct *task)
1390 {
1391         struct sched_domain *sd;
1392         struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1393         int this_cpu = smp_processor_id();
1394         int cpu      = task_cpu(task);
1395
1396         /* Make sure the mask is initialized first */
1397         if (unlikely(!lowest_mask))
1398                 return -1;
1399
1400         if (task->nr_cpus_allowed == 1)
1401                 return -1; /* No other targets possible */
1402
1403         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1404                 return -1; /* No targets found */
1405
1406         /*
1407          * At this point we have built a mask of cpus representing the
1408          * lowest priority tasks in the system.  Now we want to elect
1409          * the best one based on our affinity and topology.
1410          *
1411          * We prioritize the last cpu that the task executed on since
1412          * it is most likely cache-hot in that location.
1413          */
1414         if (cpumask_test_cpu(cpu, lowest_mask))
1415                 return cpu;
1416
1417         /*
1418          * Otherwise, we consult the sched_domains span maps to figure
1419          * out which cpu is logically closest to our hot cache data.
1420          */
1421         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1422                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1423
1424         rcu_read_lock();
1425         for_each_domain(cpu, sd) {
1426                 if (sd->flags & SD_WAKE_AFFINE) {
1427                         int best_cpu;
1428
1429                         /*
1430                          * "this_cpu" is cheaper to preempt than a
1431                          * remote processor.
1432                          */
1433                         if (this_cpu != -1 &&
1434                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1435                                 rcu_read_unlock();
1436                                 return this_cpu;
1437                         }
1438
1439                         best_cpu = cpumask_first_and(lowest_mask,
1440                                                      sched_domain_span(sd));
1441                         if (best_cpu < nr_cpu_ids) {
1442                                 rcu_read_unlock();
1443                                 return best_cpu;
1444                         }
1445                 }
1446         }
1447         rcu_read_unlock();
1448
1449         /*
1450          * And finally, if there were no matches within the domains
1451          * just give the caller *something* to work with from the compatible
1452          * locations.
1453          */
1454         if (this_cpu != -1)
1455                 return this_cpu;
1456
1457         cpu = cpumask_any(lowest_mask);
1458         if (cpu < nr_cpu_ids)
1459                 return cpu;
1460         return -1;
1461 }
1462
1463 /* Will lock the rq it finds */
1464 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1465 {
1466         struct rq *lowest_rq = NULL;
1467         int tries;
1468         int cpu;
1469
1470         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1471                 cpu = find_lowest_rq(task);
1472
1473                 if ((cpu == -1) || (cpu == rq->cpu))
1474                         break;
1475
1476                 lowest_rq = cpu_rq(cpu);
1477
1478                 /* if the prio of this runqueue changed, try again */
1479                 if (double_lock_balance(rq, lowest_rq)) {
1480                         /*
1481                          * We had to unlock the run queue. In
1482                          * the meantime, the task could have
1483                          * migrated already or had its affinity changed.
1484                          * Also make sure that it wasn't scheduled on its rq.
1485                          */
1486                         if (unlikely(task_rq(task) != rq ||
1487                                      !cpumask_test_cpu(lowest_rq->cpu,
1488                                                        tsk_cpus_allowed(task)) ||
1489                                      task_running(rq, task) ||
1490                                      !task->on_rq)) {
1491
1492                                 double_unlock_balance(rq, lowest_rq);
1493                                 lowest_rq = NULL;
1494                                 break;
1495                         }
1496                 }
1497
1498                 /* If this rq is still suitable use it. */
1499                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1500                         break;
1501
1502                 /* try again */
1503                 double_unlock_balance(rq, lowest_rq);
1504                 lowest_rq = NULL;
1505         }
1506
1507         return lowest_rq;
1508 }
1509
1510 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1511 {
1512         struct task_struct *p;
1513
1514         if (!has_pushable_tasks(rq))
1515                 return NULL;
1516
1517         p = plist_first_entry(&rq->rt.pushable_tasks,
1518                               struct task_struct, pushable_tasks);
1519
1520         BUG_ON(rq->cpu != task_cpu(p));
1521         BUG_ON(task_current(rq, p));
1522         BUG_ON(p->nr_cpus_allowed <= 1);
1523
1524         BUG_ON(!p->on_rq);
1525         BUG_ON(!rt_task(p));
1526
1527         return p;
1528 }
1529
1530 /*
1531  * If the current CPU has more than one RT task, see if the non-
1532  * running task can migrate over to a CPU that is running a task
1533  * of lesser priority.
1534  */
1535 static int push_rt_task(struct rq *rq)
1536 {
1537         struct task_struct *next_task;
1538         struct rq *lowest_rq;
1539         int ret = 0;
1540
1541         if (!rq->rt.overloaded)
1542                 return 0;
1543
1544         next_task = pick_next_pushable_task(rq);
1545         if (!next_task)
1546                 return 0;
1547
1548 retry:
1549         if (unlikely(next_task == rq->curr)) {
1550                 WARN_ON(1);
1551                 return 0;
1552         }
1553
1554         /*
1555          * It's possible that the next_task slipped in with a
1556          * higher priority than current. If that's the case
1557          * just reschedule current.
1558          */
1559         if (unlikely(next_task->prio < rq->curr->prio)) {
1560                 resched_task(rq->curr);
1561                 return 0;
1562         }
1563
1564         /* We might release rq lock */
1565         get_task_struct(next_task);
1566
1567         /* find_lock_lowest_rq locks the rq if found */
1568         lowest_rq = find_lock_lowest_rq(next_task, rq);
1569         if (!lowest_rq) {
1570                 struct task_struct *task;
1571                 /*
1572                  * find_lock_lowest_rq releases rq->lock
1573                  * so it is possible that next_task has migrated.
1574                  *
1575                  * We need to make sure that the task is still on the same
1576                  * run-queue and is also still the next task eligible for
1577                  * pushing.
1578                  */
1579                 task = pick_next_pushable_task(rq);
1580                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1581                         /*
1582                          * The task hasn't migrated, and is still the next
1583                          * eligible task, but we failed to find a run-queue
1584                          * to push it to.  Do not retry in this case, since
1585                          * other cpus will pull from us when ready.
1586                          */
1587                         goto out;
1588                 }
1589
1590                 if (!task)
1591                         /* No more tasks, just exit */
1592                         goto out;
1593
1594                 /*
1595                  * Something has shifted, try again.
1596                  */
1597                 put_task_struct(next_task);
1598                 next_task = task;
1599                 goto retry;
1600         }
1601
1602         deactivate_task(rq, next_task, 0);
1603         set_task_cpu(next_task, lowest_rq->cpu);
1604         activate_task(lowest_rq, next_task, 0);
1605         ret = 1;
1606
1607         resched_task(lowest_rq->curr);
1608
1609         double_unlock_balance(rq, lowest_rq);
1610
1611 out:
1612         put_task_struct(next_task);
1613
1614         return ret;
1615 }
1616
1617 static void push_rt_tasks(struct rq *rq)
1618 {
1619         /* push_rt_task will return true if it moved an RT */
1620         while (push_rt_task(rq))
1621                 ;
1622 }
1623
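/*
 * Scan every overloaded runqueue in our root domain and pull over the
 * highest-priority pushable task that would preempt whatever this rq is
 * about to run. Returns 1 if at least one task was pulled.
 */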
1624 static int pull_rt_task(struct rq *this_rq)
1625 {
1626         int this_cpu = this_rq->cpu, ret = 0, cpu;
1627         struct task_struct *p;
1628         struct rq *src_rq;
1629
1630         if (likely(!rt_overloaded(this_rq)))
1631                 return 0;
1632
1633         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1634                 if (this_cpu == cpu)
1635                         continue;
1636
1637                 src_rq = cpu_rq(cpu);
1638
1639                 /*
1640                  * Don't bother taking the src_rq->lock if the next highest
1641                  * task is known to be lower-priority than our current task.
1642                  * This may look racy, but if this value is about to go
1643                  * logically higher, the src_rq will push this task away.
1644          * And if it's going logically lower, we do not care.
1645                  */
1646                 if (src_rq->rt.highest_prio.next >=
1647                     this_rq->rt.highest_prio.curr)
1648                         continue;
1649
1650                 /*
1651                  * We can potentially drop this_rq's lock in
1652                  * double_lock_balance, and another CPU could
1653                  * alter this_rq
1654                  */
1655                 double_lock_balance(this_rq, src_rq);
1656
1657                 /*
1658                  * We can only pull a task that is pushable
1659                  * on its rq, and no others.
1660                  */
1661                 p = pick_highest_pushable_task(src_rq, this_cpu);
1662
1663                 /*
1664                  * Do we have an RT task that preempts
1665                  * the to-be-scheduled task?
1666                  */
1667                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1668                         WARN_ON(p == src_rq->curr);
1669                         WARN_ON(!p->on_rq);
1670
1671                         /*
1672                          * There's a chance that p is higher in priority
1673                          * than what's currently running on its cpu.
1674                          * This is just because p is waking up and hasn't
1675                          * had a chance to schedule. We only pull
1676                          * p if it is lower in priority than the
1677                          * current task on the run queue.
1678                          */
1679                         if (p->prio < src_rq->curr->prio)
1680                                 goto skip;
1681
1682                         ret = 1;
1683
1684                         deactivate_task(src_rq, p, 0);
1685                         set_task_cpu(p, this_cpu);
1686                         activate_task(this_rq, p, 0);
1687                         /*
1688                          * We continue with the search, just in
1689                          * case there's an even higher prio task
1690                          * in another runqueue. (low likelihood
1691                          * but possible)
1692                          */
1693                 }
1694 skip:
1695                 double_unlock_balance(this_rq, src_rq);
1696         }
1697
1698         return ret;
1699 }
1700
1701 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1702 {
1703         /* Try to pull RT tasks here if we lower this rq's prio */
1704         if (rq->rt.highest_prio.curr > prev->prio)
1705                 pull_rt_task(rq);
1706 }
1707
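/*
 * Runs after a context switch has completed: push away any queued RT
 * tasks that cannot run on this CPU right now but could run
 * immediately somewhere else.
 */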
1708 static void post_schedule_rt(struct rq *rq)
1709 {
1710         push_rt_tasks(rq);
1711 }
1712
1713 /*
1714  * If we are not running and we are not going to reschedule soon, we should
1715  * try to push tasks away now
1716  */
1717 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1718 {
1719         if (!task_running(rq, p) &&
1720             !test_tsk_need_resched(rq->curr) &&
1721             has_pushable_tasks(rq) &&
1722             p->nr_cpus_allowed > 1 &&
1723             rt_task(rq->curr) &&
1724             (rq->curr->nr_cpus_allowed < 2 ||
1725              rq->curr->prio <= p->prio))
1726                 push_rt_tasks(rq);
1727 }
1728
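/*
 * The affinity mask of an RT task is changing.  Keep the runqueue's
 * migratory-task count and the pushable list consistent with the new
 * mask: a task restricted to a single CPU stops being a push
 * candidate, while a task that gains CPUs becomes one.
 */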
1729 static void set_cpus_allowed_rt(struct task_struct *p,
1730                                 const struct cpumask *new_mask)
1731 {
1732         struct rq *rq;
1733         int weight;
1734
1735         BUG_ON(!rt_task(p));
1736
1737         if (!p->on_rq)
1738                 return;
1739
1740         weight = cpumask_weight(new_mask);
1741
1742         /*
1743          * Only update if the process changes whether it can
1744          * migrate or not.
1745          */
1746         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1747                 return;
1748
1749         rq = task_rq(p);
1750
1751         /*
1752          * Either the task could migrate before and now cannot, or vice versa.
1753          */
1754         if (weight <= 1) {
1755                 if (!task_current(rq, p))
1756                         dequeue_pushable_task(rq, p);
1757                 BUG_ON(!rq->rt.rt_nr_migratory);
1758                 rq->rt.rt_nr_migratory--;
1759         } else {
1760                 if (!task_current(rq, p))
1761                         enqueue_pushable_task(rq, p);
1762                 rq->rt.rt_nr_migratory++;
1763         }
1764
1765         update_rt_migration(&rq->rt);
1766 }
1767
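/*
 * The runqueue is being attached to its root domain: re-advertise its
 * overload state, re-enable its share of RT runtime, and publish its
 * highest RT priority to cpupri so other CPUs can consider it as a
 * push target.
 */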
1768 /* Assumes rq->lock is held */
1769 static void rq_online_rt(struct rq *rq)
1770 {
1771         if (rq->rt.overloaded)
1772                 rt_set_overload(rq);
1773
1774         __enable_runtime(rq);
1775
1776         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1777 }
1778
1779 /* Assumes rq->lock is held */
1780 static void rq_offline_rt(struct rq *rq)
1781 {
1782         if (rq->rt.overloaded)
1783                 rt_clear_overload(rq);
1784
1785         __disable_runtime(rq);
1786
1787         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1788 }
1789
1790 /*
1791  * When switching away from the rt queue, we bring ourselves to a position
1792  * where we might want to pull RT tasks from other runqueues.
1793  */
1794 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1795 {
1796         /*
1797          * If there are other RT tasks then we will reschedule
1798          * and the scheduling of the other RT tasks will handle
1799          * the balancing. But if we are the last RT task
1800          * we may need to handle the pulling of RT tasks
1801          * now.
1802          */
1803         if (!p->on_rq || rq->rt.rt_nr_running)
1804                 return;
1805
1806         if (pull_rt_task(rq))
1807                 resched_task(rq->curr);
1808 }
1809
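/*
 * Allocate the per-cpu cpumask scratch space used when searching for a
 * CPU to push an RT task to (see find_lowest_rq()).
 */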
1810 void init_sched_rt_class(void)
1811 {
1812         unsigned int i;
1813
1814         for_each_possible_cpu(i) {
1815                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1816                                         GFP_KERNEL, cpu_to_node(i));
1817         }
1818 }
1819 #endif /* CONFIG_SMP */
1820
1821 /*
1822  * When switching a task to RT, we may overload the runqueue
1823  * with RT tasks. In this case we try to push them off to
1824  * other runqueues.
1825  */
1826 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1827 {
1828         int check_resched = 1;
1829
1830         /*
1831          * If we are already running, then there's nothing
1832          * that needs to be done. But if we are not running
1833          * we may need to preempt the current running task.
1834          * If that current running task is also an RT task
1835          * then see if we can move to another run queue.
1836          */
1837         if (p->on_rq && rq->curr != p) {
1838 #ifdef CONFIG_SMP
1839                 if (rq->rt.overloaded && push_rt_task(rq) &&
1840                     /* Don't resched if we changed runqueues */
1841                     rq != task_rq(p))
1842                         check_resched = 0;
1843 #endif /* CONFIG_SMP */
1844                 if (check_resched && p->prio < rq->curr->prio)
1845                         resched_task(rq->curr);
1846         }
1847 }
1848
1849 /*
1850  * Priority of the task has changed. This may cause
1851  * us to initiate a push or pull.
1852  */
1853 static void
1854 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1855 {
1856         if (!p->on_rq)
1857                 return;
1858
1859         if (rq->curr == p) {
1860 #ifdef CONFIG_SMP
1861                 /*
1862                  * If our priority decreases while running, we
1863                  * may need to pull tasks to this runqueue.
1864                  */
1865                 if (oldprio < p->prio)
1866                         pull_rt_task(rq);
1867                 /*
1868                  * If there's a higher priority task waiting to run
1869                  * then reschedule. Note, the above pull_rt_task
1870                  * can release the rq lock and p could migrate.
1871                  * Only reschedule if p is still on the same runqueue.
1872                  */
1873                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1874                         resched_task(p);
1875 #else
1876                 /* For UP simply resched on drop of prio */
1877                 if (oldprio < p->prio)
1878                         resched_task(p);
1879 #endif /* CONFIG_SMP */
1880         } else {
1881                 /*
1882                  * This task is not running, but if its priority
1883                  * is higher than that of the current running task,
1884                  * then reschedule.
1885                  */
1886                 if (p->prio < rq->curr->prio)
1887                         resched_task(rq->curr);
1888         }
1889 }
1890
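/*
 * RLIMIT_RTTIME policing: p->rt.timeout counts, roughly, the scheduler
 * ticks of CPU time this task has used since it last blocked.  Once
 * that exceeds the rlimit (given in microseconds, hence the conversion
 * to ticks below), sched_exp is armed so the posix-cpu-timers code
 * notices and can deliver the SIGXCPU/SIGKILL required by
 * RLIMIT_RTTIME.
 *
 * Worked example, assuming HZ=1000 purely for illustration: a soft
 * limit of 500000us means USEC_PER_SEC/HZ = 1000us per tick, so "next"
 * is 500 and the expiry is armed once the task has accumulated more
 * than 500 ticks.
 */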
1891 static void watchdog(struct rq *rq, struct task_struct *p)
1892 {
1893         unsigned long soft, hard;
1894
1895         /* max may change after cur was read; any mismatch is corrected on the next tick */
1896         soft = task_rlimit(p, RLIMIT_RTTIME);
1897         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1898
1899         if (soft != RLIM_INFINITY) {
1900                 unsigned long next;
1901
1902                 if (p->rt.watchdog_stamp != jiffies) {
1903                         p->rt.timeout++;
1904                         p->rt.watchdog_stamp = jiffies;
1905                 }
1906
1907                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1908                 if (p->rt.timeout > next)
1909                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1910         }
1911 }
1912
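/*
 * Per-tick work for an RT task: update runtime accounting, run the
 * RLIMIT_RTTIME watchdog above, and do SCHED_RR round-robin
 * timeslicing.  The slice is refilled from sched_rr_timeslice, which
 * defaults to RR_TIMESLICE (100ms worth of ticks) and can be tuned via
 * the sched_rr_timeslice_ms sysctl.
 */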
1913 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1914 {
1915         struct sched_rt_entity *rt_se = &p->rt;
1916
1917         update_curr_rt(rq);
1918
1919         watchdog(rq, p);
1920
1921         /*
1922          * RR tasks need a special form of timeslice management.
1923          * FIFO tasks have no timeslices.
1924          */
1925         if (p->policy != SCHED_RR)
1926                 return;
1927
1928         if (--p->rt.time_slice)
1929                 return;
1930
1931         p->rt.time_slice = sched_rr_timeslice;
1932
1933         /*
1934          * Requeue to the end of the queue if we (or any of our
1935          * ancestors) are not the only element on the queue.
1936          */
1937         for_each_sched_rt_entity(rt_se) {
1938                 if (rt_se->run_list.prev != rt_se->run_list.next) {
1939                         requeue_task_rt(rq, p, 0);
1940                         set_tsk_need_resched(p);
1941                         return;
1942                 }
1943         }
1944 }
1945
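/*
 * The task currently running on this rq has (re)entered the RT class,
 * e.g. after a priority or policy change: stamp a fresh exec_start and
 * take it off the pushable list, as the running task is never a push
 * candidate.
 */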
1946 static void set_curr_task_rt(struct rq *rq)
1947 {
1948         struct task_struct *p = rq->curr;
1949
1950         p->se.exec_start = rq_clock_task(rq);
1951
1952         /* The running task is never eligible for pushing */
1953         dequeue_pushable_task(rq, p);
1954 }
1955
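/*
 * Report the round-robin interval; this is what sched_rr_get_interval(2)
 * converts to a timespec for userspace.  Illustrative sketch of the
 * userspace view (not part of this file):
 *
 *	struct timespec ts;
 *	sched_rr_get_interval(0, &ts);
 *	=> ts is ~100ms for a SCHED_RR task, 0 for SCHED_FIFO
 */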
1956 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
1957 {
1958         /*
1959          * Time slice is 0 for SCHED_FIFO tasks
1960          */
1961         if (task->policy == SCHED_RR)
1962                 return sched_rr_timeslice;
1963         else
1964                 return 0;
1965 }
1966
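/*
 * The sched_class implementation backing SCHED_FIFO and SCHED_RR.
 * ".next" chains it in front of fair_sched_class, so runnable RT tasks
 * are always considered before any CFS task when picking what to run.
 */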
1967 const struct sched_class rt_sched_class = {
1968         .next                   = &fair_sched_class,
1969         .enqueue_task           = enqueue_task_rt,
1970         .dequeue_task           = dequeue_task_rt,
1971         .yield_task             = yield_task_rt,
1972
1973         .check_preempt_curr     = check_preempt_curr_rt,
1974
1975         .pick_next_task         = pick_next_task_rt,
1976         .put_prev_task          = put_prev_task_rt,
1977
1978 #ifdef CONFIG_SMP
1979         .select_task_rq         = select_task_rq_rt,
1980
1981         .set_cpus_allowed       = set_cpus_allowed_rt,
1982         .rq_online              = rq_online_rt,
1983         .rq_offline             = rq_offline_rt,
1984         .pre_schedule           = pre_schedule_rt,
1985         .post_schedule          = post_schedule_rt,
1986         .task_woken             = task_woken_rt,
1987         .switched_from          = switched_from_rt,
1988 #endif
1989
1990         .set_curr_task          = set_curr_task_rt,
1991         .task_tick              = task_tick_rt,
1992
1993         .get_rr_interval        = get_rr_interval_rt,
1994
1995         .prio_changed           = prio_changed_rt,
1996         .switched_to            = switched_to_rt,
1997 };
1998
1999 #ifdef CONFIG_SCHED_DEBUG
2000 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2001
2002 void print_rt_stats(struct seq_file *m, int cpu)
2003 {
2004         rt_rq_iter_t iter;
2005         struct rt_rq *rt_rq;
2006
2007         rcu_read_lock();
2008         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2009                 print_rt_rq(m, cpu, rt_rq);
2010         rcu_read_unlock();
2011 }
2012 #endif /* CONFIG_SCHED_DEBUG */