/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

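/*
 * Period timer for an rt_bandwidth pool: forward the timer by whole
 * periods, replenishing runtime via do_sched_rt_period_timer() for
 * each overrun, and stop once the pool reports itself idle.
 */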
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

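/* Set up an rt_bandwidth pool with the given period and runtime (in ns). */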
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

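/*
 * Arm the period timer, unless throttling is disabled, runtime is
 * infinite, or the timer is already running.
 */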
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

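/*
 * Initialize an rt_rq: the priority array, the SMP push/pull state and
 * the bandwidth accounting fields.
 */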
void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

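/* A group scheduling entity owns a my_q runqueue; a plain task does not. */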
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

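/*
 * Wire a group's per-cpu rt_rq and scheduling entity into the group
 * hierarchy on the given cpu.
 */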
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

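/*
 * Allocate and initialize the per-cpu rt_rq/rt_se arrays for a new
 * task group; returns 1 on success, 0 on allocation failure.
 */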
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

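/*
 * Overload tracking: a runqueue is "overloaded" when it has more than
 * one runnable RT task and at least one of them can migrate.  The root
 * domain keeps a count and a cpumask of such runqueues so other CPUs
 * know where to pull from.
 */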
static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         *
         * Matched by the barrier in pull_rt_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

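/*
 * The pushable_tasks plist is kept sorted by priority, so its head is
 * always the highest-priority task that could be pushed away.
 */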
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);    \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

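/*
 * Runtime is still being accounted while the period timer is active or
 * while the queue has runtime left in the current period.
 */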
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

        return (hrtimer_active(&rt_b->rt_period_timer) ||
                rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have, that's the amount of runtime
                 * we lent and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

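/*
 * Called from the bandwidth period timer for each elapsed period:
 * refill runtime on every rt_rq covered by the period mask and
 * unthrottle (re-enqueue) queues that have runtime again.  Returns
 * nonzero when everything is idle and the timer may stop.
 */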
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

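/*
 * Check whether this rt_rq has used up its runtime for the current
 * period, and throttle it (dequeue it from its parent) if so.  Returns
 * nonzero when the queue is throttled.
 */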
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * Change rq's cpupri only if rt_rq is the top queue.
         */
        if (&rq->rt != rt_rq)
                return;
#endif
        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * Change rq's cpupri only if rt_rq is the top queue.
         */
        if (&rq->rt != rt_rq)
                return;
#endif
        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put the task at the head or the tail of the run list without the
 * overhead of a dequeue followed by an enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;

        if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is of higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher task
         * around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         *
         * This test is optimistic; if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->nr_cpus_allowed == 1)
                return;

        if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appear to be other cpus that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu.  If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

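/* Pick the first entity on the highest-priority non-empty queue. */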
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq_clock_task(rq);

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
        return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the cpu, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
        struct plist_head *head = &rq->rt.pushable_tasks;
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        plist_for_each_entry(p, head, pushable_tasks) {
                if (pick_rt_task(rq, p, cpu))
                        return p;
        }

        return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * "this_cpu" is cheaper to preempt than a
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(lowest_mask);
        if (cpu < nr_cpu_ids)
                return cpu;
        return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In the
                         * meantime, the task could have migrated already
                         * or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
                                     !task->on_rq)) {

                                double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;

                /* try again */
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

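/*
 * Return the highest-priority task on this rq that is eligible to be
 * pushed to another CPU.
 */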
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task of lesser
 * priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in at a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has migrated.
                 *
                 * We need to make sure that the task is still on the same
                 * run-queue and is also still the next task eligible for
                 * pushing.
                 */
                task = pick_next_pushable_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task hasn't migrated, and is still the next
                         * eligible task, but we failed to find a run-queue
                         * to push it to.  Do not retry in this case, since
                         * other cpus will pull from us when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks, just exit */
                        goto out;

                /*
                 * Something has shifted, try again.
                 */
                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
        ret = 1;

        resched_task(lowest_rq->curr);

        double_unlock_balance(rq, lowest_rq);

out:
        put_task_struct(next_task);

        return ret;
}

static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}

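/*
 * Scan the overloaded runqueues in our root domain and pull over any
 * pushable RT task that would preempt what this rq is about to run.
 * Returns 1 if at least one task was pulled.
 */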
1644 static int pull_rt_task(struct rq *this_rq)
1645 {
1646         int this_cpu = this_rq->cpu, ret = 0, cpu;
1647         struct task_struct *p;
1648         struct rq *src_rq;
1649
1650         if (likely(!rt_overloaded(this_rq)))
1651                 return 0;
1652
1653         /*
1654          * Match the barrier from rt_set_overloaded; this guarantees that if we
1655          * see overloaded we must also see the rto_mask bit.
1656          */
1657         smp_rmb();
1658
1659         for_each_cpu(cpu, this_rq->rd->rto_mask) {
1660                 if (this_cpu == cpu)
1661                         continue;
1662
1663                 src_rq = cpu_rq(cpu);
1664
1665                 /*
1666                  * Don't bother taking the src_rq->lock if the next highest
1667                  * task is known to be lower-priority than our current task.
1668                  * This may look racy, but if this value is about to go
1669                  * logically higher, the src_rq will push this task away.
1670                  * And if its going logically lower, we do not care
1671                  */
1672                 if (src_rq->rt.highest_prio.next >=
1673                     this_rq->rt.highest_prio.curr)
1674                         continue;
1675
1676                 /*
1677                  * We can potentially drop this_rq's lock in
1678                  * double_lock_balance, and another CPU could
1679                  * alter this_rq
1680                  */
1681                 double_lock_balance(this_rq, src_rq);
1682
1683                 /*
1684                  * We can pull only a task, which is pushable
1685                  * on its rq, and no others.
1686                  */
1687                 p = pick_highest_pushable_task(src_rq, this_cpu);
1688
1689                 /*
1690                  * Do we have an RT task that preempts
1691                  * the to-be-scheduled task?
1692                  */
1693                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1694                         WARN_ON(p == src_rq->curr);
1695                         WARN_ON(!p->on_rq);
1696
1697                         /*
1698                          * There's a chance that p is higher in priority
1699                          * than what's currently running on its cpu.
1700                          * This can happen when p is just waking up and hasn't
1701                          * had a chance to schedule yet. We only pull
1702                          * p if it is lower in priority than the
1703                          * current task on its run queue.
1704                          */
1705                         if (p->prio < src_rq->curr->prio)
1706                                 goto skip;
1707
1708                         ret = 1;
1709
1710                         deactivate_task(src_rq, p, 0);
1711                         set_task_cpu(p, this_cpu);
1712                         activate_task(this_rq, p, 0);
1713                         /*
1714                          * We continue with the search, just in
1715                          * case there's an even higher prio task
1716                          * in another runqueue. (low likelihood
1717                          * but possible)
1718                          */
1719                 }
1720 skip:
1721                 double_unlock_balance(this_rq, src_rq);
1722         }
1723
1724         return ret;
1725 }
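/*
 * A note on the comparisons above (illustrative numbers): kernel priorities
 * are inverted, 0 is the most important and MAX_RT_PRIO-1 (99) the least of
 * the RT range, so "p->prio < curr->prio" means p outranks curr. A user
 * asking for sched_priority = 80 via sched_setscheduler() ends up with
 * p->prio = MAX_RT_PRIO-1 - 80 = 19, which wins against sched_priority = 50
 * (p->prio = 49) in every check in this file.
 */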
1726
1727 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1728 {
1729         /* Try to pull RT tasks here if we lower this rq's prio */
1730         if (rq->rt.highest_prio.curr > prev->prio)
1731                 pull_rt_task(rq);
1732 }
1733
1734 static void post_schedule_rt(struct rq *rq)
1735 {
1736         push_rt_tasks(rq);
1737 }
1738
1739 /*
1740  * If we are not running and we are not going to reschedule soon, we should
1741  * try to push tasks away now
1742  */
1743 static void task_woken_rt(struct rq *rq, struct task_struct *p)
1744 {
1745         if (!task_running(rq, p) &&
1746             !test_tsk_need_resched(rq->curr) &&
1747             has_pushable_tasks(rq) &&
1748             p->nr_cpus_allowed > 1 &&
1749             (dl_task(rq->curr) || rt_task(rq->curr)) &&
1750             (rq->curr->nr_cpus_allowed < 2 ||
1751              rq->curr->prio <= p->prio))
1752                 push_rt_tasks(rq);
1753 }
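/*
 * In other words (a rough paraphrase of the condition above): push on
 * wakeup only when p cannot usefully run here right now but might run
 * elsewhere:
 *
 *	woken p:	not already running, free to migrate
 *			(p->nr_cpus_allowed > 1)
 *	rq->curr:	an RT or DL task with no resched pending, and
 *			either pinned (nr_cpus_allowed < 2) or at least
 *			as important as p (prio <= p->prio)
 *
 * Otherwise p either preempts curr locally or will be picked up at the
 * next reschedule.
 */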
1754
1755 static void set_cpus_allowed_rt(struct task_struct *p,
1756                                 const struct cpumask *new_mask)
1757 {
1758         struct rq *rq;
1759         int weight;
1760
1761         BUG_ON(!rt_task(p));
1762
1763         if (!p->on_rq)
1764                 return;
1765
1766         weight = cpumask_weight(new_mask);
1767
1768         /*
1769          * Only update if the task actually changes its migratability
1770          * state, i.e. goes from migratable to pinned or vice versa.
1771          */
1772         if ((p->nr_cpus_allowed > 1) == (weight > 1))
1773                 return;
1774
1775         rq = task_rq(p);
1776
1777         /*
1778          * Either the task could migrate before and is now pinned, or the reverse.
1779          */
1780         if (weight <= 1) {
1781                 if (!task_current(rq, p))
1782                         dequeue_pushable_task(rq, p);
1783                 BUG_ON(!rq->rt.rt_nr_migratory);
1784                 rq->rt.rt_nr_migratory--;
1785         } else {
1786                 if (!task_current(rq, p))
1787                         enqueue_pushable_task(rq, p);
1788                 rq->rt.rt_nr_migratory++;
1789         }
1790
1791         update_rt_migration(&rq->rt);
1792 }
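/*
 * For illustration, the transition tracked above is what user space causes
 * when it pins or unpins an RT task (a hypothetical snippet, not kernel
 * code):
 *
 *	cpu_set_t set;
 *	CPU_ZERO(&set);
 *	CPU_SET(3, &set);
 *	sched_setaffinity(pid, sizeof(set), &set);	// weight 1: now pinned
 *
 * Dropping to a single allowed CPU takes the first branch and decrements
 * rt_nr_migratory; widening the mask again takes the second branch and
 * re-adds the task to the pushable list.
 */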
1793
1794 /* Assumes rq->lock is held */
1795 static void rq_online_rt(struct rq *rq)
1796 {
1797         if (rq->rt.overloaded)
1798                 rt_set_overload(rq);
1799
1800         __enable_runtime(rq);
1801
1802         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1803 }
1804
1805 /* Assumes rq->lock is held */
1806 static void rq_offline_rt(struct rq *rq)
1807 {
1808         if (rq->rt.overloaded)
1809                 rt_clear_overload(rq);
1810
1811         __disable_runtime(rq);
1812
1813         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1814 }
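/*
 * cpupri keeps, per root domain, which CPUs run at which RT level so that
 * push targets can be found quickly. Roughly (a sketch of the mapping; see
 * kernel/sched/cpupri.c for the real thing):
 *
 *	CPUPRI_INVALID	-> CPU offline for RT (set in rq_offline_rt() above)
 *	CPUPRI_IDLE	-> CPU idle
 *	CPUPRI_NORMAL	-> CPU running a non-RT task
 *	2..101		-> CPU running RT prio 99..0 (inverted)
 */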
1815
1816 /*
1817  * When a task switches away from the RT queue, we may end up in a
1818  * position where we want to pull RT tasks from other runqueues.
1819  */
1820 static void switched_from_rt(struct rq *rq, struct task_struct *p)
1821 {
1822         /*
1823          * If there are other RT tasks then we will reschedule
1824          * and the scheduling of the other RT tasks will handle
1825          * the balancing. But if we were the last RT task
1826          * we may need to handle the pulling of RT tasks
1827          * now.
1828          */
1829         if (!p->on_rq || rq->rt.rt_nr_running)
1830                 return;
1831
1832         if (pull_rt_task(rq))
1833                 resched_task(rq->curr);
1834 }
1835
1836 void init_sched_rt_class(void)
1837 {
1838         unsigned int i;
1839
1840         for_each_possible_cpu(i) {
1841                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1842                                         GFP_KERNEL, cpu_to_node(i));
1843         }
1844 }
1845 #endif /* CONFIG_SMP */
1846
1847 /*
1848  * When switching a task to RT, we may overload the runqueue
1849  * with RT tasks. In this case we try to push them off to
1850  * other runqueues.
1851  */
1852 static void switched_to_rt(struct rq *rq, struct task_struct *p)
1853 {
1854         int check_resched = 1;
1855
1856         /*
1857          * If we are already running, then there's nothing
1858          * that needs to be done. But if we are not running
1859          * we may need to preempt the currently running task.
1860          * If that currently running task is also an RT task
1861          * then see if we can push this one to another runqueue.
1862          */
1863         if (p->on_rq && rq->curr != p) {
1864 #ifdef CONFIG_SMP
1865                 if (rq->rt.overloaded && push_rt_task(rq) &&
1866                     /* Don't resched if we changed runqueues */
1867                     rq != task_rq(p))
1868                         check_resched = 0;
1869 #endif /* CONFIG_SMP */
1870                 if (check_resched && p->prio < rq->curr->prio)
1871                         resched_task(rq->curr);
1872         }
1873 }
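/*
 * switched_to_rt() fires, for instance, when sched_setscheduler() moves a
 * SCHED_NORMAL task into an RT policy, or when rt-mutex priority
 * inheritance boosts a task into the RT range. A hypothetical userspace
 * trigger:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *	sched_setscheduler(pid, SCHED_FIFO, &sp);
 */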
1874
1875 /*
1876  * Priority of the task has changed. This may cause
1877  * us to initiate a push or pull.
1878  */
1879 static void
1880 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1881 {
1882         if (!p->on_rq)
1883                 return;
1884
1885         if (rq->curr == p) {
1886 #ifdef CONFIG_SMP
1887                 /*
1888                  * If our priority decreases while running, we
1889                  * may need to pull tasks to this runqueue.
1890                  */
1891                 if (oldprio < p->prio)
1892                         pull_rt_task(rq);
1893                 /*
1894                  * If there's a higher priority task waiting to run
1895                  * then reschedule. Note, the above pull_rt_task
1896                  * can release the rq lock and p could migrate.
1897                  * Only reschedule if p is still on the same runqueue.
1898                  */
1899                 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1900                         resched_task(p);
1901 #else
1902                 /* For UP simply resched on drop of prio */
1903                 if (oldprio < p->prio)
1904                         resched_task(p);
1905 #endif /* CONFIG_SMP */
1906         } else {
1907                 /*
1908                  * This task is not running, but if its priority
1909                  * is higher than that of the currently running
1910                  * task, then reschedule.
1911                  */
1912                 if (p->prio < rq->curr->prio)
1913                         resched_task(rq->curr);
1914         }
1915 }
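/*
 * The SMP cases above, summarized (a sketch; "raise"/"drop" refer to
 * importance, so a smaller ->prio value is a raise):
 *
 *	p is running, prio dropped	-> try to pull a stronger task here
 *	p is running, now outranked	-> resched so the queued winner runs
 *	p not running, raised past curr	-> resched curr so p can preempt
 *
 * On UP only the running/dropped case matters, hence the simpler #else.
 */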
1916
1917 static void watchdog(struct rq *rq, struct task_struct *p)
1918 {
1919         unsigned long soft, hard;
1920
1921         /* max may change after cur was read; this will be fixed next tick */
1922         soft = task_rlimit(p, RLIMIT_RTTIME);
1923         hard = task_rlimit_max(p, RLIMIT_RTTIME);
1924
1925         if (soft != RLIM_INFINITY) {
1926                 unsigned long next;
1927
1928                 if (p->rt.watchdog_stamp != jiffies) {
1929                         p->rt.timeout++;
1930                         p->rt.watchdog_stamp = jiffies;
1931                 }
1932
1933                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1934                 if (p->rt.timeout > next)
1935                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1936         }
1937 }
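/*
 * A worked example for the conversion above (illustrative numbers):
 * RLIMIT_RTTIME is expressed in microseconds while p->rt.timeout counts
 * scheduler ticks, so with HZ = 250 one tick is USEC_PER_SEC/HZ = 4000us
 * and a soft limit of 2000000us (2s) becomes
 *
 *	next = DIV_ROUND_UP(2000000, 4000) = 500 ticks.
 *
 * Once the task has run longer than that without blocking, the expiry set
 * here makes the posix-cpu-timer code deliver SIGXCPU on its next check.
 */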
1938
1939 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1940 {
1941         struct sched_rt_entity *rt_se = &p->rt;
1942
1943         update_curr_rt(rq);
1944
1945         watchdog(rq, p);
1946
1947         /*
1948          * RR tasks need a special form of timeslice management.
1949          * FIFO tasks have no timeslices.
1950          */
1951         if (p->policy != SCHED_RR)
1952                 return;
1953
1954         if (--p->rt.time_slice)
1955                 return;
1956
1957         p->rt.time_slice = sched_rr_timeslice;
1958
1959         /*
1960          * Requeue to the end of the queue if we (and all of our ancestors) are
1961          * not the only element on the queue.
1962          */
1963         for_each_sched_rt_entity(rt_se) {
1964                 if (rt_se->run_list.prev != rt_se->run_list.next) {
1965                         requeue_task_rt(rq, p, 0);
1966                         set_tsk_need_resched(p);
1967                         return;
1968                 }
1969         }
1970 }
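/*
 * For scale (a sketch; defaults may differ by config): sched_rr_timeslice
 * starts out as RR_TIMESLICE = 100 * HZ / 1000 ticks, i.e. 100ms, and is
 * tunable through /proc/sys/kernel/sched_rr_timeslice_ms. With HZ = 1000
 * the countdown above lets a SCHED_RR task run ~100 ticks before it is
 * moved to the tail of its priority queue.
 */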
1971
1972 static void set_curr_task_rt(struct rq *rq)
1973 {
1974         struct task_struct *p = rq->curr;
1975
1976         p->se.exec_start = rq_clock_task(rq);
1977
1978         /* The running task is never eligible for pushing */
1979         dequeue_pushable_task(rq, p);
1980 }
1981
1982 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
1983 {
1984         /*
1985          * Time slice is 0 for SCHED_FIFO tasks
1986          */
1987         if (task->policy == SCHED_RR)
1988                 return sched_rr_timeslice;
1989         else
1990                 return 0;
1991 }
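/*
 * This is what backs the sched_rr_get_interval() syscall; a hypothetical
 * userspace probe of the value computed above:
 *
 *	struct timespec ts;
 *	if (sched_rr_get_interval(pid, &ts) == 0)
 *		printf("%ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 *
 * which reports the RR slice for SCHED_RR tasks and 0 for SCHED_FIFO.
 */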
1992
1993 const struct sched_class rt_sched_class = {
1994         .next                   = &fair_sched_class,
1995         .enqueue_task           = enqueue_task_rt,
1996         .dequeue_task           = dequeue_task_rt,
1997         .yield_task             = yield_task_rt,
1998
1999         .check_preempt_curr     = check_preempt_curr_rt,
2000
2001         .pick_next_task         = pick_next_task_rt,
2002         .put_prev_task          = put_prev_task_rt,
2003
2004 #ifdef CONFIG_SMP
2005         .select_task_rq         = select_task_rq_rt,
2006
2007         .set_cpus_allowed       = set_cpus_allowed_rt,
2008         .rq_online              = rq_online_rt,
2009         .rq_offline             = rq_offline_rt,
2010         .pre_schedule           = pre_schedule_rt,
2011         .post_schedule          = post_schedule_rt,
2012         .task_woken             = task_woken_rt,
2013         .switched_from          = switched_from_rt,
2014 #endif
2015
2016         .set_curr_task          = set_curr_task_rt,
2017         .task_tick              = task_tick_rt,
2018
2019         .get_rr_interval        = get_rr_interval_rt,
2020
2021         .prio_changed           = prio_changed_rt,
2022         .switched_to            = switched_to_rt,
2023 };
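/*
 * The ->next pointer above chains the scheduling classes in priority order;
 * on this kernel the walk is roughly (a sketch):
 *
 *	stop_sched_class -> dl_sched_class -> rt_sched_class
 *		-> fair_sched_class -> idle_sched_class
 *
 * The core scheduler's pick_next_task() tries each class in turn, so a
 * runnable RT task always beats anything CFS has queued.
 */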
2024
2025 #ifdef CONFIG_SCHED_DEBUG
2026 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2027
2028 void print_rt_stats(struct seq_file *m, int cpu)
2029 {
2030         rt_rq_iter_t iter;
2031         struct rt_rq *rt_rq;
2032
2033         rcu_read_lock();
2034         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2035                 print_rt_rq(m, cpu, rt_rq);
2036         rcu_read_unlock();
2037 }
2038 #endif /* CONFIG_SCHED_DEBUG */