Merge tag 'v3.18' into p/abusse/merge_upgrade
[projects/modsched/linux.git] kernel/sched/cfs/rt.c
index 1999021..20bca39 100644
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
 #endif
+       /* We start in dequeued state, because no RT tasks are queued */
+       rt_rq->rt_queued = 0;
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
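
The new rt_queued flag records whether this rt_rq's tasks are currently counted in the root rq's nr_running; only enqueue_top_rt_rq()/dequeue_top_rt_rq() further down flip it. A hypothetical predicate (not part of the patch) stating the invariant:

/*
 * Hypothetical, for illustration only: true iff this rt_rq's tasks are
 * visible to the scheduler core's "anything to run?" accounting.
 */
static inline bool rt_rq_is_accounted(struct rt_rq *rt_rq)
{
	/* set <=> rt_nr_running has been added to rq->nr_running */
	return rt_rq->rt_queued;
}
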
@@ -112,6 +114,13 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
        return rt_se->rt_rq;
 }
 
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = rt_se->rt_rq;
+
+       return rt_rq->rq;
+}
+
 void free_rt_sched_group(struct task_group *tg)
 {
        int i;
@@ -211,10 +220,16 @@ static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
        return container_of(rt_rq, struct rq, rt);
 }
 
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
 {
        struct task_struct *p = rt_task_of(rt_se);
-       struct rq *rq = task_rq(p);
+
+       return task_rq(p);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+       struct rq *rq = rq_of_rt_se(rt_se);
 
        return &rq->rt;
 }
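
After this refactor both configurations express rt_rq_of_se() in terms of rq_of_rt_se(). In the !CONFIG_RT_GROUP_SCHED case shown here the relationship reduces to the following (a restatement of the hunk above, wrapped in a hypothetical caller):

static void example_accessors(struct sched_rt_entity *rt_se)	/* hypothetical */
{
	/* rt_se is embedded in its task, so the rq is the task's rq ... */
	struct rq *rq = rq_of_rt_se(rt_se);	/* == task_rq(rt_task_of(rt_se)) */
	/* ... and the only rt_rq is the one embedded in that rq. */
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);	/* == &rq->rt */

	(void)rq;
	(void)rt_rq;
}
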
@@ -229,6 +244,14 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+       /* Try to pull RT tasks here if we lower this rq's prio */
+       return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
        return atomic_read(&rq->rd->rto_count);
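
Note the comparison direction: RT priorities run 0..99 with lower numbers meaning higher priority, so highest_prio.curr > prev->prio reads "the best RT task left on this rq is weaker than the task we are switching away from". A toy evaluation with assumed values:

/* Illustrative values only; hypothetical helper, not in the patch. */
static bool example_need_pull(void)
{
	int highest_prio_curr = 50;	/* best RT task still queued here */
	int prev_prio = 10;		/* task being switched out */

	/* true: this rq is dropping from prio 10 to prio 50, so it is
	 * worth trying to pull a stronger RT task from a sibling CPU. */
	return highest_prio_curr > prev_prio;
}
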
@@ -315,6 +338,15 @@ static inline int has_pushable_tasks(struct rq *rq)
        return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+       /*
+        * We detect this state here so that we can avoid taking the RQ
+        * lock again later if there is no need to push
+        */
+       rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,8 +391,24 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+       return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+       return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
        return !list_empty(&rt_se->run_list);
@@ -415,17 +463,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+       struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;
 
-       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+       int cpu = cpu_of(rq);
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
        if (rt_rq->rt_nr_running) {
-               if (rt_se && !on_rt_rq(rt_se))
+               if (!rt_se)
+                       enqueue_top_rt_rq(rt_rq);
+               else if (!on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
+
                if (rt_rq->highest_prio.curr < curr->prio)
-                       resched_task(curr);
+                       resched_curr(rq);
        }
 }
 
@@ -436,7 +488,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (rt_se && on_rt_rq(rt_se))
+       if (!rt_se)
+               dequeue_top_rt_rq(rt_rq);
+       else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
 }
 
@@ -507,12 +561,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-       if (rt_rq->rt_nr_running)
-               resched_task(rq_of_rt_rq(rt_rq)->curr);
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       if (!rt_rq->rt_nr_running)
+               return;
+
+       enqueue_top_rt_rq(rt_rq);
+       resched_curr(rq);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+       dequeue_top_rt_rq(rt_rq);
 }
 
 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
@@ -681,6 +741,9 @@ balanced:
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
+
+               /* Make rt_rq available for pick_next_task() */
+               sched_rt_rq_enqueue(rt_rq);
        }
 }
 
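
Since a throttled rt_rq is now dequeued from the top-level accounting, clearing rt_throttled by itself would leave the group's tasks invisible to the scheduler. The added sched_rt_rq_enqueue() reruns enqueue_top_rt_rq() so rt_nr_running is folded back into rq->nr_running. A sketch of the unthrottle sequence under the locking shown above:

/* Sketch of the unthrottle path, assuming both runtime locks are held
 * as in the hunk above. */
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time = 0;		/* budget replenished */
rt_rq->rt_throttled = 0;	/* no longer throttled ... */
raw_spin_unlock(&rt_rq->rt_runtime_lock);

sched_rt_rq_enqueue(rt_rq);	/* ... and runnable for pick_next_task() again */
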
@@ -831,14 +894,8 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
-                       static bool once = false;
-
                        rt_rq->rt_throttled = 1;
-
-                       if (!once) {
-                               once = true;
-                               printk_sched("sched: RT throttling activated\n");
-                       }
+                       printk_deferred_once("sched: RT throttling activated\n");
                } else {
                        /*
                         * In case we did anyway, make it go away,
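
printk_deferred_once() replaces the open-coded static flag: the warning is emitted at most once, through the deferred printk path that is safe to use while holding the rq lock. The macro is roughly (a sketch of the pattern, not the verbatim kernel definition):

#define printk_deferred_once(fmt, ...)			\
({							\
	static bool __print_once;			\
							\
	if (!__print_once) {				\
		__print_once = true;			\
		printk_deferred(fmt, ##__VA_ARGS__);	\
	}						\
})
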
@@ -865,7 +922,6 @@ static void update_curr_rt(struct rq *rq)
 {
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
-       struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;
 
        if (curr->sched_class != &rt_sched_class)
@@ -890,18 +946,50 @@ static void update_curr_rt(struct rq *rq)
                return;
 
        for_each_sched_rt_entity(rt_se) {
-               rt_rq = rt_rq_of_se(rt_se);
+               struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 
                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
-                               resched_task(curr);
+                               resched_curr(rq);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (!rt_rq->rt_queued)
+               return;
+
+       BUG_ON(!rq->nr_running);
+
+       sub_nr_running(rq, rt_rq->rt_nr_running);
+       rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+       struct rq *rq = rq_of_rt_rq(rt_rq);
+
+       BUG_ON(&rq->rt != rt_rq);
+
+       if (rt_rq->rt_queued)
+               return;
+       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+               return;
+
+       add_nr_running(rq, rt_rq->rt_nr_running);
+       rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
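
These two functions are the core of the change: instead of counting RT tasks into rq->nr_running one at a time, the whole rt_nr_running of the root rt_rq is added or subtracted when the group becomes runnable, or becomes throttled/empty. A worked example with assumed numbers:

/* Assume 3 queued RT tasks and 2 CFS tasks, nothing throttled:
 *   rq->nr_running == 5, rt_rq->rt_queued == 1
 */
dequeue_top_rt_rq(rt_rq);	/* e.g. the runtime budget ran out */
/* rq->nr_running == 2, rt_rq->rt_queued == 0: the three RT tasks
 * remain on their priority lists but no longer count as runnable. */

enqueue_top_rt_rq(rt_rq);	/* budget replenished */
/* rq->nr_running == 5, rt_rq->rt_queued == 1 again. */
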
@@ -1024,13 +1112,24 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+static inline
+unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *group_rq = group_rt_rq(rt_se);
+
+       if (group_rq)
+               return group_rq->rt_nr_running;
+       else
+               return 1;
+}
+
 static inline
 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        int prio = rt_se_prio(rt_se);
 
        WARN_ON(!rt_prio(prio));
-       rt_rq->rt_nr_running++;
+       rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
 
        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
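
rt_se_nr_running() makes the bookkeeping hierarchical: a group entity contributes the task count of its child rt_rq, a plain task entity contributes one. A worked example, assuming CONFIG_RT_GROUP_SCHED and a group G with two queued tasks:

/* Assumed hierarchy, for illustration:
 *
 *   root rt_rq
 *     +-- G's sched_rt_entity      (group_rt_rq(se) == G's rt_rq)
 *         +-- RT task A
 *         +-- RT task B            (G's rt_rq->rt_nr_running == 2)
 *
 * inc_rt_tasks(se, root) now adds rt_se_nr_running(se) == 2, so the
 * root's rt_nr_running counts runnable *tasks*, not entities; the old
 * code would have added 1 for the group entity.
 */
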
@@ -1042,7 +1141,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
-       rt_rq->rt_nr_running--;
+       rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
 
        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
@@ -1099,6 +1198,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
                back = rt_se;
        }
 
+       dequeue_top_rt_rq(rt_rq_of_se(back));
+
        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
@@ -1107,13 +1208,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+       struct rq *rq = rq_of_rt_se(rt_se);
+
        dequeue_rt_stack(rt_se);
 
        for_each_sched_rt_entity(rt_se) {
@@ -1122,6 +1228,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
+       enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1139,8 +1246,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
-
-       inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1151,8 +1256,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
        dequeue_rt_entity(rt_se);
 
        dequeue_pushable_task(rq, p);
-
-       dec_nr_running(rq);
 }
 
 /*
@@ -1264,7 +1367,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
-       resched_task(rq->curr);
+       resched_curr(rq);
 }
 
 #endif /* CONFIG_SMP */
@@ -1275,7 +1378,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->prio < rq->curr->prio) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return;
        }
 
@@ -1318,15 +1421,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
-       struct rt_rq *rt_rq;
-
-       rt_rq = &rq->rt;
-
-       if (!rt_rq->rt_nr_running)
-               return NULL;
-
-       if (rt_rq_throttled(rt_rq))
-               return NULL;
+       struct rt_rq *rt_rq  = &rq->rt;
 
        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1340,21 +1435,42 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 {
-       struct task_struct *p = _pick_next_task_rt(rq);
+       struct task_struct *p;
+       struct rt_rq *rt_rq = &rq->rt;
 
-       /* The running task is never eligible for pushing */
-       if (p)
-               dequeue_pushable_task(rq, p);
+       if (need_pull_rt_task(rq, prev)) {
+               pull_rt_task(rq);
+               /*
+                * pull_rt_task() can drop (and re-acquire) rq->lock; this
+                * means a dl or stop task can slip in, in which case we need
+                * to re-start task selection.
+                */
+               if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
+                            rq->dl.dl_nr_running))
+                       return RETRY_TASK;
+       }
 
-#ifdef CONFIG_SMP
        /*
-        * We detect this state here so that we can avoid taking the RQ
-        * lock again later if there is no need to push
+        * We may dequeue prev's rt_rq in put_prev_task().
+        * So, update the clock before the rt_queued check below.
         */
-       rq->post_schedule = has_pushable_tasks(rq);
-#endif
+       if (prev->sched_class == &rt_sched_class)
+               update_curr_rt(rq);
+
+       if (!rt_rq->rt_queued)
+               return NULL;
+
+       put_prev_task(rq, prev);
+
+       p = _pick_next_task_rt(rq);
+
+       /* The running task is never eligible for pushing */
+       dequeue_pushable_task(rq, p);
+
+       set_post_schedule(rq);
 
        return p;
 }
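
The rewritten pick_next_task_rt() absorbs the old pre_schedule() pull into task selection. pull_rt_task() can drop and retake rq->lock, so a stop or deadline task may appear meanwhile; returning RETRY_TASK tells the core scheduler to restart its walk over the scheduling classes. A simplified sketch of the caller's side of that contract:

/* Simplified sketch of the core pick loop (see pick_next_task() in
 * kernel/sched/core.c for the real thing). */
again:
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			if (unlikely(p == RETRY_TASK))
				goto again;	/* rq->lock was dropped: re-walk classes */
			return p;
		}
	}
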
@@ -1409,7 +1525,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 static int find_lowest_rq(struct task_struct *task)
 {
        struct sched_domain *sd;
-       struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+       struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
 
@@ -1507,7 +1623,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
-                                    !task->on_rq)) {
+                                    !task_on_rq_queued(task))) {
 
                                double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
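
p->on_rq is no longer a plain boolean in this release: it can also hold TASK_ON_RQ_MIGRATING while a task is in flight between runqueues, so bare !p->on_rq tests are converted to the helper, which is essentially:

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}
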
@@ -1541,7 +1657,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);
 
-       BUG_ON(!p->on_rq);
+       BUG_ON(!task_on_rq_queued(p));
        BUG_ON(!rt_task(p));
 
        return p;
@@ -1577,7 +1693,7 @@ retry:
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return 0;
        }
 
@@ -1624,7 +1740,7 @@ retry:
        activate_task(lowest_rq, next_task, 0);
        ret = 1;
 
-       resched_task(lowest_rq->curr);
+       resched_curr(lowest_rq);
 
        double_unlock_balance(rq, lowest_rq);
 
@@ -1692,7 +1808,7 @@ static int pull_rt_task(struct rq *this_rq)
                 */
                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
                        WARN_ON(p == src_rq->curr);
-                       WARN_ON(!p->on_rq);
+                       WARN_ON(!task_on_rq_queued(p));
 
                        /*
                         * There's a chance that p is higher in priority
@@ -1724,13 +1840,6 @@ skip:
        return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-       /* Try to pull RT tasks here if we lower this rq's prio */
-       if (rq->rt.highest_prio.curr > prev->prio)
-               pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
        push_rt_tasks(rq);
@@ -1760,7 +1869,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 
        BUG_ON(!rt_task(p));
 
-       if (!p->on_rq)
+       if (!task_on_rq_queued(p))
                return;
 
        weight = cpumask_weight(new_mask);
@@ -1826,14 +1935,14 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
         * we may need to handle the pulling of RT tasks
         * now.
         */
-       if (!p->on_rq || rq->rt.rt_nr_running)
+       if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
                return;
 
        if (pull_rt_task(rq))
-               resched_task(rq->curr);
+               resched_curr(rq);
 }
 
-void init_sched_rt_class(void)
+void __init init_sched_rt_class(void)
 {
        unsigned int i;
 
@@ -1860,15 +1969,15 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
-       if (p->on_rq && rq->curr != p) {
+       if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-               if (rq->rt.overloaded && push_rt_task(rq) &&
+               if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
                    /* Don't resched if we changed runqueues */
-                   rq != task_rq(p))
+                   push_rt_task(rq) && rq != task_rq(p))
                        check_resched = 0;
 #endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 }
 
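
The reordered condition leans on && short-circuiting: the cheap, side-effect-free p->nr_cpus_allowed > 1 test now runs before push_rt_task(), so a CPU-pinned task can never trigger a push attempt (push_rt_task() may drop the rq lock). Annotated form of the new condition:

/* Guard the side-effecting call with the cheap tests: */
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
    push_rt_task(rq) &&		/* only attempted for pushable tasks */
    rq != task_rq(p))		/* only true if the push moved p away */
	check_resched = 0;
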
@@ -1879,7 +1988,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       if (!p->on_rq)
+       if (!task_on_rq_queued(p))
                return;
 
        if (rq->curr == p) {
@@ -1897,11 +2006,11 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
-                       resched_task(p);
+                       resched_curr(rq);
 #else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
-                       resched_task(p);
+                       resched_curr(rq);
 #endif /* CONFIG_SMP */
        } else {
                /*
@@ -1910,7 +2019,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 }
 
@@ -1963,7 +2072,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        for_each_sched_rt_entity(rt_se) {
                if (rt_se->run_list.prev != rt_se->run_list.next) {
                        requeue_task_rt(rq, p, 0);
-                       set_tsk_need_resched(p);
+                       resched_curr(rq);
                        return;
                }
        }
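
Swapping set_tsk_need_resched(p) for resched_curr(rq) matters on SMP: resched_curr() also sets the preempt flag and, for a remote runqueue, kicks that CPU, whereas the bare flag set could leave a remote CPU running until its next natural reschedule point. Condensed sketch of resched_curr() (abridged from kernel/sched/core.c):

void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu = cpu_of(rq);

	if (test_tsk_need_resched(curr))
		return;				/* already pending */

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();	/* local CPU: no IPI needed */
		return;
	}

	/* remote CPU: set the flag and send an IPI unless it is polling */
	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
}
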
@@ -2007,7 +2116,6 @@ const struct sched_class rt_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
-       .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_woken             = task_woken_rt,
        .switched_from          = switched_from_rt,
@@ -2020,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
+
+       .update_curr            = update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG