Merge tag 'v4.1' into p/abusse/merge_upgrade
[projects/modsched/linux.git] kernel/sched/cfs/core.c
index aae0686..f08622d 100644
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-#ifdef smp_mb__before_atomic
-void __smp_mb__before_atomic(void)
-{
-       smp_mb__before_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__before_atomic);
-#endif
-
-#ifdef smp_mb__after_atomic
-void __smp_mb__after_atomic(void)
-{
-       smp_mb__after_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__after_atomic);
-#endif
-
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
        unsigned long delta;
@@ -135,10 +119,14 @@ void update_rq_clock(struct rq *rq)
 {
        s64 delta;
 
-       if (rq->skip_clock_update > 0)
+       lockdep_assert_held(&rq->lock);
+
+       if (rq->clock_skip_update & RQCF_ACT_SKIP)
                return;
 
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+       if (delta < 0)
+               return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
 }
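
The old skip_clock_update counter becomes a small request/act bit pair: callers request a skip, and __schedule() promotes the request with the `clock_skip_update <<= 1` seen later in this diff, which is the only form update_rq_clock() honours. A standalone sketch of that promotion (the RQCF_* values are assumed to mirror kernel/sched/sched.h, which this diff does not show):

        #include <stdio.h>

        #define RQCF_REQ_SKIP   0x01    /* a clock update skip was requested (assumed value) */
        #define RQCF_ACT_SKIP   0x02    /* the skip is active for this __schedule() (assumed value) */

        struct rq {
                unsigned int clock_skip_update;
                unsigned long long clock;
        };

        static void rq_clock_skip_update(struct rq *rq, int skip)
        {
                if (skip)
                        rq->clock_skip_update |= RQCF_REQ_SKIP;
                else
                        rq->clock_skip_update &= ~RQCF_REQ_SKIP;
        }

        static void update_rq_clock(struct rq *rq, unsigned long long delta)
        {
                if (rq->clock_skip_update & RQCF_ACT_SKIP)
                        return;                 /* back-to-back update avoided */
                rq->clock += delta;
        }

        int main(void)
        {
                struct rq rq = { 0, 0 };

                rq_clock_skip_update(&rq, 1);   /* e.g. from check_preempt_curr()   */
                rq.clock_skip_update <<= 1;     /* __schedule(): promote REQ to ACT */
                update_rq_clock(&rq, 1000);     /* skipped                          */
                rq.clock_skip_update = 0;       /* cleared once the next task is picked */
                update_rq_clock(&rq, 1000);     /* applied                          */
                printf("clock = %llu\n", rq.clock);
                return 0;
        }
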
@@ -243,6 +231,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int i;
+       struct inode *inode;
 
        if (cnt > 63)
                cnt = 63;
@@ -253,7 +242,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        buf[cnt] = 0;
        cmp = strstrip(buf);
 
+       /* Ensure the static_key remains in a consistent state */
+       inode = file_inode(filp);
+       mutex_lock(&inode->i_mutex);
        i = sched_feat_set(cmp);
+       mutex_unlock(&inode->i_mutex);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
@@ -313,59 +306,8 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
+/* cpus with isolated domains */
+cpumask_var_t cpu_isolated_map;
 
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
@@ -442,7 +384,15 @@ static void __hrtick_start(void *arg)
 void hrtick_start(struct rq *rq, u64 delay)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
+       ktime_t time;
+       s64 delta;
+
+       /*
+        * Don't schedule slices shorter than 10000ns; that just
+        * doesn't make sense and can cause timer DoS.
+        */
+       delta = max_t(s64, delay, 10000LL);
+       time = ktime_add_ns(timer->base->get_time(), delta);
 
        hrtimer_set_expires(timer, time);
 
@@ -485,6 +435,11 @@ static __init void init_hrtick(void)
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
+       /*
+        * Don't schedule slices shorter than 10000ns; that just
+        * doesn't make sense. Rely on vruntime for fairness.
+        */
+       delay = max_t(u64, delay, 10000LL);
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
 }
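
Both hrtick_start() variants now clamp the requested slice to at least 10000ns. A throwaway userspace sketch of the clamp, with max_t() re-implemented locally rather than taken from the kernel headers:

        #include <stdint.h>
        #include <stdio.h>

        /* local stand-in for the kernel's type-checked max_t() */
        #define max_t(type, a, b)       ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

        int main(void)
        {
                uint64_t requested[] = { 100, 5000, 10000, 250000 };

                for (int i = 0; i < 4; i++) {
                        /* slices under 10us are rounded up, bounding the re-arm rate */
                        uint64_t delay = max_t(uint64_t, requested[i], 10000ULL);

                        printf("requested %6llu ns -> armed for %6llu ns\n",
                               (unsigned long long)requested[i],
                               (unsigned long long)delay);
                }
                return 0;
        }
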
@@ -587,30 +542,31 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+       struct task_struct *curr = rq->curr;
        int cpu;
 
-       lockdep_assert_held(&task_rq(p)->lock);
+       lockdep_assert_held(&rq->lock);
 
-       if (test_tsk_need_resched(p))
+       if (test_tsk_need_resched(curr))
                return;
 
-       cpu = task_cpu(p);
+       cpu = cpu_of(rq);
 
        if (cpu == smp_processor_id()) {
-               set_tsk_need_resched(p);
+               set_tsk_need_resched(curr);
                set_preempt_need_resched();
                return;
        }
 
-       if (set_nr_and_not_polling(p))
+       if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
@@ -623,7 +579,7 @@ void resched_cpu(int cpu)
 
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
-       resched_task(cpu_curr(cpu));
+       resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
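
resched_curr() only sends an IPI when the remote task is not already polling need_resched; set_nr_and_not_polling() folds the flag update and the polling test into one atomic operation. A rough sketch of that pattern with C11 atomics (the TIF_* bit values here are purely illustrative, not any architecture's real flags):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define TIF_NEED_RESCHED        (1u << 0)       /* illustrative bit values */
        #define TIF_POLLING_NRFLAG      (1u << 1)

        struct thread_info {
                atomic_uint flags;
        };

        /* Set NEED_RESCHED and report whether an IPI is still needed. */
        static bool set_nr_and_not_polling(struct thread_info *ti)
        {
                unsigned int old = atomic_fetch_or(&ti->flags, TIF_NEED_RESCHED);

                return !(old & TIF_POLLING_NRFLAG);
        }

        int main(void)
        {
                struct thread_info polling_idle = { TIF_POLLING_NRFLAG };
                struct thread_info busy_task    = { 0 };

                /* a polling idle loop notices the flag by itself: no IPI */
                printf("polling idle: send IPI? %d\n",
                       set_nr_and_not_polling(&polling_idle));
                printf("busy task:    send IPI? %d\n",
                       set_nr_and_not_polling(&busy_task));
                return 0;
        }
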
 
@@ -684,10 +640,16 @@ static void wake_up_idle_cpu(int cpu)
 
 static bool wake_up_full_nohz_cpu(int cpu)
 {
+       /*
+        * We just need the target to call irq_exit() and re-evaluate
+        * the next tick. The nohz full kick at least implies that.
+        * If needed we can still optimize that later with an
+        * empty IRQ.
+        */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
-                       smp_send_reschedule(cpu);
+                       tick_nohz_full_kick_cpu(cpu);
                return true;
        }
 
@@ -730,18 +692,32 @@ static inline bool got_nohz_idle_kick(void)
 #ifdef CONFIG_NO_HZ_FULL
 bool sched_can_stop_tick(void)
 {
-       struct rq *rq;
+       /*
+        * FIFO realtime policy runs the highest priority task. Other runnable
+        * tasks are of a lower priority. The scheduler tick does nothing.
+        */
+       if (current->policy == SCHED_FIFO)
+               return true;
 
-       rq = this_rq();
+       /*
+        * Round-robin realtime tasks time slice with other tasks at the same
+        * realtime priority. Is this task the only one at this priority?
+        */
+       if (current->policy == SCHED_RR) {
+               struct sched_rt_entity *rt_se = &current->rt;
 
-       /* Make sure rq->nr_running update is visible after the IPI */
-       smp_rmb();
+               return rt_se->run_list.prev == rt_se->run_list.next;
+       }
 
-       /* More than one running task need preemption */
-       if (rq->nr_running > 1)
-               return false;
+       /*
+        * More than one running task needs preemption.
+        * nr_running update is assumed to be visible
+        * after IPI is sent from wakers.
+        */
+       if (this_rq()->nr_running > 1)
+               return false;
 
-       return true;
+       return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
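
For SCHED_RR, the tick can stop only if the task is alone at its priority, which the code above tests by checking that its run_list node is singular (prev and next both point at the queue head). A self-contained sketch of the same test on a locally defined circular list:

        #include <stdbool.h>
        #include <stdio.h>

        struct list_head {
                struct list_head *prev, *next;
        };

        static void list_init(struct list_head *h)
        {
                h->prev = h->next = h;
        }

        static void list_add_tail(struct list_head *n, struct list_head *h)
        {
                n->prev = h->prev;
                n->next = h;
                h->prev->next = n;
                h->prev = n;
        }

        /* true when this node is the only entry linked on its queue */
        static bool only_entry(const struct list_head *node)
        {
                return node->prev == node->next;
        }

        int main(void)
        {
                struct list_head queue, a, b;

                list_init(&queue);
                list_add_tail(&a, &queue);
                printf("one RR task at this prio:  tick can stop? %d\n", only_entry(&a));

                list_add_tail(&b, &queue);
                printf("two RR tasks at this prio: tick can stop? %d\n", only_entry(&a));
                return 0;
        }
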
 
@@ -999,6 +975,9 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
+/*
+ * Can drop rq->lock because sched_class::switched_from() methods may drop it.
+ */
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
@@ -1006,6 +985,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
+               /* Possible rq->lock 'hole'.  */
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio || dl_task(p))
                p->sched_class->prio_changed(rq, p, oldprio);
@@ -1022,7 +1002,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
-                               resched_task(rq->curr);
+                               resched_curr(rq);
                                break;
                        }
                }
@@ -1032,8 +1012,8 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back to back clock update.
         */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
-               rq->skip_clock_update = 1;
+       if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
+               rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
@@ -1045,7 +1025,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_preempt_count(p) & PREEMPT_ACTIVE));
+                       !p->on_rq);
 
 #ifdef CONFIG_LOCKDEP
        /*
@@ -1069,7 +1049,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+               perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -1077,7 +1057,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                struct rq *src_rq, *dst_rq;
 
                src_rq = task_rq(p);
@@ -1203,7 +1183,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
        unsigned long flags;
-       int running, on_rq;
+       int running, queued;
        unsigned long ncsw;
        struct rq *rq;
 
@@ -1241,7 +1221,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
-               on_rq = p->on_rq;
+               queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1273,7 +1253,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
-               if (unlikely(on_rq)) {
+               if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1398,7 +1378,8 @@ out:
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
-       cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+       if (p->nr_cpus_allowed > 1)
+               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
        /*
         * In order not to call set_task_cpu() on a blocking task we need
@@ -1467,7 +1448,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
@@ -1526,7 +1507,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        int ret = 0;
 
        rq = __task_rq_lock(p);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
@@ -1568,9 +1549,7 @@ void scheduler_ipi(void)
         */
        preempt_fold_need_resched();
 
-       if (llist_empty(&this_rq()->wake_list)
-                       && !tick_nohz_full_cpu(smp_processor_id())
-                       && !got_nohz_idle_kick())
+       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1587,7 +1566,6 @@ void scheduler_ipi(void)
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
-       tick_nohz_full_check();
        sched_ttwu_pending();
 
        /*
@@ -1612,6 +1590,30 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
        }
 }
 
+void wake_up_if_idle(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       rcu_read_lock();
+
+       if (!is_idle_task(rcu_dereference(rq->curr)))
+               goto out;
+
+       if (set_nr_if_polling(rq->idle)) {
+               trace_sched_wake_idle_without_ipi(cpu);
+       } else {
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               if (is_idle_task(rq->curr))
+                       smp_send_reschedule(cpu);
+               /* Else CPU is not idle; do nothing here */
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
+
+out:
+       rcu_read_unlock();
+}
+
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1738,7 +1740,7 @@ static void try_to_wake_up_local(struct task_struct *p)
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       if (!p->on_rq)
+       if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
@@ -1773,6 +1775,24 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = 0;
+       dl_se->dl_deadline = 0;
+       dl_se->dl_period = 0;
+       dl_se->flags = 0;
+       dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -1789,6 +1809,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
+#ifdef CONFIG_SMP
+       p->se.avg.decay_count           = 0;
+#endif
        INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
@@ -1796,11 +1819,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
        RB_CLEAR_NODE(&p->dl.rb_node);
-       hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       p->dl.dl_runtime = p->dl.runtime = 0;
-       p->dl.dl_deadline = p->dl.deadline = 0;
-       p->dl.dl_period = 0;
-       p->dl.flags = 0;
+       init_dl_task_timer(&p->dl);
+       __dl_clear_params(p);
 
        INIT_LIST_HEAD(&p->rt.run_list);
 
@@ -1823,12 +1843,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
        p->numa_work.next = &p->numa_work;
-       p->numa_faults_memory = NULL;
-       p->numa_faults_buffer_memory = NULL;
+       p->numa_faults = NULL;
        p->last_task_numa_placement = 0;
        p->last_sum_exec_runtime = 0;
 
-       INIT_LIST_HEAD(&p->numa_entry);
        p->numa_group = NULL;
 #endif /* CONFIG_NUMA_BALANCING */
 }
@@ -1975,6 +1993,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+       rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                          "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -1983,6 +2003,8 @@ static inline int dl_bw_cpus(int i)
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus = 0;
 
+       rcu_lockdep_assert(rcu_read_lock_sched_held(),
+                          "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;
 
@@ -2000,25 +2022,6 @@ static inline int dl_bw_cpus(int i)
 }
 #endif
 
-static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw -= tsk_bw;
-}
-
-static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw += tsk_bw;
-}
-
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
-{
-       return dl_b->bw != -1 &&
-              dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
-}
-
 /*
  * We must be sure that accepting a new task (or allowing changing the
  * parameters of an existing one) is consistent with the bandwidth
@@ -2026,6 +2029,9 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
@@ -2093,7 +2099,7 @@ void wake_up_new_task(struct task_struct *p)
        init_task_runnable_average(p);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p, true);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2186,7 +2192,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 
 /**
  * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
  * @prev: the thread we just switched away from.
  *
  * finish_task_switch must be called after the context switch, paired
@@ -2198,10 +2203,16 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static struct rq *finish_task_switch(struct task_struct *prev)
        __releases(rq->lock)
 {
+       struct rq *rq = this_rq();
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
 
@@ -2241,6 +2252,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        }
 
        tick_nohz_task_switch(current);
+       return rq;
 }
 
 #ifdef CONFIG_SMP
@@ -2275,29 +2287,22 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
-       struct rq *rq = this_rq();
-
-       finish_task_switch(rq, prev);
+       struct rq *rq;
 
-       /*
-        * FIXME: do we need to worry about rq being invalidated by the
-        * task_switch?
-        */
+       /* finish_task_switch() drops rq->lock and enables preemption */
+       preempt_disable();
+       rq = finish_task_switch(prev);
        post_schedule(rq);
-
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
        preempt_enable();
-#endif
+
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
+ * context_switch - switch to the new MM and the new thread's register state.
  */
-static inline void
+static inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
@@ -2331,21 +2336,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
        context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
-
        barrier();
-       /*
-        * this_rq must be evaluated again because prev may have moved
-        * CPUs since it called schedule(), thus the 'rq' on its stack
-        * frame will be invalid.
-        */
-       finish_task_switch(this_rq(), prev);
+
+       return finish_task_switch(prev);
 }
 
 /*
@@ -2364,6 +2362,18 @@ unsigned long nr_running(void)
        return sum;
 }
 
+/*
+ * Check if only the current task is running on the cpu.
+ */
+bool single_task_running(void)
+{
+       if (cpu_rq(smp_processor_id())->nr_running == 1)
+               return true;
+       else
+               return false;
+}
+EXPORT_SYMBOL(single_task_running);
+
 unsigned long long nr_context_switches(void)
 {
        int i;
@@ -2391,6 +2401,13 @@ unsigned long nr_iowait_cpu(int cpu)
        return atomic_read(&this->nr_iowait);
 }
 
+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
+{
+       struct rq *this = this_rq();
+       *nr_waiters = atomic_read(&this->nr_iowait);
+       *load = this->cpu_load[0];
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -2427,39 +2444,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-       u64 ns = 0;
-
-       if (task_current(rq, p)) {
-               update_rq_clock(rq);
-               ns = rq_clock_task(rq) - p->se.exec_start;
-               if ((s64)ns < 0)
-                       ns = 0;
-       }
-
-       return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -2469,7 +2453,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
        unsigned long flags;
        struct rq *rq;
-       u64 ns = 0;
+       u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
        /*
@@ -2480,13 +2464,24 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * If we race with it leaving cpu, we'll take a lock. So we're correct.
         * If we race with it entering cpu, unaccounted time is 0. This is
         * indistinguishable from the read occurring a few cycles earlier.
+        * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+        * been accounted, so we're correct here as well.
         */
-       if (!p->on_cpu)
+       if (!p->on_cpu || !task_on_rq_queued(p))
                return p->se.sum_exec_runtime;
 #endif
 
        rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
+               update_rq_clock(rq);
+               p->sched_class->update_curr(rq);
+       }
+       ns = p->se.sum_exec_runtime;
        task_rq_unlock(rq, p, &flags);
 
        return ns;
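
task_sched_runtime() now skips the lock entirely when the task is neither running nor queued, and only folds in unaccounted time when the task is both current and queued. A simplified sketch of that decision, with stub fields standing in for the scheduler's (the pending delta is modelled as a plain number rather than a call into the task's sched_class):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct task {
                bool on_cpu;                    /* currently executing somewhere */
                bool on_rq;                     /* queued on a runqueue          */
                uint64_t sum_exec_runtime;      /* accounted runtime, in ns      */
                uint64_t pending_delta;         /* run time not yet folded in    */
        };

        static uint64_t task_sched_runtime(const struct task *p)
        {
                /*
                 * Neither running nor queued: the accounted value cannot change
                 * underneath us, so a single 64-bit read is enough (no lock).
                 */
                if (!p->on_cpu || !p->on_rq)
                        return p->sum_exec_runtime;

                /* Otherwise (lock held) fold in the not-yet-accounted delta. */
                return p->sum_exec_runtime + p->pending_delta;
        }

        int main(void)
        {
                struct task blocked = { false, false, 1000000, 0    };
                struct task running = { true,  true,  1000000, 2500 };

                printf("blocked: %llu ns\n",
                       (unsigned long long)task_sched_runtime(&blocked));
                printf("running: %llu ns\n",
                       (unsigned long long)task_sched_runtime(&running));
                return 0;
        }
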
@@ -2644,6 +2639,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
  */
 static inline void schedule_debug(struct task_struct *prev)
 {
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+       BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+#endif
        /*
         * Test if we are atomic. Since do_exit() needs to call into
         * schedule() atomically, we ignore that path. Otherwise whine
@@ -2733,6 +2731,10 @@ again:
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
  */
 //void print_rb_nodes(struct rq *rq) {
 //     struct task_struct *p;
@@ -2754,11 +2756,10 @@ static void __sched __schedule(void)
        struct rq *rq;
        int i, cpu;
 
-need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_note_context_switch(cpu);
+       rcu_note_context_switch();
        prev = rq->curr;
 
        schedule_debug(prev);
@@ -2774,6 +2775,8 @@ need_resched:
        smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
+       rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2798,36 +2801,27 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       if (prev->on_rq || rq->skip_clock_update < 0)
+       if (task_on_rq_queued(prev))
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
-       rq->skip_clock_update = 0;
+       rq->clock_skip_update = 0;
 
        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
 
-               context_switch(rq, prev, next); /* unlocks the rq */
-               /*
-                * The context switch have flipped the stack from under us
-                * and restored the local variables which were saved when
-                * this task called schedule() in the past. prev == current
-                * is still correct, but it can be moved to another cpu/rq.
-                */
-               cpu = smp_processor_id();
-               rq = cpu_rq(cpu);
+               rq = context_switch(rq, prev, next); /* unlocks the rq */
+               cpu = cpu_of(rq);
        } else
                raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
 
        sched_preempt_enable_no_resched();
-       if (need_resched())
-               goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2847,7 +2841,9 @@ asmlinkage __visible void __sched schedule(void)
        struct task_struct *tsk = current;
 
        sched_submit_work(tsk);
-       __schedule();
+       do {
+               __schedule();
+       } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
 
@@ -2859,10 +2855,14 @@ asmlinkage __visible void __sched schedule_user(void)
         * or we have been woken up remotely but the IPI has not yet arrived,
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
+        *
+        * NB: There are buggy callers of this function.  Ideally we
+        * should warn if prev_state != CONTEXT_USER, but that will trigger
+        * too frequently to make sense yet.
         */
-       user_exit();
+       enum ctx_state prev_state = exception_enter();
        schedule();
-       user_enter();
+       exception_exit(prev_state);
 }
 #endif
 
@@ -2878,6 +2878,21 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
+static void __sched notrace preempt_schedule_common(void)
+{
+       do {
+               __preempt_count_add(PREEMPT_ACTIVE);
+               __schedule();
+               __preempt_count_sub(PREEMPT_ACTIVE);
+
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+               barrier();
+       } while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
@@ -2893,20 +2908,51 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
        if (likely(!preemptible()))
                return;
 
+       preempt_schedule_common();
+}
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+{
+       enum ctx_state prev_ctx;
+
+       if (likely(!preemptible()))
+               return;
+
        do {
                __preempt_count_add(PREEMPT_ACTIVE);
-               __schedule();
-               __preempt_count_sub(PREEMPT_ACTIVE);
-
                /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
                 */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
+
+               __preempt_count_sub(PREEMPT_ACTIVE);
                barrier();
        } while (need_resched());
 }
-NOKPROBE_SYMBOL(preempt_schedule);
-EXPORT_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #endif /* CONFIG_PREEMPT */
 
 /*
@@ -2963,7 +3009,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, on_rq, running, enqueue_flag = 0;
+       int oldprio, queued, running, enqueue_flag = 0;
        struct rq *rq;
        const struct sched_class *prev_class;
 
@@ -2990,15 +3036,14 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        }
 
        trace_sched_pi_setprio(p, prio);
-       p->pi_top_task = rt_mutex_get_top_task(p);
        oldprio = p->prio;
        prev_class = p->sched_class;
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        /*
         * Boosting conditions are:
@@ -3010,8 +3055,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         *          running task
         */
        if (dl_prio(prio)) {
-               if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
-                       dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
+               struct task_struct *pi_task = rt_mutex_get_top_task(p);
+               if (!dl_prio(p->normal_prio) ||
+                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
                        p->dl.dl_throttled = 0;
                        enqueue_flag = ENQUEUE_REPLENISH;
@@ -3027,6 +3073,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        } else {
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
+               if (rt_prio(oldprio))
+                       p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
        }
 
@@ -3034,7 +3082,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, enqueue_flag);
 
        check_class_changed(rq, p, prev_class, oldprio);
@@ -3045,7 +3093,7 @@ out_unlock:
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-       int old_prio, delta, on_rq;
+       int old_prio, delta, queued;
        unsigned long flags;
        struct rq *rq;
 
@@ -3066,8 +3114,8 @@ void set_user_nice(struct task_struct *p, long nice)
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
 
        p->static_prio = NICE_TO_PRIO(nice);
@@ -3076,14 +3124,14 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 out_unlock:
        task_rq_unlock(rq, p, &flags);
@@ -3211,23 +3259,45 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
 
-       init_dl_task_timer(dl_se);
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
        dl_se->flags = attr->sched_flags;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
-       dl_se->dl_yielded = 0;
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point. This would include retaining the task_struct until that time
+        * and change dl_overflow() to not immediately decrement the current
+        * amount.
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time; so admission control is
+        * not on the safe side. It does however guarantee tasks will never
+        * consume more than promised.
+        */
 }
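
dl_se->dl_bw caches runtime/period as a fixed-point fraction via to_ratio(), whose body is not shown in this diff; the sketch below assumes the usual 20-bit shift used for deadline bandwidth fractions:

        #include <stdint.h>
        #include <stdio.h>

        #define BW_SHIFT        20      /* assumed fixed-point precision */

        /* runtime/period scaled to a fraction of one CPU, mirroring to_ratio() */
        static uint64_t to_ratio(uint64_t period, uint64_t runtime)
        {
                if (period == 0)
                        return 0;
                return (runtime << BW_SHIFT) / period;
        }

        int main(void)
        {
                /* 10ms of runtime every 100ms: 10% of one CPU */
                uint64_t bw = to_ratio(100000000ULL, 10000000ULL);

                printf("dl_bw = %llu (%.1f%% of one CPU)\n",
                       (unsigned long long)bw,
                       100.0 * (double)bw / (double)(1ULL << BW_SHIFT));
                return 0;
        }
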
 
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY        -1
+
 static void __setscheduler_params(struct task_struct *p,
                const struct sched_attr *attr)
 {
        int policy = attr->sched_policy;
 
-       if (policy == -1) /* setparam */
+       if (policy == SETPARAM_POLICY)
                policy = p->policy;
 
        p->policy = policy;
@@ -3249,15 +3319,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr)
+                          const struct sched_attr *attr, bool keep_boost)
 {
        __setscheduler_params(p, attr);
 
        /*
-        * If we get here, there was no pi waiters boosting the
-        * task. It is safe to use the normal prio.
+        * Keep a potential priority boosting if called from
+        * sched_setscheduler().
         */
-       p->prio = normal_prio(p);
+       if (keep_boost)
+               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+       else
+               p->prio = normal_prio(p);
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
@@ -3336,14 +3409,28 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
+static bool dl_param_changed(struct task_struct *p,
+               const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       if (dl_se->dl_runtime != attr->sched_runtime ||
+               dl_se->dl_deadline != attr->sched_deadline ||
+               dl_se->dl_period != attr->sched_period ||
+               dl_se->flags != attr->sched_flags)
+               return true;
+
+       return false;
+}
+
 static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user)
 {
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, on_rq, running;
-       int policy = attr->sched_policy;
+       int retval, oldprio, oldpolicy = -1, queued, running;
+       int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
@@ -3464,7 +3551,7 @@ recheck:
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
-               if (dl_policy(policy))
+               if (dl_policy(policy) && dl_param_changed(p, attr))
                        goto change;
 
                p->sched_reset_on_fork = reset_on_fork;
@@ -3525,33 +3612,32 @@ change:
        oldprio = p->prio;
 
        /*
-        * Special case for priority boosted tasks.
-        *
-        * If the new priority is lower or equal (user space view)
-        * than the current (boosted) priority, we just store the new
+        * Take priority boosted tasks into account. If the new
+        * effective priority is unchanged, we just store the new
         * normal parameters and do not touch the scheduler class and
         * the runqueue. This will be done when the task deboosts
         * itself.
         */
-       if (rt_mutex_check_prio(p, newprio)) {
+       new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+       if (new_effective_prio == oldprio) {
                __setscheduler_params(p, attr);
                task_rq_unlock(rq, p, &flags);
                return 0;
        }
 
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        prev_class = p->sched_class;
-       __setscheduler(rq, p, attr);
+       __setscheduler(rq, p, attr, true);
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (queued) {
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
@@ -3576,10 +3662,8 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
                .sched_nice     = PRIO_TO_NICE(p->static_prio),
        };
 
-       /*
-        * Fixup the legacy SCHED_RESET_ON_FORK hack
-        */
-       if (policy & SCHED_RESET_ON_FORK) {
+       /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+       if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                policy &= ~SCHED_RESET_ON_FORK;
                attr.sched_policy = policy;
@@ -3749,7 +3833,7 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
-       return do_sched_setscheduler(pid, -1, param);
+       return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
 }
 
 /**
@@ -3977,14 +4061,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_unlock;
+                       goto out_free_new_mask;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_unlock;
+               goto out_free_new_mask;
 
 
        cpuset_cpus_allowed(p, cpus_allowed);
@@ -3997,13 +4081,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         * root_domain.
         */
 #ifdef CONFIG_SMP
-       if (task_has_dl_policy(p)) {
-               const struct cpumask *span = task_rq(p)->rd->span;
-
-               if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               rcu_read_lock();
+               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
                        retval = -EBUSY;
-                       goto out_unlock;
+                       rcu_read_unlock();
+                       goto out_free_new_mask;
                }
+               rcu_read_unlock();
        }
 #endif
 again:
@@ -4021,7 +4106,7 @@ again:
                        goto again;
                }
        }
-out_unlock:
+out_free_new_mask:
        free_cpumask_var(new_mask);
 out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
@@ -4157,17 +4242,10 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
 }
 
-static void __cond_resched(void)
-{
-       __preempt_count_add(PREEMPT_ACTIVE);
-       __schedule();
-       __preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
        if (should_resched()) {
-               __cond_resched();
+               preempt_schedule_common();
                return 1;
        }
        return 0;
@@ -4192,7 +4270,7 @@ int __cond_resched_lock(spinlock_t *lock)
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
-                       __cond_resched();
+                       preempt_schedule_common();
                else
                        cpu_relax();
                ret = 1;
@@ -4208,7 +4286,7 @@ int __sched __cond_resched_softirq(void)
 
        if (should_resched()) {
                local_bh_enable();
-               __cond_resched();
+               preempt_schedule_common();
                local_bh_disable();
                return 1;
        }
@@ -4304,7 +4382,7 @@ again:
                 * fairness.
                 */
                if (preempt && rq != p_rq)
-                       resched_task(p_rq->curr);
+                       resched_curr(p_rq);
        }
 
 out_unlock:
@@ -4323,36 +4401,26 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       blk_schedule_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
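
The rewritten io_schedule_timeout() saves and restores current->in_iowait instead of clearing it, so a caller that was already marked as waiting for I/O stays marked after a nested sleep. A tiny sketch of why the restore matters:

        #include <stdbool.h>
        #include <stdio.h>

        static struct {
                bool in_iowait;
        } current_task;

        static void io_sleep(void)
        {
                bool old_iowait = current_task.in_iowait;

                current_task.in_iowait = true;
                /* ... schedule_timeout() would run here ... */
                current_task.in_iowait = old_iowait;    /* restore, do not clear */
        }

        int main(void)
        {
                current_task.in_iowait = true;  /* outer context already in iowait */
                io_sleep();                     /* nested I/O sleep                */
                printf("still in iowait afterwards: %d\n", current_task.in_iowait);
                return 0;
        }
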
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
@@ -4463,9 +4531,10 @@ void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        int ppid;
-       unsigned state;
+       unsigned long state = p->state;
 
-       state = p->state ? __ffs(p->state) + 1 : 0;
+       if (state)
+               state = __ffs(state) + 1;
        printk(KERN_INFO "%-15.15s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
@@ -4482,8 +4551,10 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
+       ppid = 0;
        rcu_read_lock();
-       ppid = task_pid_nr(rcu_dereference(p->real_parent));
+       if (pid_alive(p))
+               ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), ppid,
@@ -4505,7 +4576,7 @@ void show_state_filter(unsigned long state_filter)
                "  task                        PC stack   pid father\n");
 #endif
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
@@ -4513,7 +4584,7 @@ void show_state_filter(unsigned long state_filter)
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
-       } while_each_thread(g, p);
+       }
 
        touch_all_softlockup_watchdogs();
 
@@ -4568,7 +4639,7 @@ void init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
-       idle->on_rq = 1;
+       idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
        idle->on_cpu = 1;
 #endif
@@ -4588,10 +4659,115 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
+{
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
+       unsigned long flags;
+
+       if (!cpumask_weight(cur))
+               return ret;
+
+       rcu_read_lock_sched();
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
+
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+       rcu_read_unlock_sched();
+
+       return ret;
+}
+
+int task_can_attach(struct task_struct *p,
+                   const struct cpumask *cs_cpus_allowed)
+{
+       int ret = 0;
+
+       /*
+        * Kthreads which disallow setaffinity shouldn't be moved
+        * to a new cpuset; we don't want to change their cpu
+        * affinity and isolating such threads by their set of
+        * allowed nodes is unnecessary.  Thus, cpusets are not
+        * applicable for such threads.  This prevents checking for
+        * success of set_cpus_allowed_ptr() on all attached tasks
+        * before cpus_allowed may be changed.
+        */
+       if (p->flags & PF_NO_SETAFFINITY) {
+               ret = -EINVAL;
+               goto out;
+       }
+
 #ifdef CONFIG_SMP
+       if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+                                             cs_cpus_allowed)) {
+               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                                       cs_cpus_allowed);
+               struct dl_bw *dl_b;
+               bool overflow;
+               int cpus;
+               unsigned long flags;
+
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(dest_cpu);
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(dest_cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+               if (overflow)
+                       ret = -EBUSY;
+               else {
+                       /*
+                        * We reserve space for this task in the destination
+                        * root_domain, as we can't fail after this point.
+                        * We will free resources in the source root_domain
+                        * later on (see set_cpus_allowed_dl()).
+                        */
+                       __dl_add(dl_b, p->dl.dl_bw);
+               }
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+               rcu_read_unlock_sched();
+
+       }
+#endif
+out:
+       return ret;
+}
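
task_can_attach() relies on the same overflow test as the __dl_overflow() helper removed from core.c above: a root domain can accept extra deadline bandwidth only while bw * cpus still covers total_bw after the change. A self-contained sketch of that check, keeping the helper's -1 convention for "no limit":

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct dl_bw {
                int64_t bw;             /* per-CPU limit, -1 means "no limit"        */
                uint64_t total_bw;      /* bandwidth already admitted (1<<20 = 100%) */
        };

        /* mirrors the removed __dl_overflow() helper */
        static bool dl_bw_overflow(const struct dl_bw *dl_b, int cpus,
                                   uint64_t old_bw, uint64_t new_bw)
        {
                return dl_b->bw != -1 &&
                       (uint64_t)dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
        }

        int main(void)
        {
                /* 95% allowed per CPU, 100% already admitted across the domain */
                struct dl_bw dl_b = { 996147, 1048576 };
                uint64_t task_bw = 524288;      /* new task asking for 50% of a CPU */

                printf("2 CPUs, admit 50%% task? %s\n",
                       dl_bw_overflow(&dl_b, 2, 0, task_bw) ? "no (overflow)" : "yes");
                printf("1 CPU,  admit 50%% task? %s\n",
                       dl_bw_overflow(&dl_b, 1, 0, task_bw) ? "no (overflow)" : "yes");
                return 0;
        }
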
+
+#ifdef CONFIG_SMP
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
+{
+       struct rq *rq = task_rq(p);
+
+       lockdep_assert_held(&rq->lock);
+
+       dequeue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, new_cpu);
+       raw_spin_unlock(&rq->lock);
+
+       rq = cpu_rq(new_cpu);
+
+       raw_spin_lock(&rq->lock);
+       BUG_ON(task_cpu(p) != new_cpu);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       enqueue_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+
+       return rq;
+}
+
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-       if (p->sched_class && p->sched_class->set_cpus_allowed)
+       if (p->sched_class->set_cpus_allowed)
                p->sched_class->set_cpus_allowed(p, new_mask);
 
        cpumask_copy(&p->cpus_allowed, new_mask);
@@ -4645,14 +4821,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                goto out;
 
        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
                task_rq_unlock(rq, p, &flags);
                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
                tlb_migrate_finish(p->mm);
                return 0;
-       }
+       } else if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 out:
        task_rq_unlock(rq, p, &flags);
 
@@ -4673,20 +4850,20 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  */
 static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 {
-       struct rq *rq_dest, *rq_src;
+       struct rq *rq;
        int ret = 0;
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
 
-       rq_src = cpu_rq(src_cpu);
-       rq_dest = cpu_rq(dest_cpu);
+       rq = cpu_rq(src_cpu);
 
        raw_spin_lock(&p->pi_lock);
-       double_rq_lock(rq_src, rq_dest);
+       raw_spin_lock(&rq->lock);
        /* Already moved. */
        if (task_cpu(p) != src_cpu)
                goto done;
+
        /* Affinity changed (again). */
        if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
                goto fail;
@@ -4695,16 +4872,12 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         * If we're not on a rq, the next wake-up will ensure we're
         * placed properly.
         */
-       if (p->on_rq) {
-               dequeue_task(rq_src, p, 0);
-               set_task_cpu(p, dest_cpu);
-               enqueue_task(rq_dest, p, 0);
-               check_preempt_curr(rq_dest, p, 0);
-       }
+       if (task_on_rq_queued(p))
+               rq = move_queued_task(p, dest_cpu);
 done:
        ret = 1;
 fail:
-       double_rq_unlock(rq_src, rq_dest);
+       raw_spin_unlock(&rq->lock);
        raw_spin_unlock(&p->pi_lock);
        return ret;
 }
@@ -4736,22 +4909,22 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
        struct rq *rq;
        unsigned long flags;
-       bool on_rq, running;
+       bool queued, running;
 
        rq = task_rq_lock(p, &flags);
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        p->numa_preferred_nid = nid;
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, 0);
        task_rq_unlock(rq, p, &flags);
 }
@@ -4771,6 +4944,12 @@ static int migration_cpu_stop(void *data)
         * be on another cpu but it doesn't matter.
         */
        local_irq_disable();
+       /*
+        * We need to explicitly wake pending tasks before running
+        * __migrate_task() such that we will not miss enforcing cpus_allowed
+        * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+        */
+       sched_ttwu_pending();
        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
        local_irq_enable();
        return 0;
@@ -5179,31 +5358,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
-       unsigned long flags;
-       long cpu = (long)hcpu;
-
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
-               set_cpu_active(cpu, false);
-
-               /* explicitly allow suspend */
-               if (!(action & CPU_TASKS_FROZEN)) {
-                       struct dl_bw *dl_b = dl_bw_of(cpu);
-                       bool overflow;
-                       int cpus;
-
-                       raw_spin_lock_irqsave(&dl_b->lock, flags);
-                       cpus = dl_bw_cpus(cpu);
-                       overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-                       if (overflow)
-                               return notifier_from_errno(-EBUSY);
-               }
+               set_cpu_active((long)hcpu, false);
                return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
        }
-
-       return NOTIFY_DONE;
 }
 
 static int __init migration_init(void)
@@ -5251,9 +5412,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
 {
        struct sched_group *group = sd->groups;
-       char str[256];
 
-       cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
        cpumask_clear(groupmask);
 
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5266,7 +5425,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                return -1;
        }
 
-       printk(KERN_CONT "span %s level %s\n", str, sd->name);
+       printk(KERN_CONT "span %*pbl level %s\n",
+              cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5285,17 +5445,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               /*
-                * Even though we initialize ->capacity to something semi-sane,
-                * we leave capacity_orig unset. This allows us to detect if
-                * domain iteration is still funny without causing /0 traps.
-                */
-               if (!group->sgc->capacity_orig) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
-                       break;
-               }
-
                if (!cpumask_weight(sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
@@ -5311,9 +5460,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-               cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
-               printk(KERN_CONT " %s", str);
+               printk(KERN_CONT " %*pbl",
+                      cpumask_pr_args(sched_group_cpus(group)));
                if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
                        printk(KERN_CONT " (cpu_capacity = %d)",
                                group->sgc->capacity);
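
Illustration only (not part of the patch): cpumask_pr_args(m) expands to a bit count plus a pointer to the mask bits, which the %*pbl specifier prints as a CPU list, so the fixed 256-byte scratch buffer and the cpulist_scnprintf() calls above can go away. Roughly what such a list looks like, shown as an ordinary user-space routine rather than the printk code:

#include <stdio.h>

/* Print set bits of 'mask' as a comma-separated list of ranges, e.g. "0-3,8". */
static void print_cpulist(unsigned long mask, int nbits)
{
        int bit = 0, first = 1;

        while (bit < nbits) {
                if (!(mask & (1UL << bit))) {
                        bit++;
                        continue;
                }
                int start = bit;

                while (bit < nbits && (mask & (1UL << bit)))
                        bit++;
                printf("%s%d", first ? "" : ",", start);
                if (bit - 1 > start)
                        printf("-%d", bit - 1);
                first = 0;
        }
        printf("\n");
}

int main(void)
{
        print_cpulist(0x10f, 16);       /* prints "0-3,8" */
        return 0;
}
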
@@ -5669,9 +5817,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        update_top_cache_domain(cpu);
 }
 
-/* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
-
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
@@ -5739,7 +5884,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        struct sd_data *sdd = sd->private;
-       struct sched_domain *child;
+       struct sched_domain *sibling;
        int i;
 
        cpumask_clear(covered);
@@ -5750,10 +5895,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               child = *per_cpu_ptr(sdd->sd, i);
+               sibling = *per_cpu_ptr(sdd->sd, i);
 
                /* See the comment near build_group_mask(). */
-               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;
 
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5763,10 +5908,9 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-               if (child->child) {
-                       child = child->child;
-                       cpumask_copy(sg_span, sched_domain_span(child));
-               } else
+               if (sibling->child)
+                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
+               else
                        cpumask_set_cpu(i, sg_span);
 
                cpumask_or(covered, covered, sg_span);
@@ -5781,7 +5925,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 * die on a /0 trap.
                 */
                sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-               sg->sgc->capacity_orig = sg->sgc->capacity;
 
                /*
                 * Make sure the first group of this domain contains the
@@ -6004,7 +6147,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 
 #ifdef CONFIG_NUMA
 static int sched_domains_numa_levels;
+enum numa_topology_type sched_numa_topology_type;
 static int *sched_domains_numa_distance;
+int sched_max_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 #endif
@@ -6090,6 +6235,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
         */
 
        if (sd->flags & SD_SHARE_CPUCAPACITY) {
+               sd->flags |= SD_PREFER_SIBLING;
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
 
@@ -6176,7 +6322,7 @@ static void sched_numa_warn(const char *str)
        printk(KERN_WARNING "\n");
 }
 
-static bool find_numa_distance(int distance)
+bool find_numa_distance(int distance)
 {
        int i;
 
@@ -6191,6 +6337,56 @@ static bool find_numa_distance(int distance)
        return false;
 }
 
+/*
+ * A system can have three types of NUMA topology:
+ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
+ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
+ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
+ *
+ * The difference between a glueless mesh topology and a backplane
+ * topology lies in whether communication between not directly
+ * connected nodes goes through intermediary nodes (where programs
+ * could run), or through backplane controllers. This affects
+ * placement of programs.
+ *
+ * The type of topology can be discerned with the following tests:
+ * - If the maximum distance between any nodes is 1 hop, the system
+ *   is directly connected.
+ * - If for two nodes A and B, located N > 1 hops away from each other,
+ *   there is an intermediary node C, which is < N hops away from both
+ *   nodes A and B, the system is a glueless mesh.
+ * - Otherwise, the maximally distant nodes can only communicate through
+ *   a backplane controller, and the system is a backplane topology.
+ */
+static void init_numa_topology_type(void)
+{
+       int a, b, c, n;
+
+       n = sched_max_numa_distance;
+
+       if (n <= 1) {
+               sched_numa_topology_type = NUMA_DIRECT;
+               return;
+       }
+
+       for_each_online_node(a) {
+               for_each_online_node(b) {
+                       /* Find two nodes furthest removed from each other. */
+                       if (node_distance(a, b) < n)
+                               continue;
+
+                       /* Is there an intermediary node between a and b? */
+                       for_each_online_node(c) {
+                               if (node_distance(a, c) < n &&
+                                   node_distance(b, c) < n) {
+                                       sched_numa_topology_type =
+                                                       NUMA_GLUELESS_MESH;
+                                       return;
+                               }
+                       }
+
+                       sched_numa_topology_type = NUMA_BACKPLANE;
+                       return;
+               }
+       }
+}
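
Illustration only (not part of the patch): the classification rule described in the comment above, run against a made-up four-node distance table (SLIT convention: 10 local, 20 one hop, 30 two hops). This is plain user-space code, not the kernel implementation, and the "directly connected" test is adapted to the fixed table:

#include <stdio.h>

enum topo { DIRECT, GLUELESS_MESH, BACKPLANE };

/* Hypothetical node_distance() table for nodes 0..3 arranged in a ring. */
static int dist[4][4] = {
        { 10, 20, 30, 20 },
        { 20, 10, 20, 30 },
        { 30, 20, 10, 20 },
        { 20, 30, 20, 10 },
};

static enum topo classify(int nr_nodes, int max_dist)
{
        int a, b, c;

        if (max_dist <= 20)     /* every pair is at most one hop apart */
                return DIRECT;

        for (a = 0; a < nr_nodes; a++) {
                for (b = 0; b < nr_nodes; b++) {
                        if (dist[a][b] < max_dist)
                                continue;
                        /* a and b are maximally distant; is there a middleman? */
                        for (c = 0; c < nr_nodes; c++) {
                                if (dist[a][c] < max_dist && dist[b][c] < max_dist)
                                        return GLUELESS_MESH;
                        }
                        return BACKPLANE;
                }
        }
        return DIRECT;
}

int main(void)
{
        /* Prints 1 (GLUELESS_MESH): node 1 sits between the distant nodes 0 and 2. */
        printf("%d\n", classify(4, 30));
        return 0;
}
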
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6244,6 +6440,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+       if (!level)
+               return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
@@ -6323,6 +6523,9 @@ static void sched_init_numa(void)
        sched_domain_topology = tl;
 
        sched_domains_numa_levels = level;
+       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+
+       init_numa_topology_type();
 }
 
 static void sched_domains_numa_masks_set(int cpu)
@@ -6484,6 +6687,20 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                sched_domain_level_max = max(sched_domain_level_max, sd->level);
                child->parent = sd;
                sd->child = child;
+
+               if (!cpumask_subset(sched_domain_span(child),
+                                   sched_domain_span(sd))) {
+                       pr_err("BUG: arch topology broken\n");
+#ifdef CONFIG_SCHED_DEBUG
+                       pr_err("     the %s domain is not a subset of the %s domain\n",
+                                       child->name, sd->name);
+#endif
+                       /* Fixup: ensure @sd spans at least the cpus of @child. */
+                       cpumask_or(sched_domain_span(sd),
+                                  sched_domain_span(sd),
+                                  sched_domain_span(child));
+               }
+
        }
        set_domain_attribute(sd, attr);
 
@@ -6784,7 +7001,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                 */
 
        case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
                cpuset_update_active_cpus(true);
                break;
        default:
@@ -6796,8 +7012,26 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
 {
+       unsigned long flags;
+       long cpu = (long)hcpu;
+       struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
+
        switch (action) {
        case CPU_DOWN_PREPARE:
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, 0);
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+
+               if (overflow)
+                       return notifier_from_errno(-EBUSY);
                cpuset_update_active_cpus(false);
                break;
        case CPU_DOWN_PREPARE_FROZEN:
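
Why CPU_DOWN_PREPARE can now fail with -EBUSY here: each SCHED_DEADLINE task reserves runtime/period of a CPU as a fixed-point ratio, and the hunk above rejects the hot-unplug if the CPUs that remain could no longer carry the total already reserved. A rough user-space sketch of that arithmetic (simplified helpers and framing; the real check lives in __dl_overflow() and dl_bw_cpus()):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT        20      /* fixed-point shift, as in the kernel's to_ratio() */

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

/* Would 'cpus' CPUs, each offering 'per_cpu_bw', fail to cover 'total_bw'? */
static int dl_overflow(uint64_t per_cpu_bw, int cpus, uint64_t total_bw)
{
        return per_cpu_bw * cpus < total_bw;
}

int main(void)
{
        /* Default RT limit: 950000us of runtime every 1000000us. */
        uint64_t per_cpu_bw = to_ratio(1000000, 950000);
        /* Two deadline tasks, each reserving 600ms every 1s. */
        uint64_t total_bw = 2 * to_ratio(1000000, 600000);

        printf("3 cpus left: overflow=%d\n", dl_overflow(per_cpu_bw, 3, total_bw));
        printf("1 cpu  left: overflow=%d\n", dl_overflow(per_cpu_bw, 1, total_bw));
        return 0;
}
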
@@ -6883,9 +7117,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
 #endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -6906,13 +7137,13 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
@@ -6945,8 +7176,8 @@ void __init sched_init(void)
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
                init_cfs_rq(&rq->cfs);
-               init_rt_rq(&rq->rt, rq);
-               init_dl_rq(&rq->dl, rq);
+               init_rt_rq(&rq->rt);
+               init_dl_rq(&rq->dl);
 #ifdef CONFIG_FAIR_GROUP_SCHED
                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -6986,7 +7217,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                rq->sd = NULL;
                rq->rd = NULL;
-               rq->cpu_capacity = SCHED_CAPACITY_SCALE;
+               rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
                rq->post_schedule = 0;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
@@ -7023,6 +7254,11 @@ void __init sched_init(void)
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current);
 
+       /*
+        * During early bootup we pretend to be a normal task:
+        */
+       current->sched_class = &fair_sched_class;
+
        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
@@ -7033,11 +7269,6 @@ void __init sched_init(void)
 
        calc_load_update = jiffies + LOAD_FREQ;
 
-       /*
-        * During early bootup we pretend to be a normal task:
-        */
-       current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
        /* May be allocated at isolcpus cmdline parse time */
@@ -7060,6 +7291,24 @@ static inline int preempt_count_equals(int preempt_offset)
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       /*
+        * Blocking primitives will set (and therefore destroy) current->state;
+        * since we will exit with TASK_RUNNING, make sure we enter with it,
+        * otherwise we will destroy state.
+        */
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+                       "do not call blocking ops when !TASK_RUNNING; "
+                       "state=%lx set at [<%p>] %pS\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change);
+
+       ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
@@ -7080,6 +7329,9 @@ void __might_sleep(const char *file, int line, int preempt_offset)
                        in_atomic(), irqs_disabled(),
                        current->pid, current->comm);
 
+       if (task_stack_end_corrupted(current))
+               printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);
@@ -7092,7 +7344,7 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 #endif
        dump_stack();
 }
-EXPORT_SYMBOL(__might_sleep);
+EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -7103,15 +7355,15 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
                .sched_policy = SCHED_NORMAL,
        };
        int old_prio = p->prio;
-       int on_rq;
+       int queued;
 
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
-       __setscheduler(rq, p, &attr);
-       if (on_rq) {
+       __setscheduler(rq, p, &attr, false);
+       if (queued) {
                enqueue_task(rq, p, 0);
-               resched_task(rq->curr);
+               resched_curr(rq);
        }
 
        check_class_changed(rq, p, prev_class, old_prio);
@@ -7123,12 +7375,12 @@ void normalize_rt_tasks(void)
        unsigned long flags;
        struct rq *rq;
 
-       read_lock_irqsave(&tasklist_lock, flags);
-       do_each_thread(g, p) {
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
                /*
                 * Only normalize user tasks:
                 */
-               if (!p->mm)
+               if (p->flags & PF_KTHREAD)
                        continue;
 
                p->se.exec_start                = 0;
@@ -7143,21 +7395,16 @@ void normalize_rt_tasks(void)
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
-                       if (task_nice(p) < 0 && p->mm)
+                       if (task_nice(p) < 0)
                                set_user_nice(p, 0);
                        continue;
                }
 
-               raw_spin_lock(&p->pi_lock);
-               rq = __task_rq_lock(p);
-
+               rq = task_rq_lock(p, &flags);
                normalize_task(rq, p);
-
-               __task_rq_unlock(rq);
-               raw_spin_unlock(&p->pi_lock);
-       } while_each_thread(g, p);
-
-       read_unlock_irqrestore(&tasklist_lock, flags);
+               task_rq_unlock(rq, p, &flags);
+       }
+       read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -7297,36 +7544,40 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
-       int on_rq, running;
+       int queued, running;
        unsigned long flags;
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &flags);
 
        running = task_current(rq, tsk);
-       on_rq = tsk->on_rq;
+       queued = task_on_rq_queued(tsk);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
-               tsk->sched_class->put_prev_task(rq, tsk);
+               put_prev_task(rq, tsk);
 
-       tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                               lockdep_is_held(&tsk->sighand->siglock)),
+       /*
+        * All callers are synchronized by task_rq_lock(), so holding the RCU
+        * read lock here would be pointless. Thus, we pass "true" to
+        * task_css_check() to suppress the lockdep warning.
+        */
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, on_rq);
+               tsk->sched_class->task_move_group(tsk, queued);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, tsk, &flags);
@@ -7344,10 +7595,16 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
-       do_each_thread(g, p) {
-               if (rt_task(p) && task_rq(p)->rt.tg == tg)
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
+       for_each_process_thread(g, p) {
+               if (rt_task(p) && task_group(p) == tg)
                        return 1;
-       } while_each_thread(g, p);
+       }
 
        return 0;
 }
@@ -7436,6 +7693,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing RT runtime for the root group is BAD: it would prevent
+        * the kernel from creating (and operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* A zero period does not make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7492,9 +7760,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
@@ -7551,11 +7816,12 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-static int sched_dl_global_constraints(void)
+static int sched_dl_global_validate(void)
 {
        u64 runtime = global_rt_runtime();
        u64 period = global_rt_period();
        u64 new_bw = to_ratio(period, runtime);
+       struct dl_bw *dl_b;
        int cpu, ret = 0;
        unsigned long flags;
 
@@ -7569,13 +7835,16 @@ static int sched_dl_global_constraints(void)
         * solutions is welcome!
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                if (new_bw < dl_b->total_bw)
                        ret = -EBUSY;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+               rcu_read_unlock_sched();
+
                if (ret)
                        break;
        }
@@ -7586,6 +7855,7 @@ static int sched_dl_global_constraints(void)
 static void sched_dl_do_global(void)
 {
        u64 new_bw = -1;
+       struct dl_bw *dl_b;
        int cpu;
        unsigned long flags;
 
@@ -7599,11 +7869,14 @@ static void sched_dl_do_global(void)
         * FIXME: As above...
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                dl_b->bw = new_bw;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
        }
 }
 
@@ -7644,11 +7917,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
                if (ret)
                        goto undo;
 
-               ret = sched_rt_global_constraints();
+               ret = sched_dl_global_validate();
                if (ret)
                        goto undo;
 
-               ret = sched_dl_global_constraints();
+               ret = sched_rt_global_constraints();
                if (ret)
                        goto undo;
 
@@ -7733,6 +8006,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
        sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+       sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
@@ -7822,6 +8100,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        if (period > max_cfs_quota_period)
                return -EINVAL;
 
+       /*
+        * Prevent race between setting of cfs_rq->runtime_enabled and
+        * unthrottle_offline_cfs_rqs().
+        */
+       get_online_cpus();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -7847,7 +8130,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        }
        raw_spin_unlock_irq(&cfs_b->lock);
 
-       for_each_possible_cpu(i) {
+       for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;
 
@@ -7863,6 +8146,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
+       put_online_cpus();
 
        return ret;
 }
@@ -7978,7 +8262,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
                quota = normalize_cfs_quota(tg, d);
-               parent_quota = parent_b->hierarchal_quota;
+               parent_quota = parent_b->hierarchical_quota;
 
                /*
                 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -7989,7 +8273,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
-       cfs_b->hierarchal_quota = quota;
+       cfs_b->hierarchical_quota = quota;
 
        return 0;
 }
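
The renamed hierarchical_quota field carries exactly the constraint the comment above states: a child group's normalized quota must not exceed its parent's, and a child with no limit inherits the parent's value. A small user-space sketch of that check (made-up helpers, not the kernel's tg_cfs_schedulable_down()):

#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF     (~0ULL)
#define BW_SHIFT        20

static uint64_t to_ratio(uint64_t period, uint64_t quota)
{
        return (quota << BW_SHIFT) / period;
}

/* Returns 0 if the child fits under the parent's quota, -1 otherwise. */
static int check_child(uint64_t parent_quota, uint64_t *child_quota,
                       uint64_t period, uint64_t quota)
{
        uint64_t q = (quota == RUNTIME_INF) ? RUNTIME_INF : to_ratio(period, quota);

        if (q == RUNTIME_INF)
                q = parent_quota;       /* inherit when no limit is set */
        else if (parent_quota != RUNTIME_INF && q > parent_quota)
                return -1;              /* child asks for more than the parent has */

        *child_quota = q;
        return 0;
}

int main(void)
{
        uint64_t parent = to_ratio(100000, 50000);      /* 50ms every 100ms */
        uint64_t child;

        printf("child 40ms/100ms: %d\n", check_child(parent, &child, 100000, 40000));
        printf("child 80ms/100ms: %d\n", check_child(parent, &child, 100000, 80000));
        return 0;
}
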
@@ -8099,10 +8383,11 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
-       .base_cftypes   = cpu_files,
+       .legacy_cftypes = cpu_files,
        .early_init     = 1,
 };