Merge tag 'v4.3' into p/abusse/merge_upgrade
[projects/modsched/linux.git] kernel/sched/cfs/core.c
index aae0686..0c301cf 100644
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-#ifdef smp_mb__before_atomic
-void __smp_mb__before_atomic(void)
-{
-       smp_mb__before_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__before_atomic);
-#endif
-
-#ifdef smp_mb__after_atomic
-void __smp_mb__after_atomic(void)
-{
-       smp_mb__after_atomic();
-}
-EXPORT_SYMBOL(__smp_mb__after_atomic);
-#endif
-
-void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
-{
-       unsigned long delta;
-       ktime_t soft, hard, now;
-
-       for (;;) {
-               if (hrtimer_active(period_timer))
-                       break;
-
-               now = hrtimer_cb_get_time(period_timer);
-               hrtimer_forward(period_timer, now, period);
-
-               soft = hrtimer_get_softexpires(period_timer);
-               hard = hrtimer_get_expires(period_timer);
-               delta = ktime_to_ns(ktime_sub(hard, soft));
-               __hrtimer_start_range_ns(period_timer, soft, delta,
-                                        HRTIMER_MODE_ABS_PINNED, 0);
-       }
-}
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -135,10 +99,14 @@ void update_rq_clock(struct rq *rq)
 {
        s64 delta;
 
-       if (rq->skip_clock_update > 0)
+       lockdep_assert_held(&rq->lock);
+
+       if (rq->clock_skip_update & RQCF_ACT_SKIP)
                return;
 
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+       if (delta < 0)
+               return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
 }
@@ -196,14 +164,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       if (static_key_enabled(&sched_feat_keys[i]))
-               static_key_slow_dec(&sched_feat_keys[i]);
+       static_key_disable(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       if (!static_key_enabled(&sched_feat_keys[i]))
-               static_key_slow_inc(&sched_feat_keys[i]);
+       static_key_enable(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -243,6 +209,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int i;
+       struct inode *inode;
 
        if (cnt > 63)
                cnt = 63;
@@ -253,7 +220,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        buf[cnt] = 0;
        cmp = strstrip(buf);
 
+       /* Ensure the static_key remains in a consistent state */
+       inode = file_inode(filp);
+       mutex_lock(&inode->i_mutex);
        i = sched_feat_set(cmp);
+       mutex_unlock(&inode->i_mutex);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
@@ -313,59 +284,8 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
+/* cpus with isolated domains */
+cpumask_var_t cpu_isolated_map;
 
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
@@ -413,12 +333,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = hrtimer_get_softexpires(timer);
 
-       return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
@@ -442,7 +361,15 @@ static void __hrtick_start(void *arg)
 void hrtick_start(struct rq *rq, u64 delay)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
+       ktime_t time;
+       s64 delta;
+
+       /*
+        * Don't schedule slices shorter than 10000ns, that just
+        * doesn't make sense and can cause timer DoS.
+        */
+       delta = max_t(s64, delay, 10000LL);
+       time = ktime_add_ns(timer->base->get_time(), delta);
 
        hrtimer_set_expires(timer, time);
 
@@ -485,8 +412,13 @@ static __init void init_hrtick(void)
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
-       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       /*
+        * Don't schedule slices shorter than 10000ns, that just
+        * doesn't make sense. Rely on vruntime for fairness.
+        */
+       delay = max_t(u64, delay, 10000LL);
+       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
@@ -556,7 +488,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
        struct thread_info *ti = task_thread_info(p);
-       typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+       typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
        for (;;) {
                if (!(val & _TIF_POLLING_NRFLAG))
@@ -586,31 +518,78 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+       struct wake_q_node *node = &task->wake_q;
+
+       /*
+        * Atomically grab the task, if ->wake_q is !nil already it means
+        * it's already queued (either by us or someone else) and will get the
+        * wakeup due to that.
+        *
+        * This cmpxchg() implies a full barrier, which pairs with the write
+        * barrier implied by the wakeup in wake_up_q().
+        */
+       if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+               return;
+
+       get_task_struct(task);
+
+       /*
+        * The head is context local, there can be no concurrency.
+        */
+       *head->lastp = node;
+       head->lastp = &node->next;
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+       struct wake_q_node *node = head->first;
+
+       while (node != WAKE_Q_TAIL) {
+               struct task_struct *task;
+
+               task = container_of(node, struct task_struct, wake_q);
+               BUG_ON(!task);
+               /* task can safely be re-inserted now */
+               node = node->next;
+               task->wake_q.next = NULL;
+
+               /*
+                * wake_up_process() implies a wmb() to pair with the queueing
+                * in wake_q_add() so as not to miss wakeups.
+                */
+               wake_up_process(task);
+               put_task_struct(task);
+       }
+}
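
A minimal usage sketch for the wake_q API added above (illustrative only, not part of this diff; "struct my_lock", its "wait_lock" member and "first_waiter()" are made-up names). It shows the pattern the comments describe: queue wakeups while holding a spinlock, then issue them after the lock is dropped.

/* Illustrative sketch, not part of this diff. */
static void example_wake_first_waiter(struct my_lock *lock)
{
	struct task_struct *waiter;
	WAKE_Q(wake_q);				/* on-stack head, context local */

	spin_lock(&lock->wait_lock);
	waiter = first_waiter(lock);		/* hypothetical helper */
	if (waiter)
		wake_q_add(&wake_q, waiter);	/* only queues; takes a task ref */
	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);			/* actual wakeups, lock already dropped */
}
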
+
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+       struct task_struct *curr = rq->curr;
        int cpu;
 
-       lockdep_assert_held(&task_rq(p)->lock);
+       lockdep_assert_held(&rq->lock);
 
-       if (test_tsk_need_resched(p))
+       if (test_tsk_need_resched(curr))
                return;
 
-       cpu = task_cpu(p);
+       cpu = cpu_of(rq);
 
        if (cpu == smp_processor_id()) {
-               set_tsk_need_resched(p);
+               set_tsk_need_resched(curr);
                set_preempt_need_resched();
                return;
        }
 
-       if (set_nr_and_not_polling(p))
+       if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
@@ -623,7 +602,7 @@ void resched_cpu(int cpu)
 
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
-       resched_task(cpu_curr(cpu));
+       resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -637,24 +616,26 @@ void resched_cpu(int cpu)
  * selecting an idle cpu will add more delays to the timers than intended
  * (as that cpu's timer base may not be uptodate wrt jiffies etc).
  */
-int get_nohz_timer_target(int pinned)
+int get_nohz_timer_target(void)
 {
-       int cpu = smp_processor_id();
-       int i;
+       int i, cpu = smp_processor_id();
        struct sched_domain *sd;
 
-       if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
+       if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
                return cpu;
 
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
-                       if (!idle_cpu(i)) {
+                       if (!idle_cpu(i) && is_housekeeping_cpu(cpu)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
+
+       if (!is_housekeeping_cpu(cpu))
+               cpu = housekeeping_any_cpu();
 unlock:
        rcu_read_unlock();
        return cpu;
@@ -684,10 +665,16 @@ static void wake_up_idle_cpu(int cpu)
 
 static bool wake_up_full_nohz_cpu(int cpu)
 {
+       /*
+        * We just need the target to call irq_exit() and re-evaluate
+        * the next tick. The nohz full kick at least implies that.
+        * If needed we can still optimize that later with an
+        * empty IRQ.
+        */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
-                       smp_send_reschedule(cpu);
+                       tick_nohz_full_kick_cpu(cpu);
                return true;
        }
 
@@ -730,18 +717,32 @@ static inline bool got_nohz_idle_kick(void)
 #ifdef CONFIG_NO_HZ_FULL
 bool sched_can_stop_tick(void)
 {
-       struct rq *rq;
+       /*
+        * FIFO realtime policy runs the highest priority task. Other runnable
+        * tasks are of a lower priority. The scheduler tick does nothing.
+        */
+       if (current->policy == SCHED_FIFO)
+               return true;
 
-       rq = this_rq();
+       /*
+        * Round-robin realtime tasks time slice with other tasks at the same
+        * realtime priority. Is this task the only one at this priority?
+        */
+       if (current->policy == SCHED_RR) {
+               struct sched_rt_entity *rt_se = &current->rt;
 
-       /* Make sure rq->nr_running update is visible after the IPI */
-       smp_rmb();
+               return rt_se->run_list.prev == rt_se->run_list.next;
+       }
 
-       /* More than one running task need preemption */
-       if (rq->nr_running > 1)
-               return false;
+       /*
+        * More than one running task needs preemption.
+        * nr_running update is assumed to be visible
+        * after IPI is sent from wakers.
+        */
+       if (this_rq()->nr_running > 1)
+               return false;
 
-       return true;
+       return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
@@ -999,6 +1000,13 @@ inline int task_curr(const struct task_struct *p)
        return cpu_curr(task_cpu(p)) == p;
 }
 
+/*
+ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
+ * use the balance_callback list if you want balancing.
+ *
+ * this means any call to check_class_changed() must be followed by a call to
+ * balance_callback().
+ */
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
@@ -1006,6 +1014,7 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
+
                p->sched_class->switched_to(rq, p);
        } else if (oldprio != p->prio || dl_task(p))
                p->sched_class->prio_changed(rq, p, oldprio);
@@ -1022,7 +1031,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
-                               resched_task(rq->curr);
+                               resched_curr(rq);
                                break;
                        }
                }
@@ -1032,11 +1041,227 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back to back clock update.
         */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
-               rq->skip_clock_update = 1;
+       if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
+               rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
+/*
+ * This is how migration works:
+ *
+ * 1) we invoke migration_cpu_stop() on the target CPU using
+ *    stop_one_cpu().
+ * 2) stopper starts to run (implicitly forcing the migrated thread
+ *    off the CPU)
+ * 3) it checks whether the migrated task is still in the wrong runqueue.
+ * 4) if it's in the wrong runqueue then the migration thread removes
+ *    it and puts it into the right queue.
+ * 5) stopper completes and stop_one_cpu() returns and the migration
+ *    is done.
+ */
+
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+       lockdep_assert_held(&rq->lock);
+
+       dequeue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, new_cpu);
+       raw_spin_unlock(&rq->lock);
+
+       rq = cpu_rq(new_cpu);
+
+       raw_spin_lock(&rq->lock);
+       BUG_ON(task_cpu(p) != new_cpu);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       enqueue_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+
+       return rq;
+}
+
+struct migration_arg {
+       struct task_struct *task;
+       int dest_cpu;
+};
+
+/*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
+ *
+ * So we race with normal scheduler movements, but that's OK, as long
+ * as the task is no longer on this CPU.
+ */
+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
+{
+       if (unlikely(!cpu_active(dest_cpu)))
+               return rq;
+
+       /* Affinity changed (again). */
+       if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+               return rq;
+
+       rq = move_queued_task(rq, p, dest_cpu);
+
+       return rq;
+}
+
+/*
+ * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * and performs thread migration by bumping thread off CPU then
+ * 'pushing' onto another runqueue.
+ */
+static int migration_cpu_stop(void *data)
+{
+       struct migration_arg *arg = data;
+       struct task_struct *p = arg->task;
+       struct rq *rq = this_rq();
+
+       /*
+        * The original target cpu might have gone down and we might
+        * be on another cpu but it doesn't matter.
+        */
+       local_irq_disable();
+       /*
+        * We need to explicitly wake pending tasks before running
+        * __migrate_task() such that we will not miss enforcing cpus_allowed
+        * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+        */
+       sched_ttwu_pending();
+
+       raw_spin_lock(&p->pi_lock);
+       raw_spin_lock(&rq->lock);
+       /*
+        * If task_rq(p) != rq, it cannot be migrated here, because we're
+        * holding rq->lock; if p->on_rq == 0 it cannot get enqueued because
+        * we're holding p->pi_lock.
+        */
+       if (task_rq(p) == rq && task_on_rq_queued(p))
+               rq = __migrate_task(rq, p, arg->dest_cpu);
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock(&p->pi_lock);
+
+       local_irq_enable();
+       return 0;
+}
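
To make the five-step protocol described above concrete, a caller on the "push" side looks roughly like the sketch below (illustrative only; it mirrors what __set_cpus_allowed_ptr() further down does for a currently running task, and "example_push_off_cpu" is a made-up name).

/* Illustrative sketch, not part of this diff. */
static void example_push_off_cpu(struct task_struct *p, int dest_cpu)
{
	struct migration_arg arg = { p, dest_cpu };

	/* step 1: run migration_cpu_stop() on the CPU p is on ... */
	stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
	/* ... steps 2-5 happened in the stopper; the move is done on return */
}
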
+
+/*
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{
+       cpumask_copy(&p->cpus_allowed, new_mask);
+       p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+       struct rq *rq = task_rq(p);
+       bool queued, running;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       queued = task_on_rq_queued(p);
+       running = task_current(rq, p);
+
+       if (queued) {
+               /*
+                * Because __kthread_bind() calls this on blocked tasks without
+                * holding rq->lock.
+                */
+               lockdep_assert_held(&rq->lock);
+               dequeue_task(rq, p, 0);
+       }
+       if (running)
+               put_prev_task(rq, p);
+
+       p->sched_class->set_cpus_allowed(p, new_mask);
+
+       if (running)
+               p->sched_class->set_curr_task(rq);
+       if (queued)
+               enqueue_task(rq, p, 0);
+}
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+                                 const struct cpumask *new_mask, bool check)
+{
+       unsigned long flags;
+       struct rq *rq;
+       unsigned int dest_cpu;
+       int ret = 0;
+
+       rq = task_rq_lock(p, &flags);
+
+       /*
+        * Must re-check here, to close a race against __kthread_bind(),
+        * sched_setaffinity() is not guaranteed to observe the flag.
+        */
+       if (check && (p->flags & PF_NO_SETAFFINITY)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (cpumask_equal(&p->cpus_allowed, new_mask))
+               goto out;
+
+       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       do_set_cpus_allowed(p, new_mask);
+
+       /* Can the task run on the task's current CPU? If so, we're done */
+       if (cpumask_test_cpu(task_cpu(p), new_mask))
+               goto out;
+
+       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
+               struct migration_arg arg = { p, dest_cpu };
+               /* Need help from migration thread: drop lock and wait. */
+               task_rq_unlock(rq, p, &flags);
+               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+               tlb_migrate_finish(p->mm);
+               return 0;
+       } else if (task_on_rq_queued(p)) {
+               /*
+                * OK, since we're going to drop the lock immediately
+                * afterwards anyway.
+                */
+               lockdep_unpin_lock(&rq->lock);
+               rq = move_queued_task(rq, p, dest_cpu);
+               lockdep_pin_lock(&rq->lock);
+       }
+out:
+       task_rq_unlock(rq, p, &flags);
+
+       return ret;
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+       return __set_cpus_allowed_ptr(p, new_mask, false);
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
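
A short usage sketch for the exported helper above (illustrative only; the task pointer and target CPU are arbitrary and "example_pin_task" is a made-up name). Binding to a single CPU is the common case and may hand the task to the migration stopper shown earlier.

/* Illustrative sketch, not part of this diff. */
static int example_pin_task(struct task_struct *p, int cpu)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpumask_of(cpu));
	ret = set_cpus_allowed_ptr(p, mask);	/* may migrate p via migration_cpu_stop() */
	free_cpumask_var(mask);
	return ret;
}
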
+
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
 #ifdef CONFIG_SCHED_DEBUG
@@ -1045,7 +1270,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         * ttwu() will sort out the placement.
         */
        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_preempt_count(p) & PREEMPT_ACTIVE));
+                       !p->on_rq);
 
 #ifdef CONFIG_LOCKDEP
        /*
@@ -1069,7 +1294,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+               perf_event_task_migrate(p);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -1077,7 +1302,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                struct rq *src_rq, *dst_rq;
 
                src_rq = task_rq(p);
@@ -1177,13 +1402,6 @@ out:
        return ret;
 }
 
-struct migration_arg {
-       struct task_struct *task;
-       int dest_cpu;
-};
-
-static int migration_cpu_stop(void *data);
-
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
@@ -1203,7 +1421,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
        unsigned long flags;
-       int running, on_rq;
+       int running, queued;
        unsigned long ncsw;
        struct rq *rq;
 
@@ -1241,7 +1459,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
-               on_rq = p->on_rq;
+               queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1273,7 +1491,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
-               if (unlikely(on_rq)) {
+               if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1316,9 +1534,7 @@ void kick_process(struct task_struct *p)
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-#endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
@@ -1398,7 +1614,10 @@ out:
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
-       cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+       lockdep_assert_held(&p->pi_lock);
+
+       if (p->nr_cpus_allowed > 1)
+               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
        /*
         * In order not to call set_task_cpu() on a blocking task we need
@@ -1422,7 +1641,16 @@ static void update_avg(u64 *avg, u64 sample)
        s64 diff = sample - *avg;
        *avg += diff >> 3;
 }
-#endif
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+                                        const struct cpumask *new_mask, bool check)
+{
+       return set_cpus_allowed_ptr(p, new_mask);
+}
+
+#endif /* CONFIG_SMP */
 
 static void
 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
@@ -1467,7 +1695,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
@@ -1481,12 +1709,19 @@ static void
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
        check_preempt_curr(rq, p, wake_flags);
-       trace_sched_wakeup(p, true);
-
        p->state = TASK_RUNNING;
+       trace_sched_wakeup(p);
+
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Our task @p is fully woken up and running; so it's safe to
+                * drop the rq->lock, hereafter rq is only used for statistics.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 
        if (rq->idle_stamp) {
                u64 delta = rq_clock(rq) - rq->idle_stamp;
@@ -1505,6 +1740,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 {
+       lockdep_assert_held(&rq->lock);
+
 #ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
@@ -1526,7 +1763,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        int ret = 0;
 
        rq = __task_rq_lock(p);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
                /* check_preempt_curr() may use rq clock */
                update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
@@ -1549,6 +1786,7 @@ void sched_ttwu_pending(void)
                return;
 
        raw_spin_lock_irqsave(&rq->lock, flags);
+       lockdep_pin_lock(&rq->lock);
 
        while (llist) {
                p = llist_entry(llist, struct task_struct, wake_entry);
@@ -1556,6 +1794,7 @@ void sched_ttwu_pending(void)
                ttwu_do_activate(rq, p, 0);
        }
 
+       lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -1568,9 +1807,7 @@ void scheduler_ipi(void)
         */
        preempt_fold_need_resched();
 
-       if (llist_empty(&this_rq()->wake_list)
-                       && !tick_nohz_full_cpu(smp_processor_id())
-                       && !got_nohz_idle_kick())
+       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1587,7 +1824,6 @@ void scheduler_ipi(void)
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
-       tick_nohz_full_check();
        sched_ttwu_pending();
 
        /*
@@ -1612,6 +1848,30 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
        }
 }
 
+void wake_up_if_idle(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       rcu_read_lock();
+
+       if (!is_idle_task(rcu_dereference(rq->curr)))
+               goto out;
+
+       if (set_nr_if_polling(rq->idle)) {
+               trace_sched_wake_idle_without_ipi(cpu);
+       } else {
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               if (is_idle_task(rq->curr))
+                       smp_send_reschedule(cpu);
+               /* Else cpu is not in idle, do nothing here */
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
+
+out:
+       rcu_read_unlock();
+}
+
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1631,7 +1891,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
        raw_spin_lock(&rq->lock);
+       lockdep_pin_lock(&rq->lock);
        ttwu_do_activate(rq, p, 0);
+       lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);
 }
 
@@ -1667,6 +1929,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (!(p->state & state))
                goto out;
 
+       trace_sched_waking(p);
+
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
@@ -1730,15 +1994,25 @@ static void try_to_wake_up_local(struct task_struct *p)
        lockdep_assert_held(&rq->lock);
 
        if (!raw_spin_trylock(&p->pi_lock)) {
+               /*
+                * This is OK, because current is on_cpu, which avoids it being
+                * picked for load-balance and preemption/IRQs are still
+                * disabled avoiding further scheduler activity on it and we've
+                * not yet picked a replacement task.
+                */
+               lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);
+               lockdep_pin_lock(&rq->lock);
        }
 
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       if (!p->on_rq)
+       trace_sched_waking(p);
+
+       if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
@@ -1773,6 +2047,24 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = 0;
+       dl_se->dl_deadline = 0;
+       dl_se->dl_period = 0;
+       dl_se->flags = 0;
+       dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -1796,11 +2088,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
        RB_CLEAR_NODE(&p->dl.rb_node);
-       hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       p->dl.dl_runtime = p->dl.runtime = 0;
-       p->dl.dl_deadline = p->dl.deadline = 0;
-       p->dl.dl_period = 0;
-       p->dl.flags = 0;
+       init_dl_task_timer(&p->dl);
+       __dl_clear_params(p);
 
        INIT_LIST_HEAD(&p->rt.run_list);
 
@@ -1823,12 +2112,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
        p->numa_work.next = &p->numa_work;
-       p->numa_faults_memory = NULL;
-       p->numa_faults_buffer_memory = NULL;
+       p->numa_faults = NULL;
        p->last_task_numa_placement = 0;
        p->last_sum_exec_runtime = 0;
 
-       INIT_LIST_HEAD(&p->numa_entry);
        p->numa_group = NULL;
 #endif /* CONFIG_NUMA_BALANCING */
 }
@@ -1939,7 +2226,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        set_task_cpu(p, cpu);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
@@ -1975,6 +2262,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -1983,6 +2272,8 @@ static inline int dl_bw_cpus(int i)
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus = 0;
 
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;
 
@@ -2000,25 +2291,6 @@ static inline int dl_bw_cpus(int i)
 }
 #endif
 
-static inline
-void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw -= tsk_bw;
-}
-
-static inline
-void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
-{
-       dl_b->total_bw += tsk_bw;
-}
-
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
-{
-       return dl_b->bw != -1 &&
-              dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
-}
-
 /*
  * We must be sure that accepting a new task (or allowing changing the
  * parameters of an existing one) is consistent with the bandwidth
@@ -2026,6 +2298,9 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
@@ -2090,20 +2365,41 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
        /* Initialize new task's runnable average */
-       init_task_runnable_average(p);
+       init_entity_runnable_average(&p->se);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
-       p->on_rq = 1;
-       trace_sched_wakeup_new(p, true);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Nothing relies on rq->lock after this, so it's fine to
+                * drop it.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
        task_rq_unlock(rq, p, &flags);
 }
 
-#ifdef CONFIG_PREEMPT_NOTIFIERS
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
+
+void preempt_notifier_inc(void)
+{
+       static_key_slow_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+       static_key_slow_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
 
 /**
  * preempt_notifier_register - tell me when current is being preempted & rescheduled
@@ -2111,6 +2407,9 @@ void wake_up_new_task(struct task_struct *p)
  */
 void preempt_notifier_register(struct preempt_notifier *notifier)
 {
+       if (!static_key_false(&preempt_notifier_key))
+               WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
        hlist_add_head(&notifier->link, &current->preempt_notifiers);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_register);
@@ -2119,7 +2418,7 @@ EXPORT_SYMBOL_GPL(preempt_notifier_register);
  * preempt_notifier_unregister - no longer interested in preemption notifications
  * @notifier: notifier struct to unregister
  *
- * This is safe to call from within a preemption notifier.
+ * This is *not* safe to call from within a preemption notifier.
  */
 void preempt_notifier_unregister(struct preempt_notifier *notifier)
 {
@@ -2127,7 +2426,7 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier)
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
        struct preempt_notifier *notifier;
 
@@ -2135,9 +2434,15 @@ static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
                notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }
 
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_in_preempt_notifiers(curr);
+}
+
 static void
-fire_sched_out_preempt_notifiers(struct task_struct *curr,
-                                struct task_struct *next)
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                  struct task_struct *next)
 {
        struct preempt_notifier *notifier;
 
@@ -2145,13 +2450,21 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
                notifier->ops->sched_out(notifier, next);
 }
 
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                struct task_struct *next)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_out_preempt_notifiers(curr, next);
+}
+
 #else /* !CONFIG_PREEMPT_NOTIFIERS */
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 }
 
-static void
+static inline void
 fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
 {
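
A hedged usage sketch for the notifier API with the new static key (illustrative only; everything prefixed "example_" is made up, and the pattern follows how a hypervisor-style user hooks context switches). preempt_notifier_inc() has to be called before registration so the fire_sched_{in,out}_preempt_notifiers() fast paths above are armed.

/* Illustrative sketch, not part of this diff. */
static void example_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just scheduled back in on @cpu */
}

static void example_sched_out(struct preempt_notifier *pn,
			      struct task_struct *next)
{
	/* current is being switched out in favour of @next */
}

static struct preempt_ops example_preempt_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static void example_hook_current(struct preempt_notifier *pn)
{
	preempt_notifier_inc();			/* arm the static key first */
	preempt_notifier_init(pn, &example_preempt_ops);
	preempt_notifier_register(pn);		/* affects current only */
}
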
@@ -2186,7 +2499,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 
 /**
  * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
  * @prev: the thread we just switched away from.
  *
  * finish_task_switch must be called after the context switch, paired
@@ -2198,10 +2510,16 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static struct rq *finish_task_switch(struct task_struct *prev)
        __releases(rq->lock)
 {
+       struct rq *rq = this_rq();
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
 
@@ -2212,15 +2530,14 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
+        *
+        * We must observe prev->state before clearing prev->on_cpu (in
+        * finish_lock_switch), otherwise a concurrent wakeup can get prev
+        * running on another CPU and we could race with its RUNNING -> DEAD
+        * transition, resulting in a double drop.
         */
        prev_state = prev->state;
        vtime_task_switch(prev);
-       finish_arch_switch(prev);
        perf_event_task_sched_in(prev, current);
        finish_lock_switch(rq, prev);
        finish_arch_post_lock_switch();
@@ -2240,29 +2557,42 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
                put_task_struct(prev);
        }
 
-       tick_nohz_task_switch(current);
+       tick_nohz_task_switch();
+       return rq;
 }
 
 #ifdef CONFIG_SMP
 
 /* rq->lock is NOT held, but preemption is disabled */
-static inline void post_schedule(struct rq *rq)
+static void __balance_callback(struct rq *rq)
 {
-       if (rq->post_schedule) {
-               unsigned long flags;
+       struct callback_head *head, *next;
+       void (*func)(struct rq *rq);
+       unsigned long flags;
 
-               raw_spin_lock_irqsave(&rq->lock, flags);
-               if (rq->curr->sched_class->post_schedule)
-                       rq->curr->sched_class->post_schedule(rq);
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
+       head = rq->balance_callback;
+       rq->balance_callback = NULL;
+       while (head) {
+               func = (void (*)(struct rq *))head->func;
+               next = head->next;
+               head->next = NULL;
+               head = next;
 
-               rq->post_schedule = 0;
+               func(rq);
        }
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static inline void balance_callback(struct rq *rq)
+{
+       if (unlikely(rq->balance_callback))
+               __balance_callback(rq);
 }
 
 #else
 
-static inline void post_schedule(struct rq *rq)
+static inline void balance_callback(struct rq *rq)
 {
 }
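
For the SMP case above, a scheduling class feeds this list from under rq->lock and lets the callers of balance_callback() run the work once the lock can be dropped. A sketch of that pattern (illustrative only; modelled on the rt/deadline push callbacks, "example_" names are made up, and queue_balance_callback() is the helper in kernel/sched/sched.h):

/* Illustrative sketch, not part of this diff. */
static void example_push_tasks(struct rq *rq)
{
	/* runs from balance_callback(): rq->lock held, may be dropped and retaken */
}

static DEFINE_PER_CPU(struct callback_head, example_push_head);

static void example_queue_push(struct rq *rq)
{
	/* called under rq->lock, e.g. from switched_from()/prio_changed() */
	queue_balance_callback(rq, &per_cpu(example_push_head, rq->cpu),
			       example_push_tasks);
}
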
 
@@ -2275,29 +2605,22 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
-       struct rq *rq = this_rq();
-
-       finish_task_switch(rq, prev);
-
-       /*
-        * FIXME: do we need to worry about rq being invalidated by the
-        * task_switch?
-        */
-       post_schedule(rq);
+       struct rq *rq;
 
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
+       /* finish_task_switch() drops rq->lock and enables preemption */
+       preempt_disable();
+       rq = finish_task_switch(prev);
+       balance_callback(rq);
        preempt_enable();
-#endif
+
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
+ * context_switch - switch to the new MM and the new thread's register state.
  */
-static inline void
+static inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
@@ -2331,21 +2654,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+       lockdep_unpin_lock(&rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
-       context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
-
        barrier();
-       /*
-        * this_rq must be evaluated again because prev may have moved
-        * CPUs since it called schedule(), thus the 'rq' on its stack
-        * frame will be invalid.
-        */
-       finish_task_switch(this_rq(), prev);
+
+       return finish_task_switch(prev);
 }
 
 /*
@@ -2364,6 +2680,25 @@ unsigned long nr_running(void)
        return sum;
 }
 
+/*
+ * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race.  The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
+ */
+bool single_task_running(void)
+{
+       return raw_rq()->nr_running == 1;
+}
+EXPORT_SYMBOL(single_task_running);
+
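
A usage sketch matching the polling-loop case listed in the comment above (illustrative only; "example_poll_event()" is a made-up predicate). Preemption is disabled around the check so the result cannot go stale between the test and its use.

/* Illustrative sketch, not part of this diff. */
static void example_poll(void)
{
	preempt_disable();
	while (!example_poll_event()) {		/* hypothetical completion test */
		if (!single_task_running())
			break;			/* another task wants this CPU */
		cpu_relax();
	}
	preempt_enable();
}
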
 unsigned long long nr_context_switches(void)
 {
        int i;
@@ -2391,6 +2726,13 @@ unsigned long nr_iowait_cpu(int cpu)
        return atomic_read(&this->nr_iowait);
 }
 
+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
+{
+       struct rq *rq = this_rq();
+       *nr_waiters = atomic_read(&rq->nr_iowait);
+       *load = rq->load.weight;
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -2427,39 +2769,6 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-       u64 ns = 0;
-
-       if (task_current(rq, p)) {
-               update_rq_clock(rq);
-               ns = rq_clock_task(rq) - p->se.exec_start;
-               if ((s64)ns < 0)
-                       ns = 0;
-       }
-
-       return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -2469,7 +2778,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
        unsigned long flags;
        struct rq *rq;
-       u64 ns = 0;
+       u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
        /*
@@ -2480,13 +2789,24 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * If we race with it leaving cpu, we'll take a lock. So we're correct.
         * If we race with it entering cpu, unaccounted time is 0. This is
         * indistinguishable from the read occurring a few cycles earlier.
+        * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+        * been accounted, so we're correct here as well.
         */
-       if (!p->on_cpu)
+       if (!p->on_cpu || !task_on_rq_queued(p))
                return p->se.sum_exec_runtime;
 #endif
 
        rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
+               update_rq_clock(rq);
+               p->sched_class->update_curr(rq);
+       }
+       ns = p->se.sum_exec_runtime;
        task_rq_unlock(rq, p, &flags);
 
        return ns;
@@ -2508,6 +2828,7 @@ void scheduler_tick(void)
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_cpu_load_active(rq);
+       calc_global_load_tick(rq);
        raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick();
@@ -2536,7 +2857,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
        struct rq *rq = this_rq();
-       unsigned long next, now = ACCESS_ONCE(jiffies);
+       unsigned long next, now = READ_ONCE(jiffies);
 
        next = rq->last_sched_tick + HZ;
 
@@ -2644,6 +2965,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
  */
 static inline void schedule_debug(struct task_struct *prev)
 {
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+       BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+#endif
        /*
         * Test if we are atomic. Since do_exit() needs to call into
         * schedule() atomically, we ignore that path. Otherwise whine
@@ -2733,6 +3057,8 @@ again:
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: must be called with preemption disabled!
  */
 //void print_rb_nodes(struct rq *rq) {
 //     struct task_struct *p;
@@ -2754,11 +3080,9 @@ static void __sched __schedule(void)
        struct rq *rq;
        int i, cpu;
 
-need_resched:
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_note_context_switch(cpu);
+       rcu_note_context_switch();
        prev = rq->curr;
 
        schedule_debug(prev);
@@ -2773,6 +3097,9 @@ need_resched:
         */
        smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
+       lockdep_pin_lock(&rq->lock);
+
+       rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -2798,36 +3125,27 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       if (prev->on_rq || rq->skip_clock_update < 0)
+       if (task_on_rq_queued(prev))
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
-       rq->skip_clock_update = 0;
+       rq->clock_skip_update = 0;
 
        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
 
-               context_switch(rq, prev, next); /* unlocks the rq */
-               /*
-                * The context switch have flipped the stack from under us
-                * and restored the local variables which were saved when
-                * this task called schedule() in the past. prev == current
-                * is still correct, but it can be moved to another cpu/rq.
-                */
-               cpu = smp_processor_id();
-               rq = cpu_rq(cpu);
-       } else
+               rq = context_switch(rq, prev, next); /* unlocks the rq */
+               cpu = cpu_of(rq);
+       } else {
+               lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock_irq(&rq->lock);
+       }
 
-       post_schedule(rq);
-
-       sched_preempt_enable_no_resched();
-       if (need_resched())
-               goto need_resched;
+       balance_callback(rq);
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2847,7 +3165,11 @@ asmlinkage __visible void __sched schedule(void)
        struct task_struct *tsk = current;
 
        sched_submit_work(tsk);
-       __schedule();
+       do {
+               preempt_disable();
+               __schedule();
+               sched_preempt_enable_no_resched();
+       } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
 
@@ -2859,10 +3181,14 @@ asmlinkage __visible void __sched schedule_user(void)
         * or we have been woken up remotely but the IPI has not yet arrived,
         * we haven't yet exited the RCU idle mode. Do it here manually until
         * we find a better solution.
+        *
+        * NB: There are buggy callers of this function.  Ideally we
+        * should warn if prev_state != CONTEXT_USER, but that will trigger
+        * too frequently to make sense yet.
         */
-       user_exit();
+       enum ctx_state prev_state = exception_enter();
        schedule();
-       user_enter();
+       exception_exit(prev_state);
 }
 #endif
 
@@ -2878,6 +3204,20 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
+static void __sched notrace preempt_schedule_common(void)
+{
+       do {
+               preempt_active_enter();
+               __schedule();
+               preempt_active_exit();
+
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+       } while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
@@ -2893,20 +3233,55 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
        if (likely(!preemptible()))
                return;
 
-       do {
-               __preempt_count_add(PREEMPT_ACTIVE);
-               __schedule();
-               __preempt_count_sub(PREEMPT_ACTIVE);
+       preempt_schedule_common();
+}
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
+
+/**
+ * preempt_schedule_notrace - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+{
+       enum ctx_state prev_ctx;
+
+       if (likely(!preemptible()))
+               return;
 
+       do {
                /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
+                * Use raw __preempt_count() ops that don't call functions.
+                * We can't call functions before disabling preemption, which
+                * is what disarms preemption tracing recursion.
                 */
+               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
                barrier();
+               /*
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
+                */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
+
+               barrier();
+               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
        } while (need_resched());
 }
-NOKPROBE_SYMBOL(preempt_schedule);
-EXPORT_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
+
 #endif /* CONFIG_PREEMPT */
 
 /*
@@ -2925,17 +3300,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();
 
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                local_irq_enable();
                __schedule();
                local_irq_disable();
-               __preempt_count_sub(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
+               preempt_active_exit();
        } while (need_resched());
 
        exception_exit(prev_state);
@@ -2963,7 +3332,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, on_rq, running, enqueue_flag = 0;
+       int oldprio, queued, running, enqueue_flag = 0;
        struct rq *rq;
        const struct sched_class *prev_class;
 
@@ -2990,15 +3359,14 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        }
 
        trace_sched_pi_setprio(p, prio);
-       p->pi_top_task = rt_mutex_get_top_task(p);
        oldprio = p->prio;
        prev_class = p->sched_class;
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        /*
         * Boosting condition are:
@@ -3010,10 +3378,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         *          running task
         */
        if (dl_prio(prio)) {
-               if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
-                       dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
+               struct task_struct *pi_task = rt_mutex_get_top_task(p);
+               if (!dl_prio(p->normal_prio) ||
+                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
-                       p->dl.dl_throttled = 0;
                        enqueue_flag = ENQUEUE_REPLENISH;
                } else
                        p->dl.dl_boosted = 0;
@@ -3027,6 +3395,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        } else {
                if (dl_prio(oldprio))
                        p->dl.dl_boosted = 0;
+               if (rt_prio(oldprio))
+                       p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
        }
 
@@ -3034,18 +3404,22 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, enqueue_flag);
 
        check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
+       preempt_disable(); /* prevent the rq from going away on us */
        __task_rq_unlock(rq);
+
+       balance_callback(rq);
+       preempt_enable();
 }
 #endif
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-       int old_prio, delta, on_rq;
+       int old_prio, delta, queued;
        unsigned long flags;
        struct rq *rq;
 
@@ -3066,8 +3440,8 @@ void set_user_nice(struct task_struct *p, long nice)
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
 
        p->static_prio = NICE_TO_PRIO(nice);
@@ -3076,14 +3450,14 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 out_unlock:
        task_rq_unlock(rq, p, &flags);
@@ -3211,23 +3585,45 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
 
-       init_dl_task_timer(dl_se);
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
        dl_se->flags = attr->sched_flags;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
-       dl_se->dl_yielded = 0;
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point. This would include retaining the task_struct until that time
+        * and change dl_overflow() to not immediately decrement the current
+        * and changing dl_overflow() to not immediately decrement the current
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time; so admission control is
+        * not on the safe side. It does however guarantee tasks will never
+        * consume more than promised.
+        */
 }
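
As a point of reference for the sched_runtime/sched_deadline/sched_period fields consumed above, here is a minimal userspace sketch (not part of this patch) that hands SCHED_DEADLINE parameters to the kernel through the sched_setattr() syscall. The local struct mirrors the uapi sched_attr layout and SYS_sched_setattr needs 3.14+ kernel headers; both are assumptions about the build environment, not something this patch provides.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Mirrors the uapi struct sched_attr (assumed layout, see above). */
struct dl_attr_example {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL/BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO/RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in ns */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct dl_attr_example attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10ULL  * 1000 * 1000,	/* 10ms of runtime ... */
		.sched_deadline	= 100ULL * 1000 * 1000,	/* ... due within 100ms ... */
		.sched_period	= 100ULL * 1000 * 1000,	/* ... every 100ms */
	};

	/* pid 0 means the calling thread; the last argument is flags. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	return 0;
}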
 
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY        -1
+
 static void __setscheduler_params(struct task_struct *p,
                const struct sched_attr *attr)
 {
        int policy = attr->sched_policy;
 
-       if (policy == -1) /* setparam */
+       if (policy == SETPARAM_POLICY)
                policy = p->policy;
 
        p->policy = policy;
@@ -3249,15 +3645,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr)
+                          const struct sched_attr *attr, bool keep_boost)
 {
        __setscheduler_params(p, attr);
 
        /*
-        * If we get here, there was no pi waiters boosting the
-        * task. It is safe to use the normal prio.
+        * Keep a potential priority boost if called from
+        * sched_setscheduler().
         */
-       p->prio = normal_prio(p);
+       if (keep_boost)
+               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+       else
+               p->prio = normal_prio(p);
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
@@ -3336,14 +3735,28 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
+static bool dl_param_changed(struct task_struct *p,
+               const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       if (dl_se->dl_runtime != attr->sched_runtime ||
+               dl_se->dl_deadline != attr->sched_deadline ||
+               dl_se->dl_period != attr->sched_period ||
+               dl_se->flags != attr->sched_flags)
+               return true;
+
+       return false;
+}
+
 static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
-                               bool user)
+                               bool user, bool pi)
 {
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
-       int retval, oldprio, oldpolicy = -1, on_rq, running;
-       int policy = attr->sched_policy;
+       int retval, oldprio, oldpolicy = -1, queued, running;
+       int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
@@ -3464,7 +3877,7 @@ recheck:
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
-               if (dl_policy(policy))
+               if (dl_policy(policy) && dl_param_changed(p, attr))
                        goto change;
 
                p->sched_reset_on_fork = reset_on_fork;
@@ -3524,34 +3937,35 @@ change:
        p->sched_reset_on_fork = reset_on_fork;
        oldprio = p->prio;
 
-       /*
-        * Special case for priority boosted tasks.
-        *
-        * If the new priority is lower or equal (user space view)
-        * than the current (boosted) priority, we just store the new
-        * normal parameters and do not touch the scheduler class and
-        * the runqueue. This will be done when the task deboost
-        * itself.
-        */
-       if (rt_mutex_check_prio(p, newprio)) {
-               __setscheduler_params(p, attr);
-               task_rq_unlock(rq, p, &flags);
-               return 0;
+       if (pi) {
+               /*
+                * Take priority boosted tasks into account. If the new
+                * effective priority is unchanged, we just store the new
+                * normal parameters and do not touch the scheduler class and
+                * the runqueue. This will be done when the task deboosts
+                * itself.
+                */
+               new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+               if (new_effective_prio == oldprio) {
+                       __setscheduler_params(p, attr);
+                       task_rq_unlock(rq, p, &flags);
+                       return 0;
+               }
        }
 
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        prev_class = p->sched_class;
-       __setscheduler(rq, p, attr);
+       __setscheduler(rq, p, attr, pi);
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq) {
+       if (queued) {
                /*
                 * We enqueue to tail when the priority of a task is
                 * increased (user space view).
@@ -3560,9 +3974,17 @@ change:
        }
 
        check_class_changed(rq, p, prev_class, oldprio);
+       preempt_disable(); /* prevent the rq from going away on us */
        task_rq_unlock(rq, p, &flags);
 
-       rt_mutex_adjust_pi(p);
+       if (pi)
+               rt_mutex_adjust_pi(p);
+
+       /*
+        * Run balance callbacks after we've adjusted the PI chain.
+        */
+       balance_callback(rq);
+       preempt_enable();
 
        return 0;
 }
@@ -3576,16 +3998,14 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
                .sched_nice     = PRIO_TO_NICE(p->static_prio),
        };
 
-       /*
-        * Fixup the legacy SCHED_RESET_ON_FORK hack
-        */
-       if (policy & SCHED_RESET_ON_FORK) {
+       /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+       if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                policy &= ~SCHED_RESET_ON_FORK;
                attr.sched_policy = policy;
        }
 
-       return __sched_setscheduler(p, &attr, check);
+       return __sched_setscheduler(p, &attr, check, true);
 }
 /**
  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
@@ -3606,7 +4026,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
-       return __sched_setscheduler(p, attr, true);
+       return __sched_setscheduler(p, attr, true, true);
 }
 EXPORT_SYMBOL_GPL(sched_setattr);
 
@@ -3749,7 +4169,7 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
-       return do_sched_setscheduler(pid, -1, param);
+       return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
 }
 
 /**
@@ -3977,14 +4397,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_unlock;
+                       goto out_free_new_mask;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_unlock;
+               goto out_free_new_mask;
 
 
        cpuset_cpus_allowed(p, cpus_allowed);
@@ -3997,17 +4417,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         * root_domain.
         */
 #ifdef CONFIG_SMP
-       if (task_has_dl_policy(p)) {
-               const struct cpumask *span = task_rq(p)->rd->span;
-
-               if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               rcu_read_lock();
+               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
                        retval = -EBUSY;
-                       goto out_unlock;
+                       rcu_read_unlock();
+                       goto out_free_new_mask;
                }
+               rcu_read_unlock();
        }
 #endif
 again:
-       retval = set_cpus_allowed_ptr(p, new_mask);
+       retval = __set_cpus_allowed_ptr(p, new_mask, true);
 
        if (!retval) {
                cpuset_cpus_allowed(p, cpus_allowed);
@@ -4021,7 +4442,7 @@ again:
                        goto again;
                }
        }
-out_unlock:
+out_free_new_mask:
        free_cpumask_var(new_mask);
 out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
@@ -4157,17 +4578,10 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
 }
 
-static void __cond_resched(void)
-{
-       __preempt_count_add(PREEMPT_ACTIVE);
-       __schedule();
-       __preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
-       if (should_resched()) {
-               __cond_resched();
+       if (should_resched(0)) {
+               preempt_schedule_common();
                return 1;
        }
        return 0;
@@ -4184,7 +4598,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       int resched = should_resched();
+       int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;
 
        lockdep_assert_held(lock);
@@ -4192,7 +4606,7 @@ int __cond_resched_lock(spinlock_t *lock)
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
-                       __cond_resched();
+                       preempt_schedule_common();
                else
                        cpu_relax();
                ret = 1;
@@ -4206,9 +4620,9 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (should_resched()) {
+       if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
                local_bh_enable();
-               __cond_resched();
+               preempt_schedule_common();
                local_bh_disable();
                return 1;
        }
@@ -4304,7 +4718,7 @@ again:
                 * fairness.
                 */
                if (preempt && rq != p_rq)
-                       resched_task(p_rq->curr);
+                       resched_curr(p_rq);
        }
 
 out_unlock:
@@ -4323,36 +4737,26 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       blk_schedule_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
@@ -4463,9 +4867,10 @@ void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        int ppid;
-       unsigned state;
+       unsigned long state = p->state;
 
-       state = p->state ? __ffs(p->state) + 1 : 0;
+       if (state)
+               state = __ffs(state) + 1;
        printk(KERN_INFO "%-15.15s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
@@ -4482,8 +4887,10 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
+       ppid = 0;
        rcu_read_lock();
-       ppid = task_pid_nr(rcu_dereference(p->real_parent));
+       if (pid_alive(p))
+               ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), ppid,
@@ -4505,7 +4912,7 @@ void show_state_filter(unsigned long state_filter)
                "  task                        PC stack   pid father\n");
 #endif
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
@@ -4513,7 +4920,7 @@ void show_state_filter(unsigned long state_filter)
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
-       } while_each_thread(g, p);
+       }
 
        touch_all_softlockup_watchdogs();
 
@@ -4546,13 +4953,22 @@ void init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&idle->pi_lock, flags);
+       raw_spin_lock(&rq->lock);
 
        __sched_fork(0, idle);
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-       do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+       /*
+        * It's possible that init_idle() gets called multiple times on a task;
+        * in that case do_set_cpus_allowed() will not do the right thing.
+        *
+        * And since this is boot we can forgo the serialization.
+        */
+       set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
        /*
         * We're having a chicken and egg problem, even though we are
         * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4568,11 +4984,12 @@ void init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
-       idle->on_rq = 1;
-#if defined(CONFIG_SMP)
+       idle->on_rq = TASK_ON_RQ_QUEUED;
+#ifdef CONFIG_SMP
        idle->on_cpu = 1;
 #endif
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
        init_idle_preempt_count(idle, cpu);
@@ -4583,132 +5000,91 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
        vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
-}
-
-#ifdef CONFIG_SMP
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-       if (p->sched_class && p->sched_class->set_cpus_allowed)
-               p->sched_class->set_cpus_allowed(p, new_mask);
-
-       cpumask_copy(&p->cpus_allowed, new_mask);
-       p->nr_cpus_allowed = cpumask_weight(new_mask);
-}
-
-/*
- * This is how migration works:
- *
- * 1) we invoke migration_cpu_stop() on the target CPU using
- *    stop_one_cpu().
- * 2) stopper starts to run (implicitly forcing the migrated thread
- *    off the CPU)
- * 3) it checks whether the migrated task is still in the wrong runqueue.
- * 4) if it's in the wrong runqueue then the migration thread removes
- *    it and puts it into the right queue.
- * 5) stopper completes and stop_one_cpu() returns and the migration
- *    is done.
- */
-
-/*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
- */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+}
+
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
 {
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
        unsigned long flags;
-       struct rq *rq;
-       unsigned int dest_cpu;
-       int ret = 0;
-
-       rq = task_rq_lock(p, &flags);
-
-       if (cpumask_equal(&p->cpus_allowed, new_mask))
-               goto out;
-
-       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
-               ret = -EINVAL;
-               goto out;
-       }
 
-       do_set_cpus_allowed(p, new_mask);
+       if (!cpumask_weight(cur))
+               return ret;
 
-       /* Can the task run on the task's current CPU? If so, we're done */
-       if (cpumask_test_cpu(task_cpu(p), new_mask))
-               goto out;
+       rcu_read_lock_sched();
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
 
-       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
-               struct migration_arg arg = { p, dest_cpu };
-               /* Need help from migration thread: drop lock and wait. */
-               task_rq_unlock(rq, p, &flags);
-               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-               tlb_migrate_finish(p->mm);
-               return 0;
-       }
-out:
-       task_rq_unlock(rq, p, &flags);
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+       rcu_read_unlock_sched();
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
-/*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
- * this because either it can't run here any more (set_cpus_allowed()
- * away from this CPU, or CPU going down), or because we're
- * attempting to rebalance this task on exec (sched_exec).
- *
- * So we race with normal scheduler movements, but that's OK, as long
- * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
- */
-static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+int task_can_attach(struct task_struct *p,
+                   const struct cpumask *cs_cpus_allowed)
 {
-       struct rq *rq_dest, *rq_src;
        int ret = 0;
 
-       if (unlikely(!cpu_active(dest_cpu)))
-               return ret;
+       /*
+        * Kthreads which disallow setaffinity shouldn't be moved
+        * to a new cpuset; we don't want to change their cpu
+        * affinity and isolating such threads by their set of
+        * allowed nodes is unnecessary.  Thus, cpusets are not
+        * applicable for such threads.  This prevents checking for
+        * success of set_cpus_allowed_ptr() on all attached tasks
+        * before cpus_allowed may be changed.
+        */
+       if (p->flags & PF_NO_SETAFFINITY) {
+               ret = -EINVAL;
+               goto out;
+       }
 
-       rq_src = cpu_rq(src_cpu);
-       rq_dest = cpu_rq(dest_cpu);
+#ifdef CONFIG_SMP
+       if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+                                             cs_cpus_allowed)) {
+               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                                       cs_cpus_allowed);
+               struct dl_bw *dl_b;
+               bool overflow;
+               int cpus;
+               unsigned long flags;
 
-       raw_spin_lock(&p->pi_lock);
-       double_rq_lock(rq_src, rq_dest);
-       /* Already moved. */
-       if (task_cpu(p) != src_cpu)
-               goto done;
-       /* Affinity changed (again). */
-       if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-               goto fail;
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(dest_cpu);
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(dest_cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+               if (overflow)
+                       ret = -EBUSY;
+               else {
+                       /*
+                        * We reserve space for this task in the destination
+                        * root_domain, as we can't fail after this point.
+                        * We will free resources in the source root_domain
+                        * later on (see set_cpus_allowed_dl()).
+                        */
+                       __dl_add(dl_b, p->dl.dl_bw);
+               }
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+               rcu_read_unlock_sched();
 
-       /*
-        * If we're not on a rq, the next wake-up will ensure we're
-        * placed properly.
-        */
-       if (p->on_rq) {
-               dequeue_task(rq_src, p, 0);
-               set_task_cpu(p, dest_cpu);
-               enqueue_task(rq_dest, p, 0);
-               check_preempt_curr(rq_dest, p, 0);
-       }
-done:
-       ret = 1;
-fail:
-       double_rq_unlock(rq_src, rq_dest);
-       raw_spin_unlock(&p->pi_lock);
+       }
+#endif
+out:
        return ret;
 }
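
The __dl_overflow() test above boils down to simple fixed-point arithmetic: the bandwidth the task asks for (runtime/period) is added to what the destination root_domain has already reserved and compared against what its CPUs can offer. Below is a standalone sketch of that arithmetic with made-up task parameters; the 2^20 fixed-point scale mirrors to_ratio() and is an internal detail, so treat it as an assumption. The 95% per-CPU limit comes from the default sysctl_sched_rt_runtime of 950000us per 1s period.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fixed-point utilization: runtime/period scaled by 2^20 (assumed scale). */
static uint64_t to_ratio_example(uint64_t period_ns, uint64_t runtime_ns)
{
	return (runtime_ns << 20) / period_ns;
}

/* Same shape as the overflow test above (with old_bw == 0): adding new_bw
 * must not push the reserved bandwidth above what 'cpus' CPUs can offer. */
static bool dl_overflow_example(uint64_t per_cpu_bw, int cpus,
				uint64_t total_bw, uint64_t new_bw)
{
	return per_cpu_bw * cpus < total_bw + new_bw;
}

int main(void)
{
	/* 95% of each CPU is available to -deadline/-rt tasks by default. */
	uint64_t per_cpu_bw = to_ratio_example(1000000, 950000);
	uint64_t total_bw   = 3 * to_ratio_example(100, 30);	/* 3 tasks at 30% */
	uint64_t new_bw     = to_ratio_example(100, 10);	/* new task at 10% */

	printf("admitting on 1 cpu would %s\n",
	       dl_overflow_example(per_cpu_bw, 1, total_bw, new_bw) ?
	       "overflow (-EBUSY)" : "fit");
	return 0;
}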
 
+#ifdef CONFIG_SMP
+
 #ifdef CONFIG_NUMA_BALANCING
 /* Migrate current task p to target_cpu */
 int migrate_task_to(struct task_struct *p, int target_cpu)
@@ -4736,48 +5112,28 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
        struct rq *rq;
        unsigned long flags;
-       bool on_rq, running;
+       bool queued, running;
 
        rq = task_rq_lock(p, &flags);
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
        p->numa_preferred_nid = nid;
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, p, 0);
        task_rq_unlock(rq, p, &flags);
 }
-#endif
-
-/*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
- * and performs thread migration by bumping thread off CPU then
- * 'pushing' onto another runqueue.
- */
-static int migration_cpu_stop(void *data)
-{
-       struct migration_arg *arg = data;
-
-       /*
-        * The original target cpu might have gone down and we might
-        * be on another cpu but it doesn't matter.
-        */
-       local_irq_disable();
-       __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
-       local_irq_enable();
-       return 0;
-}
+#endif /* CONFIG_NUMA_BALANCING */
 
 #ifdef CONFIG_HOTPLUG_CPU
-
 /*
  * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
@@ -4833,9 +5189,9 @@ static struct task_struct fake_task = {
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(struct rq *dead_rq)
 {
-       struct rq *rq = cpu_rq(dead_cpu);
+       struct rq *rq = dead_rq;
        struct task_struct *next, *stop = rq->stop;
        int dest_cpu;
 
@@ -4857,7 +5213,7 @@ static void migrate_tasks(unsigned int dead_cpu)
         */
        update_rq_clock(rq);
 
-       for ( ; ; ) {
+       for (;;) {
                /*
                 * There's this thread running, bail when that's the only
                 * remaining thread.
@@ -4865,22 +5221,52 @@ static void migrate_tasks(unsigned int dead_cpu)
                if (rq->nr_running == 1)
                        break;
 
+               /*
+                * pick_next_task() assumes a pinned rq->lock.
+                */
+               lockdep_pin_lock(&rq->lock);
                next = pick_next_task(rq, &fake_task);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
 
-               /* Find suitable destination for @next, with force if needed. */
-               dest_cpu = select_fallback_rq(dead_cpu, next);
+               /*
+                * The rules for changing task_struct::cpus_allowed are to hold
+                * both pi_lock and rq->lock, such that holding either
+                * stabilizes the mask.
+                *
+                * Dropping rq->lock is not quite as disastrous as it usually
+                * is, because the cpu is !cpu_active at this point, so
+                * load-balance will not interfere. Also, we're inside
+                * stop-machine.
+                */
+               lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock(&rq->lock);
+               raw_spin_lock(&next->pi_lock);
+               raw_spin_lock(&rq->lock);
 
-               __migrate_task(next, dead_cpu, dest_cpu);
+               /*
+                * Since we're inside stop-machine, _nothing_ should have
+                * changed the task; WARN if weird stuff happened, because in
+                * that case the above rq->lock drop was not safe either.
+                */
+               if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+                       raw_spin_unlock(&next->pi_lock);
+                       continue;
+               }
 
-               raw_spin_lock(&rq->lock);
+               /* Find suitable destination for @next, with force if needed. */
+               dest_cpu = select_fallback_rq(dead_rq->cpu, next);
+
+               rq = __migrate_task(rq, next, dest_cpu);
+               if (rq != dead_rq) {
+                       raw_spin_unlock(&rq->lock);
+                       rq = dead_rq;
+                       raw_spin_lock(&rq->lock);
+               }
+               raw_spin_unlock(&next->pi_lock);
        }
 
        rq->stop = stop;
 }
-
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -5046,8 +5432,7 @@ static void register_sched_domain_sysctl(void)
 /* may be called multiple times per register */
 static void unregister_sched_domain_sysctl(void)
 {
-       if (sd_sysctl_header)
-               unregister_sysctl_table(sd_sysctl_header);
+       unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
        if (sd_ctl_dir[0].child)
                sd_free_ctl_entry(&sd_ctl_dir[0].child);
@@ -5059,7 +5444,7 @@ static void register_sched_domain_sysctl(void)
 static void unregister_sched_domain_sysctl(void)
 {
 }
-#endif
+#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
 
 static void set_rq_online(struct rq *rq)
 {
@@ -5128,7 +5513,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               migrate_tasks(cpu);
+               migrate_tasks(rq);
                BUG_ON(rq->nr_running != 1); /* the migration thread */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
@@ -5154,7 +5539,7 @@ static struct notifier_block migration_notifier = {
        .priority = CPU_PRI_MIGRATION,
 };
 
-static void __cpuinit set_cpu_rq_start_time(void)
+static void set_cpu_rq_start_time(void)
 {
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
@@ -5168,6 +5553,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
        case CPU_STARTING:
                set_cpu_rq_start_time();
                return NOTIFY_OK;
+       case CPU_ONLINE:
+               /*
+                * At this point a starting CPU has marked itself as online via
+                * set_cpu_online(). But it might not yet have marked itself
+                * as active, which is essential from here on.
+                *
+                * Thus, fall through and help the starting CPU along.
+                */
        case CPU_DOWN_FAILED:
                set_cpu_active((long)hcpu, true);
                return NOTIFY_OK;
@@ -5179,31 +5572,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
-       unsigned long flags;
-       long cpu = (long)hcpu;
-
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
-               set_cpu_active(cpu, false);
-
-               /* explicitly allow suspend */
-               if (!(action & CPU_TASKS_FROZEN)) {
-                       struct dl_bw *dl_b = dl_bw_of(cpu);
-                       bool overflow;
-                       int cpus;
-
-                       raw_spin_lock_irqsave(&dl_b->lock, flags);
-                       cpus = dl_bw_cpus(cpu);
-                       overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-                       if (overflow)
-                               return notifier_from_errno(-EBUSY);
-               }
+               set_cpu_active((long)hcpu, false);
                return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
        }
-
-       return NOTIFY_DONE;
 }
 
 static int __init migration_init(void)
@@ -5224,9 +5599,6 @@ static int __init migration_init(void)
        return 0;
 }
 early_initcall(migration_init);
-#endif
-
-#ifdef CONFIG_SMP
 
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
@@ -5251,9 +5623,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
 {
        struct sched_group *group = sd->groups;
-       char str[256];
 
-       cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
        cpumask_clear(groupmask);
 
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5266,7 +5636,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                return -1;
        }
 
-       printk(KERN_CONT "span %s level %s\n", str, sd->name);
+       printk(KERN_CONT "span %*pbl level %s\n",
+              cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5285,17 +5656,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               /*
-                * Even though we initialize ->capacity to something semi-sane,
-                * we leave capacity_orig unset. This allows us to detect if
-                * domain iteration is still funny without causing /0 traps.
-                */
-               if (!group->sgc->capacity_orig) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: domain->cpu_capacity not set\n");
-                       break;
-               }
-
                if (!cpumask_weight(sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
@@ -5311,9 +5671,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-               cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
-               printk(KERN_CONT " %s", str);
+               printk(KERN_CONT " %*pbl",
+                      cpumask_pr_args(sched_group_cpus(group)));
                if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
                        printk(KERN_CONT " (cpu_capacity = %d)",
                                group->sgc->capacity);
@@ -5669,9 +6028,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        update_top_cache_domain(cpu);
 }
 
-/* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
-
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
@@ -5739,7 +6095,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        struct sd_data *sdd = sd->private;
-       struct sched_domain *child;
+       struct sched_domain *sibling;
        int i;
 
        cpumask_clear(covered);
@@ -5750,10 +6106,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               child = *per_cpu_ptr(sdd->sd, i);
+               sibling = *per_cpu_ptr(sdd->sd, i);
 
                /* See the comment near build_group_mask(). */
-               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;
 
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5763,10 +6119,9 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-               if (child->child) {
-                       child = child->child;
-                       cpumask_copy(sg_span, sched_domain_span(child));
-               } else
+               if (sibling->child)
+                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
+               else
                        cpumask_set_cpu(i, sg_span);
 
                cpumask_or(covered, covered, sg_span);
@@ -5781,7 +6136,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                 * die on a /0 trap.
                 */
                sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
-               sg->sgc->capacity_orig = sg->sgc->capacity;
 
                /*
                 * Make sure the first group of this domain contains the
@@ -6004,7 +6358,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 
 #ifdef CONFIG_NUMA
 static int sched_domains_numa_levels;
+enum numa_topology_type sched_numa_topology_type;
 static int *sched_domains_numa_distance;
+int sched_max_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 #endif
@@ -6090,6 +6446,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
         */
 
        if (sd->flags & SD_SHARE_CPUCAPACITY) {
+               sd->flags |= SD_PREFER_SIBLING;
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
 
@@ -6176,7 +6533,7 @@ static void sched_numa_warn(const char *str)
        printk(KERN_WARNING "\n");
 }
 
-static bool find_numa_distance(int distance)
+bool find_numa_distance(int distance)
 {
        int i;
 
@@ -6191,6 +6548,58 @@ static bool find_numa_distance(int distance)
        return false;
 }
 
+/*
+ * A system can have three types of NUMA topology:
+ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
+ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
+ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
+ *
+ * The difference between a glueless mesh topology and a backplane
+ * topology lies in whether communication between not directly
+ * connected nodes goes through intermediary nodes (where programs
+ * could run), or through backplane controllers. This affects
+ * placement of programs.
+ *
+ * The type of topology can be discerned with the following tests:
+ * - If the maximum distance between any nodes is 1 hop, the system
+ *   is directly connected.
+ * - If for two nodes A and B, located N > 1 hops away from each other,
+ *   there is an intermediary node C, which is < N hops away from both
+ *   nodes A and B, the system is a glueless mesh.
+ */
+static void init_numa_topology_type(void)
+{
+       int a, b, c, n;
+
+       n = sched_max_numa_distance;
+
+       if (sched_domains_numa_levels <= 1) {
+               sched_numa_topology_type = NUMA_DIRECT;
+               return;
+       }
+
+       for_each_online_node(a) {
+               for_each_online_node(b) {
+                       /* Find two nodes furthest removed from each other. */
+                       if (node_distance(a, b) < n)
+                               continue;
+
+                       /* Is there an intermediary node between a and b? */
+                       for_each_online_node(c) {
+                               if (node_distance(a, c) < n &&
+                                   node_distance(b, c) < n) {
+                                       sched_numa_topology_type =
+                                                       NUMA_GLUELESS_MESH;
+                                       return;
+                               }
+                       }
+
+                       sched_numa_topology_type = NUMA_BACKPLANE;
+                       return;
+               }
+       }
+}
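
To make the classification above concrete, here is a self-contained sketch that runs the same test over a made-up 4-node distance table. The numbers follow the usual ACPI SLIT convention of 10 for local and 20 for one hop and are an assumption, not taken from real hardware; likewise the "max <= 20" shortcut stands in for the kernel's check on the number of distance levels.

#include <stdio.h>

/* Assumed topology: nodes 0-1 and 2-3 sit on separate boards that are
 * reachable from each other only through a backplane. */
static const int dist[4][4] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

int main(void)
{
	int n = 4, max = 0, a, b, c;

	for (a = 0; a < n; a++)
		for (b = 0; b < n; b++)
			if (dist[a][b] > max)
				max = dist[a][b];

	if (max <= 20) {	/* nothing further than one hop away */
		puts("NUMA_DIRECT");
		return 0;
	}

	/* Same test as init_numa_topology_type(): for a furthest-apart pair,
	 * look for an intermediary node that is closer to both ends. */
	for (a = 0; a < n; a++) {
		for (b = 0; b < n; b++) {
			if (dist[a][b] < max)
				continue;
			for (c = 0; c < n; c++) {
				if (dist[a][c] < max && dist[b][c] < max) {
					puts("NUMA_GLUELESS_MESH");
					return 0;
				}
			}
			puts("NUMA_BACKPLANE");
			return 0;
		}
	}
	return 0;
}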
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6244,6 +6653,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+       if (!level)
+               return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
@@ -6323,6 +6736,9 @@ static void sched_init_numa(void)
        sched_domain_topology = tl;
 
        sched_domains_numa_levels = level;
+       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+
+       init_numa_topology_type();
 }
 
 static void sched_domains_numa_masks_set(int cpu)
@@ -6410,7 +6826,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                        struct sched_group *sg;
                        struct sched_group_capacity *sgc;
 
-                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                        if (!sd)
                                return -ENOMEM;
@@ -6484,6 +6900,20 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                sched_domain_level_max = max(sched_domain_level_max, sd->level);
                child->parent = sd;
                sd->child = child;
+
+               if (!cpumask_subset(sched_domain_span(child),
+                                   sched_domain_span(sd))) {
+                       pr_err("BUG: arch topology borken\n");
+#ifdef CONFIG_SCHED_DEBUG
+                       pr_err("     the %s domain is not a subset of the %s domain\n",
+                                       child->name, sd->name);
+#endif
+                       /* Fixup, ensure @sd has at least @child cpus. */
+                       cpumask_or(sched_domain_span(sd),
+                                  sched_domain_span(sd),
+                                  sched_domain_span(child));
+               }
+
        }
        set_domain_attribute(sd, attr);
 
@@ -6784,7 +7214,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                 */
 
        case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
                cpuset_update_active_cpus(true);
                break;
        default:
@@ -6796,8 +7225,26 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
 {
+       unsigned long flags;
+       long cpu = (long)hcpu;
+       struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
+
        switch (action) {
        case CPU_DOWN_PREPARE:
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, 0);
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+
+               if (overflow)
+                       return notifier_from_errno(-EBUSY);
                cpuset_update_active_cpus(false);
                break;
        case CPU_DOWN_PREPARE_FROZEN:
@@ -6853,8 +7300,6 @@ void __init sched_init_smp(void)
 }
 #endif /* CONFIG_SMP */
 
-const_debug unsigned int sysctl_timer_migration = 1;
-
 int in_sched_functions(unsigned long addr)
 {
        return in_lock_functions(addr) ||
@@ -6883,9 +7328,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
 #endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -6906,13 +7348,13 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
        init_rt_bandwidth(&def_rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
@@ -6945,8 +7387,8 @@ void __init sched_init(void)
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
                init_cfs_rq(&rq->cfs);
-               init_rt_rq(&rq->rt, rq);
-               init_dl_rq(&rq->dl, rq);
+               init_rt_rq(&rq->rt);
+               init_dl_rq(&rq->dl);
 #ifdef CONFIG_FAIR_GROUP_SCHED
                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -6986,8 +7428,8 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                rq->sd = NULL;
                rq->rd = NULL;
-               rq->cpu_capacity = SCHED_CAPACITY_SCALE;
-               rq->post_schedule = 0;
+               rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
+               rq->balance_callback = NULL;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
                rq->push_cpu = 0;
@@ -7023,6 +7465,11 @@ void __init sched_init(void)
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current);
 
+       /*
+        * During early bootup we pretend to be a normal task:
+        */
+       current->sched_class = &fair_sched_class;
+
        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
@@ -7033,11 +7480,6 @@ void __init sched_init(void)
 
        calc_load_update = jiffies + LOAD_FREQ;
 
-       /*
-        * During early bootup we pretend to be a normal task:
-        */
-       current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
        /* May be allocated at isolcpus cmdline parse time */
@@ -7060,6 +7502,24 @@ static inline int preempt_count_equals(int preempt_offset)
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       /*
+        * Blocking primitives will set (and therefore destroy) current->state;
+        * since we will exit with TASK_RUNNING, make sure we enter with it,
+        * otherwise we will destroy state.
+        */
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+                       "do not call blocking ops when !TASK_RUNNING; "
+                       "state=%lx set at [<%p>] %pS\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change);
+
+       ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+void ___might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
@@ -7080,6 +7540,9 @@ void __might_sleep(const char *file, int line, int preempt_offset)
                        in_atomic(), irqs_disabled(),
                        current->pid, current->comm);
 
+       if (task_stack_end_corrupted(current))
+               printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);
@@ -7092,43 +7555,23 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 #endif
        dump_stack();
 }
-EXPORT_SYMBOL(__might_sleep);
+EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void normalize_task(struct rq *rq, struct task_struct *p)
+void normalize_rt_tasks(void)
 {
-       const struct sched_class *prev_class = p->sched_class;
+       struct task_struct *g, *p;
        struct sched_attr attr = {
                .sched_policy = SCHED_NORMAL,
        };
-       int old_prio = p->prio;
-       int on_rq;
 
-       on_rq = p->on_rq;
-       if (on_rq)
-               dequeue_task(rq, p, 0);
-       __setscheduler(rq, p, &attr);
-       if (on_rq) {
-               enqueue_task(rq, p, 0);
-               resched_task(rq->curr);
-       }
-
-       check_class_changed(rq, p, prev_class, old_prio);
-}
-
-void normalize_rt_tasks(void)
-{
-       struct task_struct *g, *p;
-       unsigned long flags;
-       struct rq *rq;
-
-       read_lock_irqsave(&tasklist_lock, flags);
-       do_each_thread(g, p) {
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
                /*
                 * Only normalize user tasks:
                 */
-               if (!p->mm)
+               if (p->flags & PF_KTHREAD)
                        continue;
 
                p->se.exec_start                = 0;
@@ -7143,21 +7586,14 @@ void normalize_rt_tasks(void)
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
-                       if (task_nice(p) < 0 && p->mm)
+                       if (task_nice(p) < 0)
                                set_user_nice(p, 0);
                        continue;
                }
 
-               raw_spin_lock(&p->pi_lock);
-               rq = __task_rq_lock(p);
-
-               normalize_task(rq, p);
-
-               __task_rq_unlock(rq);
-               raw_spin_unlock(&p->pi_lock);
-       } while_each_thread(g, p);
-
-       read_unlock_irqrestore(&tasklist_lock, flags);
+               __sched_setscheduler(p, &attr, false, false);
+       }
+       read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
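
normalize_rt_tasks() is reached from the magic SysRq handler. Assuming CONFIG_MAGIC_SYSRQ and the sysrq interface are enabled, and that 'n' is still the key bound to "nice all RT tasks", the path can be exercised from userspace with a sketch like this:

#include <stdio.h>

int main(void)
{
	/* SysRq 'n': ask the kernel to normalize/renice all RT tasks. */
	FILE *f = fopen("/proc/sysrq-trigger", "w");

	if (!f) {
		perror("/proc/sysrq-trigger");
		return 1;
	}
	fputc('n', f);
	fclose(f);
	return 0;
}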
@@ -7297,36 +7733,40 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
-       int on_rq, running;
+       int queued, running;
        unsigned long flags;
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &flags);
 
        running = task_current(rq, tsk);
-       on_rq = tsk->on_rq;
+       queued = task_on_rq_queued(tsk);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
-               tsk->sched_class->put_prev_task(rq, tsk);
+               put_prev_task(rq, tsk);
 
-       tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-                               lockdep_is_held(&tsk->sighand->siglock)),
+       /*
+        * All callers are synchronized by task_rq_lock(); we do not use RCU,
+        * which would be pointless here. Thus, we pass "true" to task_css_check()
+        * to prevent lockdep warnings.
+        */
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, on_rq);
+               tsk->sched_class->task_move_group(tsk, queued);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, tsk, &flags);
@@ -7344,10 +7784,16 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
-       do_each_thread(g, p) {
-               if (rt_task(p) && task_rq(p)->rt.tg == tg)
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
+       for_each_process_thread(g, p) {
+               if (rt_task(p) && task_group(p) == tg)
                        return 1;
-       } while_each_thread(g, p);
+       }
 
        return 0;
 }
@@ -7436,6 +7882,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing the root group RT runtime is BAD; it would prevent the
+        * kernel from creating (and/or operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* A period of zero doesn't make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7485,16 +7942,13 @@ static long sched_group_rt_runtime(struct task_group *tg)
        return rt_runtime_us;
 }
 
-static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
        u64 rt_runtime, rt_period;
 
-       rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+       rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
@@ -7551,11 +8005,12 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-static int sched_dl_global_constraints(void)
+static int sched_dl_global_validate(void)
 {
        u64 runtime = global_rt_runtime();
        u64 period = global_rt_period();
        u64 new_bw = to_ratio(period, runtime);
+       struct dl_bw *dl_b;
        int cpu, ret = 0;
        unsigned long flags;
 
@@ -7569,13 +8024,16 @@ static int sched_dl_global_constraints(void)
         * solutions is welcome!
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                if (new_bw < dl_b->total_bw)
                        ret = -EBUSY;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+               rcu_read_unlock_sched();
+
                if (ret)
                        break;
        }
@@ -7586,6 +8044,7 @@ static int sched_dl_global_constraints(void)
 static void sched_dl_do_global(void)
 {
        u64 new_bw = -1;
+       struct dl_bw *dl_b;
        int cpu;
        unsigned long flags;
 
@@ -7599,11 +8058,14 @@ static void sched_dl_do_global(void)
         * FIXME: As above...
         */
        for_each_possible_cpu(cpu) {
-               struct dl_bw *dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
                raw_spin_lock_irqsave(&dl_b->lock, flags);
                dl_b->bw = new_bw;
                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
        }
 }
 
@@ -7644,11 +8106,11 @@ int sched_rt_handler(struct ctl_table *table, int write,
                if (ret)
                        goto undo;
 
-               ret = sched_rt_global_constraints();
+               ret = sched_dl_global_validate();
                if (ret)
                        goto undo;
 
-               ret = sched_dl_global_constraints();
+               ret = sched_rt_global_constraints();
                if (ret)
                        goto undo;
 
@@ -7733,6 +8195,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
        sched_offline_group(tg);
 }
 
+static void cpu_cgroup_fork(struct task_struct *task, void *private)
+{
+       sched_move_task(task);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
@@ -7822,6 +8289,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        if (period > max_cfs_quota_period)
                return -EINVAL;
 
+       /*
+        * Prevent race between setting of cfs_rq->runtime_enabled and
+        * unthrottle_offline_cfs_rqs().
+        */
+       get_online_cpus();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -7841,13 +8313,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
-       if (runtime_enabled && cfs_b->timer_active) {
-               /* force a reprogram */
-               __start_cfs_bandwidth(cfs_b, true);
-       }
+       if (runtime_enabled)
+               start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
 
-       for_each_possible_cpu(i) {
+       for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;
 
@@ -7863,6 +8333,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
+       put_online_cpus();
 
        return ret;
 }
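
The period/quota pair validated above is what the legacy (v1) cpu cgroup files cpu.cfs_period_us and cpu.cfs_quota_us feed in. A small sketch of driving it from userspace, assuming the controller is mounted at /sys/fs/cgroup/cpu and a group named "demo" already exists (both are assumptions about the local setup):

#include <stdio.h>
#include <stdlib.h>

/* Write a single value to a cgroup control file. */
static int write_val(const char *path, long val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	const char *dir = "/sys/fs/cgroup/cpu/demo";
	char path[256];

	/* 100ms period ... */
	snprintf(path, sizeof(path), "%s/cpu.cfs_period_us", dir);
	if (write_val(path, 100000))
		return EXIT_FAILURE;

	/* ... with a 20ms quota: the group may use at most 20% of one CPU. */
	snprintf(path, sizeof(path), "%s/cpu.cfs_quota_us", dir);
	if (write_val(path, 20000))
		return EXIT_FAILURE;

	return EXIT_SUCCESS;
}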
@@ -7978,7 +8449,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
                quota = normalize_cfs_quota(tg, d);
-               parent_quota = parent_b->hierarchal_quota;
+               parent_quota = parent_b->hierarchical_quota;
 
                /*
                 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -7989,7 +8460,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
-       cfs_b->hierarchal_quota = quota;
+       cfs_b->hierarchical_quota = quota;
 
        return 0;
 }
@@ -8099,10 +8570,11 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
-       .base_cftypes   = cpu_files,
+       .legacy_cftypes = cpu_files,
        .early_init     = 1,
 };