Merge tag 'v4.3' into p/abusse/merge_upgrade
diff --git a/kernel/sched/cfs/core.c b/kernel/sched/cfs/core.c
index 2d04ea0..0c301cf 100644
@@ -73,6 +73,7 @@
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
 #include <linux/context_tracking.h>
+#include <linux/compiler.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
-{
-       unsigned long delta;
-       ktime_t soft, hard, now;
-
-       for (;;) {
-               if (hrtimer_active(period_timer))
-                       break;
-
-               now = hrtimer_cb_get_time(period_timer);
-               hrtimer_forward(period_timer, now, period);
-
-               soft = hrtimer_get_softexpires(period_timer);
-               hard = hrtimer_get_expires(period_timer);
-               delta = ktime_to_ns(ktime_sub(hard, soft));
-               __hrtimer_start_range_ns(period_timer, soft, delta,
-                                        HRTIMER_MODE_ABS_PINNED, 0);
-       }
-}
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -118,10 +99,14 @@ void update_rq_clock(struct rq *rq)
 {
        s64 delta;
 
-       if (rq->skip_clock_update > 0)
+       lockdep_assert_held(&rq->lock);
+
+       if (rq->clock_skip_update & RQCF_ACT_SKIP)
                return;
 
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+       if (delta < 0)
+               return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
 }
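
The new "delta < 0" guard keeps rq->clock monotonic even if sched_clock_cpu() returns a value that went backwards (as can happen across an unstable TSC); a negative sample is dropped rather than rewinding the clock. A minimal standalone sketch of the same pattern, with illustrative names:

/* Illustrative: accumulate a clock that must never move backwards. */
static inline void clock_advance(u64 *clock, u64 now)
{
        s64 delta = (s64)(now - *clock);

        if (delta < 0)          /* source regressed; drop this sample */
                return;
        *clock += delta;        /* otherwise advance monotonically */
}
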
@@ -179,14 +164,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       if (static_key_enabled(&sched_feat_keys[i]))
-               static_key_slow_dec(&sched_feat_keys[i]);
+       static_key_disable(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       if (!static_key_enabled(&sched_feat_keys[i]))
-               static_key_slow_inc(&sched_feat_keys[i]);
+       static_key_enable(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
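
sched_feat_enable()/sched_feat_disable() now use the static_key_enable()/static_key_disable() helpers, which already perform the "only flip if needed" check the old open-coded versions did by hand. A minimal sketch of how such a key gates a feature branch (my_feat and do_feature() are illustrative names, assuming jump-label support is built in):

static struct static_key my_feat = STATIC_KEY_INIT_FALSE;

static void maybe_do_feature(void)
{
        /* Emitted as a NOP until the key is enabled, then patched to a jump. */
        if (static_key_false(&my_feat))
                do_feature();           /* illustrative callee */
}

/* Flip all branch sites at runtime; both calls are safe to repeat: */
static_key_enable(&my_feat);
static_key_disable(&my_feat);
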
@@ -226,6 +209,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int i;
+       struct inode *inode;
 
        if (cnt > 63)
                cnt = 63;
@@ -236,7 +220,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        buf[cnt] = 0;
        cmp = strstrip(buf);
 
+       /* Ensure the static_key remains in a consistent state */
+       inode = file_inode(filp);
+       mutex_lock(&inode->i_mutex);
        i = sched_feat_set(cmp);
+       mutex_unlock(&inode->i_mutex);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
@@ -296,61 +284,8 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-
-
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
+/* cpus with isolated domains */
+cpumask_var_t cpu_isolated_map;
 
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
@@ -370,13 +305,6 @@ static struct rq *this_rq_lock(void)
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
- *
- * Its all a bit involved since we cannot program an hrt while holding the
- * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
- * reschedule event.
- *
- * When we get rescheduled we reprogram the hrtick_timer outside of the
- * rq->lock.
  */
 
 static void hrtick_clear(struct rq *rq)
@@ -404,6 +332,14 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 }
 
 #ifdef CONFIG_SMP
+
+static void __hrtick_restart(struct rq *rq)
+{
+       struct hrtimer *timer = &rq->hrtick_timer;
+
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+}
+
 /*
  * called from hardirq (IPI) context
  */
@@ -412,7 +348,7 @@ static void __hrtick_start(void *arg)
        struct rq *rq = arg;
 
        raw_spin_lock(&rq->lock);
-       hrtimer_restart(&rq->hrtick_timer);
+       __hrtick_restart(rq);
        rq->hrtick_csd_pending = 0;
        raw_spin_unlock(&rq->lock);
 }
@@ -425,14 +361,22 @@ static void __hrtick_start(void *arg)
 void hrtick_start(struct rq *rq, u64 delay)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
+       ktime_t time;
+       s64 delta;
+
+       /*
+        * Don't schedule slices shorter than 10000ns, that just
+        * doesn't make sense and can cause timer DoS.
+        */
+       delta = max_t(s64, delay, 10000LL);
+       time = ktime_add_ns(timer->base->get_time(), delta);
 
        hrtimer_set_expires(timer, time);
 
        if (rq == this_rq()) {
-               hrtimer_restart(timer);
+               __hrtick_restart(rq);
        } else if (!rq->hrtick_csd_pending) {
-               __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
+               smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
                rq->hrtick_csd_pending = 1;
        }
 }
@@ -468,8 +412,13 @@ static __init void init_hrtick(void)
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
-       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       /*
+        * Don't schedule slices shorter than 10000ns, that just
+        * doesn't make sense. Rely on vruntime for fairness.
+        */
+       delay = max_t(u64, delay, 10000LL);
+       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
@@ -505,32 +454,145 @@ static inline void init_hrtick(void)
 #endif /* CONFIG_SCHED_HRTICK */
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, val)                                             \
+({     typeof(*(ptr)) __old, __val = *(ptr);                           \
+       for (;;) {                                                      \
+               __old = cmpxchg((ptr), __val, __val | (val));           \
+               if (__old == __val)                                     \
+                       break;                                          \
+               __val = __old;                                          \
+       }                                                               \
+       __old;                                                          \
+})
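
fetch_or() is a macro so that it works for whichever integer type thread_info::flags happens to be; expanded for unsigned long it is equivalent to this illustrative function:

/* Atomically OR @mask into *@ptr and return the value seen beforehand. */
static unsigned long fetch_or_ulong(unsigned long *ptr, unsigned long mask)
{
        unsigned long old, val = *ptr;

        for (;;) {
                old = cmpxchg(ptr, val, val | mask);
                if (old == val)         /* no race: our OR took effect */
                        return old;
                val = old;              /* raced: retry on the fresh value */
        }
}

set_nr_and_not_polling() below relies on the returned old value: if TIF_POLLING_NRFLAG was already set, the polling idle loop will notice TIF_NEED_RESCHED by itself and no IPI needs to be sent.
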
+
+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
+/*
+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * this avoids any races wrt polling state changes and thereby avoids
+ * spurious IPIs.
+ */
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+       struct thread_info *ti = task_thread_info(p);
+       return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+}
+
+/*
+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
+ *
+ * If this returns true, then the idle task promises to call
+ * sched_ttwu_pending() and reschedule soon.
+ */
+static bool set_nr_if_polling(struct task_struct *p)
+{
+       struct thread_info *ti = task_thread_info(p);
+       typeof(ti->flags) old, val = READ_ONCE(ti->flags);
+
+       for (;;) {
+               if (!(val & _TIF_POLLING_NRFLAG))
+                       return false;
+               if (val & _TIF_NEED_RESCHED)
+                       return true;
+               old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
+               if (old == val)
+                       break;
+               val = old;
+       }
+       return true;
+}
+
+#else
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+       set_tsk_need_resched(p);
+       return true;
+}
+
+#ifdef CONFIG_SMP
+static bool set_nr_if_polling(struct task_struct *p)
+{
+       return false;
+}
+#endif
+#endif
+
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+       struct wake_q_node *node = &task->wake_q;
+
+       /*
+        * Atomically grab the task, if ->wake_q is !nil already it means
+        * it's already queued (either by us or someone else) and will get the
+        * wakeup due to that.
+        *
+        * This cmpxchg() implies a full barrier, which pairs with the write
+        * barrier implied by the wakeup in wake_up_q().
+        */
+       if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+               return;
+
+       get_task_struct(task);
+
+       /*
+        * The head is context local, there can be no concurrency.
+        */
+       *head->lastp = node;
+       head->lastp = &node->next;
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+       struct wake_q_node *node = head->first;
+
+       while (node != WAKE_Q_TAIL) {
+               struct task_struct *task;
+
+               task = container_of(node, struct task_struct, wake_q);
+               BUG_ON(!task);
+               /* task can safely be re-inserted now */
+               node = node->next;
+               task->wake_q.next = NULL;
+
+               /*
+                * wake_up_process() implies a wmb() to pair with the queueing
+                * in wake_q_add() so as not to miss wakeups.
+                */
+               wake_up_process(task);
+               put_task_struct(task);
+       }
+}
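
A caller-side sketch of the wake-queue API (the queue structure, lock, and waiter list are hypothetical; only WAKE_Q(), wake_q_add() and wake_up_q() come from the code above and sched.h):

static void wake_all_waiters(struct my_queue *q)
{
        struct my_waiter *w;
        WAKE_Q(wake_q);                 /* on-stack, context-local head */

        spin_lock(&q->lock);
        list_for_each_entry(w, &q->waiters, list)
                wake_q_add(&wake_q, w->task);   /* queue only, no wakeup yet */
        spin_unlock(&q->lock);

        wake_up_q(&wake_q);             /* wakeups issued after the lock drop */
}

The point of the two-phase scheme is to keep the wake_up_process() calls out of the lock hold time.
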
+
+/*
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-#ifdef CONFIG_SMP
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+       struct task_struct *curr = rq->curr;
        int cpu;
 
-       assert_raw_spin_locked(&task_rq(p)->lock);
+       lockdep_assert_held(&rq->lock);
 
-       if (test_tsk_need_resched(p))
+       if (test_tsk_need_resched(curr))
                return;
 
-       set_tsk_need_resched(p);
+       cpu = cpu_of(rq);
 
-       cpu = task_cpu(p);
-       if (cpu == smp_processor_id())
+       if (cpu == smp_processor_id()) {
+               set_tsk_need_resched(curr);
+               set_preempt_need_resched();
                return;
+       }
 
-       /* NEED_RESCHED must be visible before we test polling */
-       smp_mb();
-       if (!tsk_is_polling(p))
+       if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
+       else
+               trace_sched_wake_idle_without_ipi(cpu);
 }
 
 void resched_cpu(int cpu)
@@ -540,10 +602,11 @@ void resched_cpu(int cpu)
 
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
-       resched_task(cpu_curr(cpu));
+       resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
+#ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * In the semi idle case, use the nearest busy cpu for migrating timers
@@ -555,19 +618,24 @@ void resched_cpu(int cpu)
  */
 int get_nohz_timer_target(void)
 {
-       int cpu = smp_processor_id();
-       int i;
+       int i, cpu = smp_processor_id();
        struct sched_domain *sd;
 
+       if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
+               return cpu;
+
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
-                       if (!idle_cpu(i)) {
+                       if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
                                cpu = i;
                                goto unlock;
                        }
                }
        }
+
+       if (!is_housekeeping_cpu(cpu))
+               cpu = housekeeping_any_cpu();
 unlock:
        rcu_read_unlock();
        return cpu;
@@ -589,35 +657,24 @@ static void wake_up_idle_cpu(int cpu)
        if (cpu == smp_processor_id())
                return;
 
-       /*
-        * This is safe, as this function is called with the timer
-        * wheel base lock of (cpu) held. When the CPU is on the way
-        * to idle and has not yet set rq->curr to idle then it will
-        * be serialized on the timer wheel base lock and take the new
-        * timer into account automatically.
-        */
-       if (rq->curr != rq->idle)
-               return;
-
-       /*
-        * We can set TIF_RESCHED on the idle task of the other CPU
-        * lockless. The worst case is that the other CPU runs the
-        * idle task through an additional NOOP schedule()
-        */
-       set_tsk_need_resched(rq->idle);
-
-       /* NEED_RESCHED must be visible before we test polling */
-       smp_mb();
-       if (!tsk_is_polling(rq->idle))
+       if (set_nr_and_not_polling(rq->idle))
                smp_send_reschedule(cpu);
+       else
+               trace_sched_wake_idle_without_ipi(cpu);
 }
 
 static bool wake_up_full_nohz_cpu(int cpu)
 {
+       /*
+        * We just need the target to call irq_exit() and re-evaluate
+        * the next tick. The nohz full kick at least implies that.
+        * If needed we can still optimize that later with an
+        * empty IRQ.
+        */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
-                       smp_send_reschedule(cpu);
+                       tick_nohz_full_kick_cpu(cpu);
                return true;
        }
 
@@ -660,18 +717,32 @@ static inline bool got_nohz_idle_kick(void)
 #ifdef CONFIG_NO_HZ_FULL
 bool sched_can_stop_tick(void)
 {
-       struct rq *rq;
+       /*
+        * FIFO realtime policy runs the highest priority task. Other runnable
+        * tasks are of a lower priority. The scheduler tick does nothing.
+        */
+       if (current->policy == SCHED_FIFO)
+               return true;
 
-       rq = this_rq();
+       /*
+        * Round-robin realtime tasks time slice with other tasks at the same
+        * realtime priority. Is this task the only one at this priority?
+        */
+       if (current->policy == SCHED_RR) {
+               struct sched_rt_entity *rt_se = &current->rt;
 
-       /* Make sure rq->nr_running update is visible after the IPI */
-       smp_rmb();
+               return rt_se->run_list.prev == rt_se->run_list.next;
+       }
 
-       /* More than one running task need preemption */
-       if (rq->nr_running > 1)
-               return false;
+       /*
+        * More than one running task needs preemption.
+        * The nr_running update is assumed to be visible
+        * after the IPI is sent from the waker.
+        */
+       if (this_rq()->nr_running > 1)
+               return false;
 
-       return true;
+       return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
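
The SCHED_RR test works because run_list is a node on a circular doubly linked list: when exactly one entity is queued at a priority, that node's prev and next both point at the list head and are therefore equal. In isolation:

/* True iff @entry is the only node queued on its list
 * (assumes @entry is known to be queued). */
static inline bool only_entry_on_list(struct list_head *entry)
{
        return entry->prev == entry->next;
}
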
 
@@ -679,7 +750,7 @@ void sched_avg_update(struct rq *rq)
 {
        s64 period = sched_avg_period();
 
-       while ((s64)(rq->clock - rq->age_stamp) > period) {
+       while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
@@ -691,12 +762,6 @@ void sched_avg_update(struct rq *rq)
        }
 }
 
-#else /* !CONFIG_SMP */
-void resched_task(struct task_struct *p)
-{
-       assert_raw_spin_locked(&task_rq(p)->lock);
-       set_tsk_need_resched(p);
-}
 #endif /* CONFIG_SMP */
 
 #if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
@@ -765,14 +830,14 @@ static void set_load_weight(struct task_struct *p)
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
        update_rq_clock(rq);
-       sched_info_queued(p);
+       sched_info_queued(rq, p);
        p->sched_class->enqueue_task(rq, p, flags);
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
        update_rq_clock(rq);
-       sched_info_dequeued(p);
+       sched_info_dequeued(rq, p);
        p->sched_class->dequeue_task(rq, p, flags);
 }
 
@@ -827,19 +892,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
-               u64 st;
-
                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;
 
                if (unlikely(steal > delta))
                        steal = delta;
 
-               st = steal_ticks(steal);
-               steal = st * TICK_NSEC;
-
                rq->prev_steal_time_rq += steal;
-
                delta -= steal;
        }
 #endif
@@ -847,7 +906,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
        rq->clock_task += delta;
 
 #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+       if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
                sched_rt_avg_update(rq, irq_delta + steal);
 #endif
 }
@@ -901,7 +960,9 @@ static inline int normal_prio(struct task_struct *p)
 {
        int prio;
 
-       if (task_has_rt_policy(p))
+       if (task_has_dl_policy(p))
+               prio = MAX_DL_PRIO-1;
+       else if (task_has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
@@ -931,12 +992,21 @@ static int effective_prio(struct task_struct *p)
 /**
  * task_curr - is this task currently executing on a CPU?
  * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
  */
 inline int task_curr(const struct task_struct *p)
 {
        return cpu_curr(task_cpu(p)) == p;
 }
 
+/*
+ * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
+ * use the balance_callback list if you want balancing.
+ *
+ * this means any call to check_class_changed() must be followed by a call to
+ * balance_callback().
+ */
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                                       const struct sched_class *prev_class,
                                       int oldprio)
@@ -944,8 +1014,9 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
        if (prev_class != p->sched_class) {
                if (prev_class->switched_from)
                        prev_class->switched_from(rq, p);
+
                p->sched_class->switched_to(rq, p);
-       } else if (oldprio != p->prio)
+       } else if (oldprio != p->prio || dl_task(p))
                p->sched_class->prio_changed(rq, p, oldprio);
 }
 
@@ -960,7 +1031,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
-                               resched_task(rq->curr);
+                               resched_curr(rq);
                                break;
                        }
                }
@@ -970,62 +1041,48 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * A queue event has occurred, and we're going to schedule.  In
         * this case, we can save a useless back to back clock update.
         */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
-               rq->skip_clock_update = 1;
-}
-
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&task_migration_notifier, n);
+       if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
+               rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
-void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-{
-#ifdef CONFIG_SCHED_DEBUG
-       /*
-        * We should never call set_task_cpu() on a blocked task,
-        * ttwu() will sort out the placement.
-        */
-       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
-
-#ifdef CONFIG_LOCKDEP
-       /*
-        * The caller should hold either p->pi_lock or rq->lock, when changing
-        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-        *
-        * sched_move_task() holds both and thus holding either pins the cgroup,
-        * see task_group().
-        *
-        * Furthermore, all task_rq users should acquire both locks, see
-        * task_rq_lock().
-        */
-       WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-                                     lockdep_is_held(&task_rq(p)->lock)));
-#endif
-#endif
-
-       trace_sched_migrate_task(p, new_cpu);
+/*
+ * This is how migration works:
+ *
+ * 1) we invoke migration_cpu_stop() on the target CPU using
+ *    stop_one_cpu().
+ * 2) stopper starts to run (implicitly forcing the migrated thread
+ *    off the CPU)
+ * 3) it checks whether the migrated task is still in the wrong runqueue.
+ * 4) if it's in the wrong runqueue then the migration thread removes
+ *    it and puts it into the right queue.
+ * 5) stopper completes and stop_one_cpu() returns and the migration
+ *    is done.
+ */
 
-       if (task_cpu(p) != new_cpu) {
-               struct task_migration_notifier tmn;
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+       lockdep_assert_held(&rq->lock);
 
-               if (p->sched_class->migrate_task_rq)
-                       p->sched_class->migrate_task_rq(p, new_cpu);
-               p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+       dequeue_task(rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, new_cpu);
+       raw_spin_unlock(&rq->lock);
 
-               tmn.task = p;
-               tmn.from_cpu = task_cpu(p);
-               tmn.to_cpu = new_cpu;
+       rq = cpu_rq(new_cpu);
 
-               atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
-       }
+       raw_spin_lock(&rq->lock);
+       BUG_ON(task_cpu(p) != new_cpu);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       enqueue_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
 
-       __set_task_cpu(p, new_cpu);
+       return rq;
 }
 
 struct migration_arg {
@@ -1033,41 +1090,351 @@ struct migration_arg {
        int dest_cpu;
 };
 
-static int migration_cpu_stop(void *data);
-
 /*
- * wait_task_inactive - wait for a thread to unschedule.
- *
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change.  If it changes, i.e. @p might have woken up,
- * then return zero.  When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count).  If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
  *
- * The caller must ensure that the task *will* unschedule sometime soon,
- * else this function might spin for a *long* time. This function can't
- * be called with interrupts off, or it may introduce deadlock with
- * smp_call_function() if an IPI is sent by the same process we are
- * waiting to become inactive.
+ * So we race with normal scheduler movements, but that's OK, as long
+ * as the task is no longer on this CPU.
  */
-unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
-       unsigned long flags;
-       int running, on_rq;
-       unsigned long ncsw;
-       struct rq *rq;
+       if (unlikely(!cpu_active(dest_cpu)))
+               return rq;
 
-       for (;;) {
-               /*
-                * We do the initial early heuristics without holding
-                * any task-queue locks at all. We'll only try to get
-                * the runqueue lock when things look like they will
-                * work out!
-                */
-               rq = task_rq(p);
+       /* Affinity changed (again). */
+       if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+               return rq;
 
-               /*
+       rq = move_queued_task(rq, p, dest_cpu);
+
+       return rq;
+}
+
+/*
+ * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * and performs thread migration by bumping thread off CPU then
+ * 'pushing' onto another runqueue.
+ */
+static int migration_cpu_stop(void *data)
+{
+       struct migration_arg *arg = data;
+       struct task_struct *p = arg->task;
+       struct rq *rq = this_rq();
+
+       /*
+        * The original target cpu might have gone down and we might
+        * be on another cpu but it doesn't matter.
+        */
+       local_irq_disable();
+       /*
+        * We need to explicitly wake pending tasks before running
+        * __migrate_task() such that we will not miss enforcing cpus_allowed
+        * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+        */
+       sched_ttwu_pending();
+
+       raw_spin_lock(&p->pi_lock);
+       raw_spin_lock(&rq->lock);
+       /*
+        * If task_rq(p) != rq, it cannot be migrated here, because we're
+        * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
+        * we're holding p->pi_lock.
+        */
+       if (task_rq(p) == rq && task_on_rq_queued(p))
+               rq = __migrate_task(rq, p, arg->dest_cpu);
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock(&p->pi_lock);
+
+       local_irq_enable();
+       return 0;
+}
+
+/*
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{
+       cpumask_copy(&p->cpus_allowed, new_mask);
+       p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+       struct rq *rq = task_rq(p);
+       bool queued, running;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       queued = task_on_rq_queued(p);
+       running = task_current(rq, p);
+
+       if (queued) {
+               /*
+                * Because __kthread_bind() calls this on blocked tasks without
+                * holding rq->lock.
+                */
+               lockdep_assert_held(&rq->lock);
+               dequeue_task(rq, p, 0);
+       }
+       if (running)
+               put_prev_task(rq, p);
+
+       p->sched_class->set_cpus_allowed(p, new_mask);
+
+       if (running)
+               p->sched_class->set_curr_task(rq);
+       if (queued)
+               enqueue_task(rq, p, 0);
+}
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+                                 const struct cpumask *new_mask, bool check)
+{
+       unsigned long flags;
+       struct rq *rq;
+       unsigned int dest_cpu;
+       int ret = 0;
+
+       rq = task_rq_lock(p, &flags);
+
+       /*
+        * Must re-check here, to close a race against __kthread_bind(),
+        * sched_setaffinity() is not guaranteed to observe the flag.
+        */
+       if (check && (p->flags & PF_NO_SETAFFINITY)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (cpumask_equal(&p->cpus_allowed, new_mask))
+               goto out;
+
+       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       do_set_cpus_allowed(p, new_mask);
+
+       /* Can the task run on the task's current CPU? If so, we're done */
+       if (cpumask_test_cpu(task_cpu(p), new_mask))
+               goto out;
+
+       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+       if (task_running(rq, p) || p->state == TASK_WAKING) {
+               struct migration_arg arg = { p, dest_cpu };
+               /* Need help from migration thread: drop lock and wait. */
+               task_rq_unlock(rq, p, &flags);
+               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+               tlb_migrate_finish(p->mm);
+               return 0;
+       } else if (task_on_rq_queued(p)) {
+               /*
+                * OK, since we're going to drop the lock immediately
+                * afterwards anyway.
+                */
+               lockdep_unpin_lock(&rq->lock);
+               rq = move_queued_task(rq, p, dest_cpu);
+               lockdep_pin_lock(&rq->lock);
+       }
+out:
+       task_rq_unlock(rq, p, &flags);
+
+       return ret;
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+       return __set_cpus_allowed_ptr(p, new_mask, false);
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
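
A hypothetical usage sketch of the exported wrapper, pinning a task to a single CPU (the helper name and error handling are illustrative):

static void pin_to_cpu(struct task_struct *p, int cpu)
{
        int ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));

        if (ret)        /* e.g. -EINVAL if no active CPU is in the mask */
                pr_warn("%s/%d: affinity change failed: %d\n",
                        p->comm, task_pid_nr(p), ret);
}
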
+
+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       /*
+        * We should never call set_task_cpu() on a blocked task,
+        * ttwu() will sort out the placement.
+        */
+       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+                       !p->on_rq);
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * The caller should hold either p->pi_lock or rq->lock, when changing
+        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+        *
+        * sched_move_task() holds both and thus holding either pins the cgroup,
+        * see task_group().
+        *
+        * Furthermore, all task_rq users should acquire both locks, see
+        * task_rq_lock().
+        */
+       WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+                                     lockdep_is_held(&task_rq(p)->lock)));
+#endif
+#endif
+
+       trace_sched_migrate_task(p, new_cpu);
+
+       if (task_cpu(p) != new_cpu) {
+               if (p->sched_class->migrate_task_rq)
+                       p->sched_class->migrate_task_rq(p, new_cpu);
+               p->se.nr_migrations++;
+               perf_event_task_migrate(p);
+       }
+
+       __set_task_cpu(p, new_cpu);
+}
+
+static void __migrate_swap_task(struct task_struct *p, int cpu)
+{
+       if (task_on_rq_queued(p)) {
+               struct rq *src_rq, *dst_rq;
+
+               src_rq = task_rq(p);
+               dst_rq = cpu_rq(cpu);
+
+               deactivate_task(src_rq, p, 0);
+               set_task_cpu(p, cpu);
+               activate_task(dst_rq, p, 0);
+               check_preempt_curr(dst_rq, p, 0);
+       } else {
+               /*
+                * Task isn't running anymore; make it appear like we migrated
+                * it before it went to sleep. This means on wakeup we make the
+                * previous cpu our target instead of where it really is.
+                */
+               p->wake_cpu = cpu;
+       }
+}
+
+struct migration_swap_arg {
+       struct task_struct *src_task, *dst_task;
+       int src_cpu, dst_cpu;
+};
+
+static int migrate_swap_stop(void *data)
+{
+       struct migration_swap_arg *arg = data;
+       struct rq *src_rq, *dst_rq;
+       int ret = -EAGAIN;
+
+       src_rq = cpu_rq(arg->src_cpu);
+       dst_rq = cpu_rq(arg->dst_cpu);
+
+       double_raw_lock(&arg->src_task->pi_lock,
+                       &arg->dst_task->pi_lock);
+       double_rq_lock(src_rq, dst_rq);
+       if (task_cpu(arg->dst_task) != arg->dst_cpu)
+               goto unlock;
+
+       if (task_cpu(arg->src_task) != arg->src_cpu)
+               goto unlock;
+
+       if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
+               goto unlock;
+
+       if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
+               goto unlock;
+
+       __migrate_swap_task(arg->src_task, arg->dst_cpu);
+       __migrate_swap_task(arg->dst_task, arg->src_cpu);
+
+       ret = 0;
+
+unlock:
+       double_rq_unlock(src_rq, dst_rq);
+       raw_spin_unlock(&arg->dst_task->pi_lock);
+       raw_spin_unlock(&arg->src_task->pi_lock);
+
+       return ret;
+}
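
Unlike a plain migration, swapping needs both runqueues locked at once, so migrate_swap_stop() takes them via double_rq_lock(), which avoids ABBA deadlock by always acquiring in a fixed order (the kernel orders by runqueue address). A sketch of that idea with illustrative names:

static void double_lock_ordered(raw_spinlock_t *a, raw_spinlock_t *b)
{
        if (a == b) {
                raw_spin_lock(a);
        } else if (a < b) {     /* fixed global order: lower address first */
                raw_spin_lock(a);
                raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
        } else {
                raw_spin_lock(b);
                raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
        }
}
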
+
+/*
+ * Cross migrate two tasks
+ */
+int migrate_swap(struct task_struct *cur, struct task_struct *p)
+{
+       struct migration_swap_arg arg;
+       int ret = -EINVAL;
+
+       arg = (struct migration_swap_arg){
+               .src_task = cur,
+               .src_cpu = task_cpu(cur),
+               .dst_task = p,
+               .dst_cpu = task_cpu(p),
+       };
+
+       if (arg.src_cpu == arg.dst_cpu)
+               goto out;
+
+       /*
+        * These three tests are all lockless; this is OK since all of them
+        * will be re-checked with proper locks held further down the line.
+        */
+       if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
+               goto out;
+
+       if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
+               goto out;
+
+       if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
+               goto out;
+
+       trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
+       ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
+
+out:
+       return ret;
+}
+
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+{
+       unsigned long flags;
+       int running, queued;
+       unsigned long ncsw;
+       struct rq *rq;
+
+       for (;;) {
+               /*
+                * We do the initial early heuristics without holding
+                * any task-queue locks at all. We'll only try to get
+                * the runqueue lock when things look like they will
+                * work out!
+                */
+               rq = task_rq(p);
+
+               /*
                 * If the task is actively running on another CPU
                 * still, just relax and busy-wait without holding
                 * any locks.
@@ -1092,7 +1459,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                rq = task_rq_lock(p, &flags);
                trace_sched_wait_task(p);
                running = task_running(rq, p);
-               on_rq = p->on_rq;
+               queued = task_on_rq_queued(p);
                ncsw = 0;
                if (!match_state || p->state == match_state)
                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1124,7 +1491,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 * running right now), it's preempted, and we should
                 * yield - it could be a while.
                 */
-               if (unlikely(on_rq)) {
+               if (unlikely(queued)) {
                        ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1167,9 +1534,7 @@ void kick_process(struct task_struct *p)
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-#endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  */
@@ -1235,7 +1600,7 @@ out:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk_sched("process %d (%s) no longer affine to cpu%d\n",
+                       printk_deferred("process %d (%s) no longer affine to cpu%d\n",
                                        task_pid_nr(p), p->comm, cpu);
                }
        }
@@ -1247,9 +1612,12 @@ out:
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
-       int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+       lockdep_assert_held(&p->pi_lock);
+
+       if (p->nr_cpus_allowed > 1)
+               cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 
        /*
         * In order not to call set_task_cpu() on a blocking task we need
@@ -1273,7 +1641,16 @@ static void update_avg(u64 *avg, u64 sample)
        s64 diff = sample - *avg;
        *avg += diff >> 3;
 }
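
update_avg() is an exponentially weighted moving average with weight 1/8:

/*
 * avg' = avg + (sample - avg) / 8
 *
 * Worked example: avg = 1000, sample = 2000  ->  avg' = 1125.
 * Eight equal samples in a row close about two thirds of the gap,
 * since 1 - (7/8)^8 ~= 0.66.
 */
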
-#endif
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+                                        const struct cpumask *new_mask, bool check)
+{
+       return set_cpus_allowed_ptr(p, new_mask);
+}
+
+#endif /* CONFIG_SMP */
 
 static void
 ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
@@ -1318,7 +1695,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
        activate_task(rq, p, en_flags);
-       p->on_rq = 1;
+       p->on_rq = TASK_ON_RQ_QUEUED;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
@@ -1332,21 +1709,29 @@ static void
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
        check_preempt_curr(rq, p, wake_flags);
-       trace_sched_wakeup(p, true);
-
        p->state = TASK_RUNNING;
+       trace_sched_wakeup(p);
+
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Our task @p is fully woken up and running; so it's safe to
+                * drop the rq->lock, hereafter rq is only used for statistics.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 
        if (rq->idle_stamp) {
-               u64 delta = rq->clock - rq->idle_stamp;
-               u64 max = 2*sysctl_sched_migration_cost;
+               u64 delta = rq_clock(rq) - rq->idle_stamp;
+               u64 max = 2*rq->max_idle_balance_cost;
+
+               update_avg(&rq->avg_idle, delta);
 
-               if (delta > max)
+               if (rq->avg_idle > max)
                        rq->avg_idle = max;
-               else
-                       update_avg(&rq->avg_idle, delta);
+
                rq->idle_stamp = 0;
        }
 #endif
@@ -1355,6 +1740,8 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 {
+       lockdep_assert_held(&rq->lock);
+
 #ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
@@ -1376,7 +1763,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        int ret = 0;
 
        rq = __task_rq_lock(p);
-       if (p->on_rq) {
+       if (task_on_rq_queued(p)) {
+               /* check_preempt_curr() may use rq clock */
+               update_rq_clock(rq);
                ttwu_do_wakeup(rq, p, wake_flags);
                ret = 1;
        }
@@ -1386,13 +1775,18 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+void sched_ttwu_pending(void)
 {
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
        struct task_struct *p;
+       unsigned long flags;
 
-       raw_spin_lock(&rq->lock);
+       if (!llist)
+               return;
+
+       raw_spin_lock_irqsave(&rq->lock, flags);
+       lockdep_pin_lock(&rq->lock);
 
        while (llist) {
                p = llist_entry(llist, struct task_struct, wake_entry);
@@ -1400,14 +1794,20 @@ static void sched_ttwu_pending(void)
                ttwu_do_activate(rq, p, 0);
        }
 
-       raw_spin_unlock(&rq->lock);
+       lockdep_unpin_lock(&rq->lock);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 void scheduler_ipi(void)
 {
-       if (llist_empty(&this_rq()->wake_list)
-                       && !tick_nohz_full_cpu(smp_processor_id())
-                       && !got_nohz_idle_kick())
+       /*
+        * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
+        * TIF_NEED_RESCHED remotely (for the first time) will also send
+        * this IPI.
+        */
+       preempt_fold_need_resched();
+
+       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1424,7 +1824,6 @@ void scheduler_ipi(void)
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
-       tick_nohz_full_check();
        sched_ttwu_pending();
 
        /*
@@ -1439,8 +1838,38 @@ void scheduler_ipi(void)
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
 {
-       if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
-               smp_send_reschedule(cpu);
+       struct rq *rq = cpu_rq(cpu);
+
+       if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
+               if (!set_nr_if_polling(rq->idle))
+                       smp_send_reschedule(cpu);
+               else
+                       trace_sched_wake_idle_without_ipi(cpu);
+       }
+}
+
+void wake_up_if_idle(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long flags;
+
+       rcu_read_lock();
+
+       if (!is_idle_task(rcu_dereference(rq->curr)))
+               goto out;
+
+       if (set_nr_if_polling(rq->idle)) {
+               trace_sched_wake_idle_without_ipi(cpu);
+       } else {
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               if (is_idle_task(rq->curr))
+                       smp_send_reschedule(cpu);
+               /* Else cpu is not in idle, do nothing here */
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
+
+out:
+       rcu_read_unlock();
 }
 
 bool cpus_share_cache(int this_cpu, int that_cpu)
@@ -1462,7 +1891,9 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 #endif
 
        raw_spin_lock(&rq->lock);
+       lockdep_pin_lock(&rq->lock);
        ttwu_do_activate(rq, p, 0);
+       lockdep_unpin_lock(&rq->lock);
        raw_spin_unlock(&rq->lock);
 }
 
@@ -1478,7 +1909,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
  * or @state didn't match @p's state.
  */
 static int
@@ -1487,11 +1918,19 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        unsigned long flags;
        int cpu, success = 0;
 
-       smp_wmb();
+       /*
+        * If we are going to wake up a thread waiting for CONDITION we
+        * need to ensure that CONDITION=1 done by the caller can not be
+        * reordered with p->state check below. This pairs with mb() in
+        * set_current_state() the waiting thread does.
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
 
+       trace_sched_waking(p);
+
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
@@ -1516,7 +1955,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (p->sched_class->task_waking)
                p->sched_class->task_waking(p);
 
-       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+       cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
@@ -1555,15 +1994,25 @@ static void try_to_wake_up_local(struct task_struct *p)
        lockdep_assert_held(&rq->lock);
 
        if (!raw_spin_trylock(&p->pi_lock)) {
+               /*
+                * This is OK, because current is on_cpu, which avoids it being
+                * picked for load-balance; preemption/IRQs are still
+                * disabled, avoiding further scheduler activity on it; and
+                * we've not yet picked a replacement task.
+                */
+               lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
                raw_spin_lock(&rq->lock);
+               lockdep_pin_lock(&rq->lock);
        }
 
        if (!(p->state & TASK_NORMAL))
                goto out;
 
-       if (!p->on_rq)
+       trace_sched_waking(p);
+
+       if (!task_on_rq_queued(p))
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
@@ -1577,8 +2026,9 @@ out:
  * @p: The process to be woken up.
  *
  * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
  *
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
@@ -1597,13 +2047,31 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/*
+ * This function clears the sched_dl_entity static params.
+ */
+void __dl_clear_params(struct task_struct *p)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = 0;
+       dl_se->dl_deadline = 0;
+       dl_se->dl_period = 0;
+       dl_se->flags = 0;
+       dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
+}
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  *
  * __sched_fork() is basic setup used by init_idle() too:
  */
-static void __sched_fork(struct task_struct *p)
+static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
        p->on_rq                        = 0;
 
@@ -1615,19 +2083,14 @@ static void __sched_fork(struct task_struct *p)
        p->se.vruntime                  = 0;
        INIT_LIST_HEAD(&p->se.group_node);
 
-/*
- * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
- * removed when useful for applications beyond shares distribution (e.g.
- * load-balance).
- */
-#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
-       p->se.avg.runnable_avg_period = 0;
-       p->se.avg.runnable_avg_sum = 0;
-#endif
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
+       RB_CLEAR_NODE(&p->dl.rb_node);
+       init_dl_task_timer(&p->dl);
+       __dl_clear_params(p);
+
        INIT_LIST_HEAD(&p->rt.run_list);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -1636,16 +2099,24 @@ static void __sched_fork(struct task_struct *p)
 
 #ifdef CONFIG_NUMA_BALANCING
        if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
-               p->mm->numa_next_scan = jiffies;
-               p->mm->numa_next_reset = jiffies;
+               p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
                p->mm->numa_scan_seq = 0;
        }
 
+       if (clone_flags & CLONE_VM)
+               p->numa_preferred_nid = current->numa_preferred_nid;
+       else
+               p->numa_preferred_nid = -1;
+
        p->node_stamp = 0ULL;
        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
-       p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
        p->numa_work.next = &p->numa_work;
+       p->numa_faults = NULL;
+       p->last_task_numa_placement = 0;
+       p->last_sum_exec_runtime = 0;
+
+       p->numa_group = NULL;
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
@@ -1666,17 +2137,39 @@ void set_numabalancing_state(bool enabled)
        numabalancing_enabled = enabled;
 }
 #endif /* CONFIG_SCHED_DEBUG */
-#endif /* CONFIG_NUMA_BALANCING */
+
+#ifdef CONFIG_PROC_SYSCTL
+int sysctl_numa_balancing(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct ctl_table t;
+       int err;
+       int state = numabalancing_enabled;
+
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       t = *table;
+       t.data = &state;
+       err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+       if (err < 0)
+               return err;
+       if (write)
+               set_numabalancing_state(state);
+       return err;
+}
+#endif
+#endif
 
 /*
  * fork()/clone()-time setup:
  */
-void sched_fork(struct task_struct *p)
+int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
        unsigned long flags;
        int cpu = get_cpu();
 
-       __sched_fork(p);
+       __sched_fork(clone_flags, p);
        /*
         * We mark the process as running here. This guarantees that
         * nobody will actually run it, and a signal or other external
@@ -1693,7 +2186,7 @@ void sched_fork(struct task_struct *p)
         * Revert to default priority/policy on fork if requested.
         */
        if (unlikely(p->sched_reset_on_fork)) {
-               if (task_has_rt_policy(p)) {
+               if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
                        p->policy = SCHED_NORMAL;
                        p->static_prio = NICE_TO_PRIO(0);
                        p->rt_priority = 0;
@@ -1710,8 +2203,14 @@ void sched_fork(struct task_struct *p)
                p->sched_reset_on_fork = 0;
        }
 
-       if (!rt_prio(p->prio))
+       if (dl_prio(p->prio)) {
+               put_cpu();
+               return -EAGAIN;
+       } else if (rt_prio(p->prio)) {
+               p->sched_class = &rt_sched_class;
+       } else {
                p->sched_class = &fair_sched_class;
+       }
 
        if (p->sched_class->task_fork)
                p->sched_class->task_fork(p);
@@ -1727,24 +2226,122 @@ void sched_fork(struct task_struct *p)
        set_task_cpu(p, cpu);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHED_INFO
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
 #if defined(CONFIG_SMP)
        p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT_COUNT
-       /* Want to start with kernel preemption disabled. */
-       task_thread_info(p)->preempt_count = 1;
-#endif
+       init_task_preempt_count(p);
 #ifdef CONFIG_SMP
        plist_node_init(&p->pushable_tasks, MAX_PRIO);
+       RB_CLEAR_NODE(&p->pushable_dl_tasks);
 #endif
 
        put_cpu();
+       return 0;
+}
+
+unsigned long to_ratio(u64 period, u64 runtime)
+{
+       if (runtime == RUNTIME_INF)
+               return 1ULL << 20;
+
+       /*
+        * Doing this here saves a lot of checks in all
+        * the calling paths, and returning zero seems
+        * safe for them anyway.
+        */
+       if (period == 0)
+               return 0;
+
+       return div64_u64(runtime << 20, period);
+}
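
to_ratio() keeps bandwidth in fixed point with 20 fractional bits, so one full CPU is 1 << 20. A worked example with assumed numbers:

/*
 * runtime = 10 ms, period = 100 ms:
 *     to_ratio(100 * NSEC_PER_MSEC, 10 * NSEC_PER_MSEC)
 *         = (10000000 << 20) / 100000000
 *         = 104857        (~0.1 * 2^20, i.e. 10% of one CPU)
 */
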
+
+#ifdef CONFIG_SMP
+inline struct dl_bw *dl_bw_of(int i)
+{
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
+       return &cpu_rq(i)->rd->dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+       struct root_domain *rd = cpu_rq(i)->rd;
+       int cpus = 0;
+
+       RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+                        "sched RCU must be held");
+       for_each_cpu_and(i, rd->span, cpu_active_mask)
+               cpus++;
+
+       return cpus;
+}
+#else
+inline struct dl_bw *dl_bw_of(int i)
+{
+       return &cpu_rq(i)->dl.dl_bw;
+}
+
+static inline int dl_bw_cpus(int i)
+{
+       return 1;
+}
+#endif
+
+/*
+ * We must be sure that accepting a new task (or allowing changing the
+ * parameters of an existing one) is consistent with the bandwidth
+ * constraints. If yes, this function also accordingly updates the currently
+ * allocated bandwidth to reflect the new situation.
+ *
+ * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
+ */
+static int dl_overflow(struct task_struct *p, int policy,
+                      const struct sched_attr *attr)
+{
+       struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+       u64 period = attr->sched_period ?: attr->sched_deadline;
+       u64 runtime = attr->sched_runtime;
+       u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
+       int cpus, err = -1;
+
+       if (new_bw == p->dl.dl_bw)
+               return 0;
+
+       /*
+        * Whether a task enters, leaves, or stays -deadline but changes
+        * its parameters, we may need to update the total allocated
+        * bandwidth of the container accordingly.
+        */
+       raw_spin_lock(&dl_b->lock);
+       cpus = dl_bw_cpus(task_cpu(p));
+       if (dl_policy(policy) && !task_has_dl_policy(p) &&
+           !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+               __dl_add(dl_b, new_bw);
+               err = 0;
+       } else if (dl_policy(policy) && task_has_dl_policy(p) &&
+                  !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+               __dl_clear(dl_b, p->dl.dl_bw);
+               __dl_add(dl_b, new_bw);
+               err = 0;
+       } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
+               __dl_clear(dl_b, p->dl.dl_bw);
+               err = 0;
+       }
+       raw_spin_unlock(&dl_b->lock);
+
+       return err;
 }
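
For concreteness, a worked admission check with hypothetical numbers (assuming the default 95% cap inherited from the RT throttling sysctls):

/*
 * 4 CPUs, each -deadline task asking for 30 ms runtime per 100 ms
 * period:
 *     new_bw   = to_ratio(100 ms, 30 ms)   = 314572   (~0.3 << 20)
 *     capacity = 4 * to_ratio(1 s, 950 ms) = 3984588
 * Admission holds while total_bw + new_bw <= capacity, so at most
 * 12 such tasks fit on this root domain.
 */
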
 
+extern void init_dl_bw(struct dl_bw *dl_b);
+
 /*
  * wake_up_new_task - wake up a newly created task for the first time.
  *
@@ -1764,29 +2361,55 @@ void wake_up_new_task(struct task_struct *p)
         *  - cpus_allowed can change in the fork path
         *  - any previously selected cpu might disappear through hotplug
         */
-       set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
+       set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
 
+       /* Initialize new task's runnable average */
+       init_entity_runnable_average(&p->se);
        rq = __task_rq_lock(p);
        activate_task(rq, p, 0);
-       p->on_rq = 1;
-       trace_sched_wakeup_new(p, true);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
-       if (p->sched_class->task_woken)
+       if (p->sched_class->task_woken) {
+               /*
+                * Nothing relies on rq->lock after this, so it's fine to
+                * drop it.
+                */
+               lockdep_unpin_lock(&rq->lock);
                p->sched_class->task_woken(rq, p);
+               lockdep_pin_lock(&rq->lock);
+       }
 #endif
        task_rq_unlock(rq, p, &flags);
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
+
+void preempt_notifier_inc(void)
+{
+       static_key_slow_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+       static_key_slow_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
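
The inc/dec pair lets the static key keep both fire paths patched out until at least one consumer exists. A hedged sketch of the expected calling pattern (hypothetical consumer; preempt_notifier_init() and the ops signatures are assumed from the preempt notifier API):

        static void my_sched_in(struct preempt_notifier *n, int cpu) { }
        static void my_sched_out(struct preempt_notifier *n,
                                 struct task_struct *next) { }
        static struct preempt_ops my_ops = {
                .sched_in  = my_sched_in,
                .sched_out = my_sched_out,
        };
        static struct preempt_notifier my_notifier;

        static void my_consumer_start(void)
        {
                preempt_notifier_inc();         /* enable the hooks */
                preempt_notifier_init(&my_notifier, &my_ops);
                preempt_notifier_register(&my_notifier);
        }

        static void my_consumer_stop(void)
        {
                preempt_notifier_unregister(&my_notifier);
                preempt_notifier_dec();         /* hooks may patch out again */
        }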
+
 /**
  * preempt_notifier_register - tell me when current is being preempted & rescheduled
  * @notifier: notifier struct to register
  */
 void preempt_notifier_register(struct preempt_notifier *notifier)
 {
+       if (!static_key_false(&preempt_notifier_key))
+               WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
        hlist_add_head(&notifier->link, &current->preempt_notifiers);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_register);
@@ -1795,7 +2418,7 @@ EXPORT_SYMBOL_GPL(preempt_notifier_register);
  * preempt_notifier_unregister - no longer interested in preemption notifications
  * @notifier: notifier struct to unregister
  *
- * This is safe to call from within a preemption notifier.
+ * This is *not* safe to call from within a preemption notifier.
  */
 void preempt_notifier_unregister(struct preempt_notifier *notifier)
 {
@@ -1803,7 +2426,7 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier)
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
        struct preempt_notifier *notifier;
 
@@ -1811,9 +2434,15 @@ static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
                notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }
 
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_in_preempt_notifiers(curr);
+}
+
 static void
-fire_sched_out_preempt_notifiers(struct task_struct *curr,
-                                struct task_struct *next)
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                  struct task_struct *next)
 {
        struct preempt_notifier *notifier;
 
@@ -1821,13 +2450,21 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
                notifier->ops->sched_out(notifier, next);
 }
 
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                struct task_struct *next)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_out_preempt_notifiers(curr, next);
+}
+
 #else /* !CONFIG_PREEMPT_NOTIFIERS */
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 }
 
-static void
+static inline void
 fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
 {
@@ -1853,7 +2490,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
                    struct task_struct *next)
 {
        trace_sched_switch(prev, next);
-       sched_info_switch(prev, next);
+       sched_info_switch(rq, prev, next);
        perf_event_task_sched_out(prev, next);
        fire_sched_out_preempt_notifiers(prev, next);
        prepare_lock_switch(rq, next);
@@ -1862,7 +2499,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 
 /**
  * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
  * @prev: the thread we just switched away from.
  *
  * finish_task_switch must be called after the context switch, paired
@@ -1874,10 +2510,16 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * so, we finish that here outside of the runqueue lock. (Doing it
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static struct rq *finish_task_switch(struct task_struct *prev)
        __releases(rq->lock)
 {
+       struct rq *rq = this_rq();
        struct mm_struct *mm = rq->prev_mm;
        long prev_state;
 
@@ -1888,15 +2530,14 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
         * schedule one last time. The schedule call will never return, and
         * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
+        *
+        * We must observe prev->state before clearing prev->on_cpu (in
+        * finish_lock_switch), otherwise a concurrent wakeup can get prev
+        * running on another CPU and we could race with its RUNNING -> DEAD
+        * transition, resulting in a double drop.
         */
        prev_state = prev->state;
        vtime_task_switch(prev);
-       finish_arch_switch(prev);
        perf_event_task_sched_in(prev, current);
        finish_lock_switch(rq, prev);
        finish_arch_post_lock_switch();
@@ -1905,6 +2546,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
+               if (prev->sched_class->task_dead)
+                       prev->sched_class->task_dead(prev);
+
                /*
                 * Remove function-return probe instances associated with this
                 * task and put them back on the free list.
@@ -1913,40 +2557,42 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
                put_task_struct(prev);
        }
 
-       tick_nohz_task_switch(current);
+       tick_nohz_task_switch();
+       return rq;
 }
 
 #ifdef CONFIG_SMP
 
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
-       if (prev->sched_class->pre_schedule)
-               prev->sched_class->pre_schedule(rq, prev);
-}
-
 /* rq->lock is NOT held, but preemption is disabled */
-static inline void post_schedule(struct rq *rq)
+static void __balance_callback(struct rq *rq)
 {
-       if (rq->post_schedule) {
-               unsigned long flags;
+       struct callback_head *head, *next;
+       void (*func)(struct rq *rq);
+       unsigned long flags;
 
-               raw_spin_lock_irqsave(&rq->lock, flags);
-               if (rq->curr->sched_class->post_schedule)
-                       rq->curr->sched_class->post_schedule(rq);
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
+       head = rq->balance_callback;
+       rq->balance_callback = NULL;
+       while (head) {
+               func = (void (*)(struct rq *))head->func;
+               next = head->next;
+               head->next = NULL;
+               head = next;
 
-               rq->post_schedule = 0;
+               func(rq);
        }
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-#else
-
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+static inline void balance_callback(struct rq *rq)
 {
+       if (unlikely(rq->balance_callback))
+               __balance_callback(rq);
 }
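
For reference, a hedged sketch of the producer side: scheduling classes are expected to queue work onto rq->balance_callback with a helper along these lines (mirroring queue_balance_callback() in sched.h; treat the exact name as an assumption):

        static inline void
        queue_balance_callback(struct rq *rq, struct callback_head *head,
                               void (*func)(struct rq *rq))
        {
                lockdep_assert_held(&rq->lock);

                if (unlikely(head->next))       /* already queued */
                        return;

                head->func = (void (*)(struct callback_head *))func;
                head->next = rq->balance_callback;
                rq->balance_callback = head;
        }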
 
-static inline void post_schedule(struct rq *rq)
+#else
+
+static inline void balance_callback(struct rq *rq)
 {
 }
 
@@ -1956,32 +2602,25 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
-       struct rq *rq = this_rq();
-
-       finish_task_switch(rq, prev);
-
-       /*
-        * FIXME: do we need to worry about rq being invalidated by the
-        * task_switch?
-        */
-       post_schedule(rq);
+       struct rq *rq;
 
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
+       /* finish_task_switch() drops rq->lock and enables preemption */
+       preempt_disable();
+       rq = finish_task_switch(prev);
+       balance_callback(rq);
        preempt_enable();
-#endif
+
        if (current->set_child_tid)
                put_user(task_pid_vnr(current), current->set_child_tid);
 }
 
 /*
- * context_switch - switch to the new MM and the new
- * thread's register state.
+ * context_switch - switch to the new MM and the new thread's register state.
  */
-static inline void
+static inline struct rq *
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
@@ -2015,21 +2654,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * of the scheduler it's an obvious special-case), so we
         * do an early lockdep release here:
         */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+       lockdep_unpin_lock(&rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
 
-       context_tracking_task_switch(prev, next);
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
-
        barrier();
-       /*
-        * this_rq must be evaluated again because prev may have moved
-        * CPUs since it called schedule(), thus the 'rq' on its stack
-        * frame will be invalid.
-        */
-       finish_task_switch(this_rq(), prev);
+
+       return finish_task_switch(prev);
 }
 
 /*
@@ -2048,6 +2680,25 @@ unsigned long nr_running(void)
        return sum;
 }
 
+/*
+ * Check if only the current task is running on the cpu.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race.  The caller is responsible for using it correctly, for example:
+ *
+ * - from a non-preemptable section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
+ */
+bool single_task_running(void)
+{
+       return raw_rq()->nr_running == 1;
+}
+EXPORT_SYMBOL(single_task_running);
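
A hedged usage sketch matching the caveats above (hypothetical helper; the caller is assumed to be bound to one CPU and to re-check every iteration):

        static bool poll_while_alone(bool (*done)(void *), void *arg,
                                     unsigned long timeout)
        {
                while (time_before(jiffies, timeout)) {
                        if (done(arg))
                                return true;
                        if (!single_task_running())
                                break;          /* contention: stop spinning */
                        cpu_relax();
                }
                return false;
        }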
+
 unsigned long long nr_context_switches(void)
 {
        int i;
@@ -2075,667 +2726,97 @@ unsigned long nr_iowait_cpu(int cpu)
        return atomic_read(&this->nr_iowait);
 }
 
-unsigned long this_cpu_load(void)
+void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
-       struct rq *this = this_rq();
-       return this->cpu_load[0];
+       struct rq *rq = this_rq();
+       *nr_waiters = atomic_read(&rq->nr_iowait);
+       *load = rq->load.weight;
 }
 
+#ifdef CONFIG_SMP
 
 /*
- * Global load-average calculations
- *
- * We take a distributed and async approach to calculating the global load-avg
- * in order to minimize overhead.
- *
- * The global load average is an exponentially decaying average of nr_running +
- * nr_uninterruptible.
- *
- * Once every LOAD_FREQ:
- *
- *   nr_active = 0;
- *   for_each_possible_cpu(cpu)
- *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
- *
- *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
- *
- * Due to a number of reasons the above turns in the mess below:
- *
- *  - for_each_possible_cpu() is prohibitively expensive on machines with
- *    serious number of cpus, therefore we need to take a distributed approach
- *    to calculating nr_active.
- *
- *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
- *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
- *
- *    So assuming nr_active := 0 when we start out -- true per definition, we
- *    can simply take per-cpu deltas and fold those into a global accumulate
- *    to obtain the same result. See calc_load_fold_active().
- *
- *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
- *    across the machine, we assume 10 ticks is sufficient time for every
- *    cpu to have completed this task.
- *
- *    This places an upper-bound on the IRQ-off latency of the machine. Then
- *    again, being late doesn't lose the delta, just wrecks the sample.
- *
- *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- *    this would add another cross-cpu cacheline miss and atomic operation
- *    to the wakeup path. Instead we increment on whatever cpu the task ran
- *    when it went into uninterruptible state and decrement on whatever cpu
- *    did the wakeup. This means that only the sum of nr_uninterruptible over
- *    all cpus yields the correct result.
- *
- *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
  */
+void sched_exec(void)
+{
+       struct task_struct *p = current;
+       unsigned long flags;
+       int dest_cpu;
 
-/* Variables and functions for calc_load */
-static atomic_long_t calc_load_tasks;
-static unsigned long calc_load_update;
-unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun); /* should be removed */
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
+       dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+       if (dest_cpu == smp_processor_id())
+               goto unlock;
 
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
+       if (likely(cpu_active(dest_cpu))) {
+               struct migration_arg arg = { p, dest_cpu };
+
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+               stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+               return;
+       }
+unlock:
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }
 
-static long calc_load_fold_active(struct rq *this_rq)
+#endif
+
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
+
+/*
+ * Return accounted runtime for the task.
+ * In case the task is currently running, return the runtime plus current's
+ * pending runtime that has not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
 {
-       long nr_active, delta = 0;
+       unsigned long flags;
+       struct rq *rq;
+       u64 ns;
 
-       nr_active = this_rq->nr_running;
-       nr_active += (long) this_rq->nr_uninterruptible;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+       /*
+        * 64-bit doesn't need locks to atomically read a 64-bit value.
+        * So we have an optimization chance when the task's delta_exec is 0.
+        * Reading ->on_cpu is racy, but this is ok.
+        *
+        * If we race with it leaving cpu, we'll take a lock. So we're correct.
+        * If we race with it entering cpu, unaccounted time is 0. This is
+        * indistinguishable from the read occurring a few cycles earlier.
+        * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+        * been accounted, so we're correct here as well.
+        */
+       if (!p->on_cpu || !task_on_rq_queued(p))
+               return p->se.sum_exec_runtime;
+#endif
 
-       if (nr_active != this_rq->calc_load_active) {
-               delta = nr_active - this_rq->calc_load_active;
-               this_rq->calc_load_active = nr_active;
+       rq = task_rq_lock(p, &flags);
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && task_on_rq_queued(p)) {
+               update_rq_clock(rq);
+               p->sched_class->update_curr(rq);
        }
+       ns = p->se.sum_exec_runtime;
+       task_rq_unlock(rq, p, &flags);
 
-       return delta;
+       return ns;
 }
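
The kernel comment above ties this path to clock_gettime(); for context, a hedged userspace sketch of the consumer side (plain POSIX, not part of the patch):

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec ts;

                /* CLOCK_THREAD_CPUTIME_ID reports the calling thread's
                 * accounted runtime, the value task_sched_runtime() keeps
                 * honest for the currently running task. */
                if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
                        printf("thread cpu time: %ld.%09ld s\n",
                               (long)ts.tv_sec, ts.tv_nsec);
                return 0;
        }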
 
 /*
- * a1 = a0 * e + a * (1 - e)
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
  */
-static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
-{
-       load *= exp;
-       load += active * (FIXED_1 - exp);
-       load += 1UL << (FSHIFT - 1);
-       return load >> FSHIFT;
-}
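
A hedged numeric check of the decay step being removed here (using the kernel's fixed-point constants: FIXED_1 = 2048, FSHIFT = 11, EXP_1 = 1884, i.e. e^(-5s/1min)):

        /*
         * One LOAD_FREQ step from load 0 with one runnable task
         * (active = 1 * FIXED_1 = 2048):
         *
         *   load = (0 * 1884 + 2048 * (2048 - 1884) + 1024) >> 11
         *        = (2048 * 164 + 1024) >> 11
         *        = 164                  -- i.e. 164/2048 ~= 0.08
         *
         * matching the familiar slow ramp of the 1-minute loadavg.
         */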
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * Handle NO_HZ for the global load-average.
- *
- * Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
- * NO_HZ.
- *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
- * when we read the global state.
- *
- * Obviously reality has to ruin such a delightfully simple scheme:
- *
- *  - When we go NO_HZ idle during the window, we can negate our sample
- *    contribution, causing under-accounting.
- *
- *    We avoid this by keeping two idle-delta counters and flipping them
- *    when the window starts, thus separating old and new NO_HZ load.
- *
- *    The only trick is the slight shift in index flip for read vs write.
- *
- *        0s            5s            10s           15s
- *          +10           +10           +10           +10
- *        |-|-----------|-|-----------|-|-----------|-|
- *    r:0 0 1           1 0           0 1           1 0
- *    w:0 1 1           0 0           1 1           0 0
- *
- *    This ensures we'll fold the old idle contribution in this window while
- *    accumulating the new one.
- *
- *  - When we wake up from NO_HZ idle during the window, we push up our
- *    contribution, since we effectively move our sample point to a known
- *    busy state.
- *
- *    This is solved by pushing the window forward, and thus skipping the
- *    sample, for this cpu (effectively using the idle-delta for this cpu which
- *    was in effect at the time the window opened). This also solves the issue
- *    of having to deal with a cpu having been in NOHZ idle for multiple
- *    LOAD_FREQ intervals.
- *
- * When making the ILB scale, we should try to pull this in as well.
- */
-static atomic_long_t calc_load_idle[2];
-static int calc_load_idx;
-
-static inline int calc_load_write_idx(void)
-{
-       int idx = calc_load_idx;
-
-       /*
-        * See calc_global_nohz(), if we observe the new index, we also
-        * need to observe the new update time.
-        */
-       smp_rmb();
-
-       /*
-        * If the folding window started, make sure we start writing in the
-        * next idle-delta.
-        */
-       if (!time_before(jiffies, calc_load_update))
-               idx++;
-
-       return idx & 1;
-}
-
-static inline int calc_load_read_idx(void)
-{
-       return calc_load_idx & 1;
-}
-
-void calc_load_enter_idle(void)
-{
-       struct rq *this_rq = this_rq();
-       long delta;
-
-       /*
-        * We're going into NOHZ mode, if there's any pending delta, fold it
-        * into the pending idle delta.
-        */
-       delta = calc_load_fold_active(this_rq);
-       if (delta) {
-               int idx = calc_load_write_idx();
-               atomic_long_add(delta, &calc_load_idle[idx]);
-       }
-}
-
-void calc_load_exit_idle(void)
-{
-       struct rq *this_rq = this_rq();
-
-       /*
-        * If we're still before the sample window, we're done.
-        */
-       if (time_before(jiffies, this_rq->calc_load_update))
-               return;
-
-       /*
-        * We woke inside or after the sample window, this means we're already
-        * accounted through the nohz accounting, so skip the entire deal and
-        * sync up for the next window.
-        */
-       this_rq->calc_load_update = calc_load_update;
-       if (time_before(jiffies, this_rq->calc_load_update + 10))
-               this_rq->calc_load_update += LOAD_FREQ;
-}
-
-static long calc_load_fold_idle(void)
-{
-       int idx = calc_load_read_idx();
-       long delta = 0;
-
-       if (atomic_long_read(&calc_load_idle[idx]))
-               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
-
-       return delta;
-}
-
-/**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x:         base of the power
- * @frac_bits: fractional bits of @x
- * @n:         power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
-static unsigned long
-fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
-{
-       unsigned long result = 1UL << frac_bits;
-
-       if (n) for (;;) {
-               if (n & 1) {
-                       result *= x;
-                       result += 1UL << (frac_bits - 1);
-                       result >>= frac_bits;
-               }
-               n >>= 1;
-               if (!n)
-                       break;
-               x *= x;
-               x += 1UL << (frac_bits - 1);
-               x >>= frac_bits;
-       }
-
-       return result;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- *    = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- *  ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- *    = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- *              n         1 - x^(n+1)
- *     S_n := \Sum x^i = -------------
- *             i=0          1 - x
- */
-static unsigned long
-calc_load_n(unsigned long load, unsigned long exp,
-           unsigned long active, unsigned int n)
-{
-
-       return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
-}
-
-/*
- * NO_HZ can leave us missing all per-cpu ticks calling
- * calc_load_account_active(), but since an idle CPU folds its delta into
- * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
- * in the pending idle delta if our idle period crossed a load cycle boundary.
- *
- * Once we've updated the global active value, we need to apply the exponential
- * weights adjusted to the number of cycles missed.
- */
-static void calc_global_nohz(void)
-{
-       long delta, active, n;
-
-       if (!time_before(jiffies, calc_load_update + 10)) {
-               /*
-                * Catch-up, fold however many we are behind still
-                */
-               delta = jiffies - calc_load_update - 10;
-               n = 1 + (delta / LOAD_FREQ);
-
-               active = atomic_long_read(&calc_load_tasks);
-               active = active > 0 ? active * FIXED_1 : 0;
-
-               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
-
-               calc_load_update += n * LOAD_FREQ;
-       }
-
-       /*
-        * Flip the idle index...
-        *
-        * Make sure we first write the new time then flip the index, so that
-        * calc_load_write_idx() will see the new time when it reads the new
-        * index, this avoids a double flip messing things up.
-        */
-       smp_wmb();
-       calc_load_idx++;
-}
-#else /* !CONFIG_NO_HZ_COMMON */
-
-static inline long calc_load_fold_idle(void) { return 0; }
-static inline void calc_global_nohz(void) { }
-
-#endif /* CONFIG_NO_HZ_COMMON */
-
-/*
- * calc_load - update the avenrun load estimates 10 ticks after the
- * CPUs have updated calc_load_tasks.
- */
-void calc_global_load(unsigned long ticks)
-{
-       long active, delta;
-
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Fold the 'old' idle-delta to include all NO_HZ cpus.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
-
-       avenrun[0] = calc_load(avenrun[0], EXP_1, active);
-       avenrun[1] = calc_load(avenrun[1], EXP_5, active);
-       avenrun[2] = calc_load(avenrun[2], EXP_15, active);
-
-       calc_load_update += LOAD_FREQ;
-
-       /*
-        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
-        */
-       calc_global_nohz();
-}
-
-/*
- * Called from update_cpu_load() to periodically update this CPU's
- * active count.
- */
-static void calc_load_account_active(struct rq *this_rq)
-{
-       long delta;
-
-       if (time_before(jiffies, this_rq->calc_load_update))
-               return;
-
-       delta  = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       this_rq->calc_load_update += LOAD_FREQ;
-}
-
-/*
- * End of global load-average stuff
- */
-
-/*
- * The exact cpuload at various idx values, calculated at every tick would be
- * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
- *
- * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
- * on nth tick when cpu may be busy, then we have:
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
- *
- * The calculation is approximated on a 128 point scale.
- * degrade_zero_ticks is the number of ticks after which load at any
- * particular idx is approximated to be zero.
- * degrade_factor is a precomputed table, a row for each load idx.
- * Each column corresponds to degradation factor for a power of two ticks,
- * based on 128 point scale.
- * Example:
- * row 2, col 3 (=12) says that the degradation at load idx 2 after
- * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
- *
- * With this power of 2 load factors, we can degrade the load n times
- * by looking at 1 bits in n and doing as many mult/shift instead of
- * n mult/shifts needed by the exact degradation.
- */
-#define DEGRADE_SHIFT          7
-static const unsigned char
-               degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
-static const unsigned char
-               degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
-                                       {0, 0, 0, 0, 0, 0, 0, 0},
-                                       {64, 32, 8, 0, 0, 0, 0, 0},
-                                       {96, 72, 40, 12, 1, 0, 0},
-                                       {112, 98, 75, 43, 15, 1, 0},
-                                       {120, 112, 98, 76, 45, 16, 2} };
-
-/*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
-static unsigned long
-decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
-{
-       int j = 0;
-
-       if (!missed_updates)
-               return load;
-
-       if (missed_updates >= degrade_zero_ticks[idx])
-               return 0;
-
-       if (idx == 1)
-               return load >> missed_updates;
-
-       while (missed_updates) {
-               if (missed_updates % 2)
-                       load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
-
-               missed_updates >>= 1;
-               j++;
-       }
-       return load;
-}
-
-/*
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC). With tickless idle this will not be called
- * every tick. We fix it up based on jiffies.
- */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
-                             unsigned long pending_updates)
-{
-       int i, scale;
-
-       this_rq->nr_load_updates++;
-
-       /* Update our load: */
-       this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
-       for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
-               unsigned long old_load, new_load;
-
-               /* scale is effectively 1 << i now, and >> i divides by scale */
-
-               old_load = this_rq->cpu_load[i];
-               old_load = decay_load_missed(old_load, pending_updates - 1, i);
-               new_load = this_load;
-               /*
-                * Round up the averaging division if load is increasing. This
-                * prevents us from getting stuck on 9 if the load is 10, for
-                * example.
-                */
-               if (new_load > old_load)
-                       new_load += scale - 1;
-
-               this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
-       }
-
-       sched_avg_update(this_rq);
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-/*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
-void update_idle_cpu_load(struct rq *this_rq)
-{
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-       unsigned long load = this_rq->load.weight;
-       unsigned long pending_updates;
-
-       /*
-        * bail if there's load or we're actually up-to-date.
-        */
-       if (load || curr_jiffies == this_rq->last_load_update_tick)
-               return;
-
-       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-       this_rq->last_load_update_tick = curr_jiffies;
-
-       __update_cpu_load(this_rq, load, pending_updates);
-}
-
-/*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
-void update_cpu_load_nohz(void)
-{
-       struct rq *this_rq = this_rq();
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-       unsigned long pending_updates;
-
-       if (curr_jiffies == this_rq->last_load_update_tick)
-               return;
-
-       raw_spin_lock(&this_rq->lock);
-       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-       if (pending_updates) {
-               this_rq->last_load_update_tick = curr_jiffies;
-               /*
-                * We were idle, this means load 0, the current load might be
-                * !0 due to remote wakeups and the sort.
-                */
-               __update_cpu_load(this_rq, 0, pending_updates);
-       }
-       raw_spin_unlock(&this_rq->lock);
-}
-#endif /* CONFIG_NO_HZ_COMMON */
-
-/*
- * Called from scheduler_tick()
- */
-static void update_cpu_load_active(struct rq *this_rq)
-{
-       /*
-        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
-        */
-       this_rq->last_load_update_tick = jiffies;
-       __update_cpu_load(this_rq, this_rq->load.weight, 1);
-
-       calc_load_account_active(this_rq);
-}
-
-#ifdef CONFIG_SMP
-
-/*
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-       struct task_struct *p = current;
-       unsigned long flags;
-       int dest_cpu;
-
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
-       if (dest_cpu == smp_processor_id())
-               goto unlock;
-
-       if (likely(cpu_active(dest_cpu))) {
-               struct migration_arg arg = { p, dest_cpu };
-
-               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-               stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-               return;
-       }
-unlock:
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-}
-
-#endif
-
-DEFINE_PER_CPU(struct kernel_stat, kstat);
-DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-
-EXPORT_PER_CPU_SYMBOL(kstat);
-EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
-
-/*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-       u64 ns = 0;
-
-       if (task_current(rq, p)) {
-               update_rq_clock(rq);
-               ns = rq->clock_task - p->se.exec_start;
-               if ((s64)ns < 0)
-                       ns = 0;
-       }
-
-       return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
-/*
- * Return accounted runtime for the task.
- * In case the task is currently running, return the runtime plus current's
- * pending runtime that have not been accounted yet.
- */
-unsigned long long task_sched_runtime(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       u64 ns = 0;
-
-       rq = task_rq_lock(p, &flags);
-       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
-       task_rq_unlock(rq, p, &flags);
-
-       return ns;
-}
-
-/*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- */
-void scheduler_tick(void)
+void scheduler_tick(void)
 {
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
@@ -2745,15 +2826,16 @@ void scheduler_tick(void)
 
        raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
-       update_cpu_load_active(rq);
        curr->sched_class->task_tick(rq, curr, 0);
+       update_cpu_load_active(rq);
+       calc_global_load_tick(rq);
        raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick();
 
 #ifdef CONFIG_SMP
        rq->idle_balance = idle_cpu(cpu);
-       trigger_load_balance(rq, cpu);
+       trigger_load_balance(rq);
 #endif
        rq_last_tick_reset(rq);
 }
@@ -2769,18 +2851,20 @@ void scheduler_tick(void)
  * This makes sure that uptime, CFS vruntime, load
  * balancing, etc... continue to move forward, even
  * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
  */
 u64 scheduler_tick_max_deferment(void)
 {
        struct rq *rq = this_rq();
-       unsigned long next, now = ACCESS_ONCE(jiffies);
+       unsigned long next, now = READ_ONCE(jiffies);
 
        next = rq->last_sched_tick + HZ;
 
        if (time_before_eq(next, now))
                return 0;
 
-       return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+       return jiffies_to_nsecs(next - now);
 }
 #endif
 
@@ -2797,7 +2881,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
                                defined(CONFIG_PREEMPT_TRACER))
 
-void __kprobes add_preempt_count(int val)
+void preempt_count_add(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
@@ -2806,7 +2890,7 @@ void __kprobes add_preempt_count(int val)
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
 #endif
-       preempt_count() += val;
+       __preempt_count_add(val);
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
@@ -2814,12 +2898,18 @@ void __kprobes add_preempt_count(int val)
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
 #endif
-       if (preempt_count() == val)
-               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+       if (preempt_count() == val) {
+               unsigned long ip = get_parent_ip(CALLER_ADDR1);
+#ifdef CONFIG_DEBUG_PREEMPT
+               current->preempt_disable_ip = ip;
+#endif
+               trace_preempt_off(CALLER_ADDR0, ip);
+       }
 }
-EXPORT_SYMBOL(add_preempt_count);
+EXPORT_SYMBOL(preempt_count_add);
+NOKPROBE_SYMBOL(preempt_count_add);
 
-void __kprobes sub_preempt_count(int val)
+void preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
        /*
@@ -2837,9 +2927,10 @@ void __kprobes sub_preempt_count(int val)
 
        if (preempt_count() == val)
                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-       preempt_count() -= val;
+       __preempt_count_sub(val);
 }
-EXPORT_SYMBOL(sub_preempt_count);
+EXPORT_SYMBOL(preempt_count_sub);
+NOKPROBE_SYMBOL(preempt_count_sub);
 
 #endif
 
@@ -2858,6 +2949,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
        print_modules();
        if (irqs_disabled())
                print_irqtrace_events(prev);
+#ifdef CONFIG_DEBUG_PREEMPT
+       if (in_atomic_preempt_off()) {
+               pr_err("Preemption disabled at:");
+               print_ip_sym(current->preempt_disable_ip);
+               pr_cont("\n");
+       }
+#endif
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -2867,12 +2965,15 @@ static noinline void __schedule_bug(struct task_struct *prev)
  */
 static inline void schedule_debug(struct task_struct *prev)
 {
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+       BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+#endif
        /*
         * Test if we are atomic. Since do_exit() needs to call into
-        * schedule() atomically, we ignore that path for now.
-        * Otherwise, whine if we are scheduling when we should not be.
+        * schedule() atomically, we ignore that path. Otherwise whine
+        * if we are scheduling when we should not.
         */
-       if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
+       if (unlikely(in_atomic_preempt_off() && prev->state != TASK_DEAD))
                __schedule_bug(prev);
        rcu_sleep_check();
 
@@ -2881,36 +2982,40 @@ static inline void schedule_debug(struct task_struct *prev)
        schedstat_inc(this_rq(), sched_count);
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
-       if (prev->on_rq || rq->skip_clock_update < 0)
-               update_rq_clock(rq);
-       prev->sched_class->put_prev_task(rq, prev);
-}
-
 /*
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-       const struct sched_class *class;
+       const struct sched_class *class = &fair_sched_class;
        struct task_struct *p;
 
        /*
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
-       if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
-               p = fair_sched_class.pick_next_task(rq);
-               if (likely(p))
-                       return p;
+       if (likely(prev->sched_class == class &&
+                  rq->nr_running == rq->cfs.h_nr_running)) {
+               p = fair_sched_class.pick_next_task(rq, prev);
+               if (unlikely(p == RETRY_TASK))
+                       goto again;
+
+               /* assumes fair_sched_class->next == idle_sched_class */
+               if (unlikely(!p))
+                       p = idle_sched_class.pick_next_task(rq, prev);
+
+               return p;
        }
 
+again:
        for_each_class(class) {
-               p = class->pick_next_task(rq);
-               if (p)
+               p = class->pick_next_task(rq, prev);
+               if (p) {
+                       if (unlikely(p == RETRY_TASK))
+                               goto again;
                        return p;
+               }
        }
 
        BUG(); /* the idle class will always have a runnable task */
@@ -2952,6 +3057,8 @@ pick_next_task(struct rq *rq)
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: must be called with preemption disabled!
  */
 //void print_rb_nodes(struct rq *rq) {
 //     struct task_struct *p;
@@ -2973,11 +3080,9 @@ static void __sched __schedule(void)
        struct rq *rq;
        int i, cpu;
 
-need_resched:
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_note_context_switch(cpu);
+       rcu_note_context_switch();
        prev = rq->curr;
 
        schedule_debug(prev);
@@ -2985,619 +3090,232 @@ need_resched:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
+       /*
+        * Make sure that signal_pending_state()->signal_pending() below
+        * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+        * done by the caller to avoid the race with signal_wake_up().
+        */
+       smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
+       lockdep_pin_lock(&rq->lock);
+
+       rq->clock_skip_update <<= 1; /* promote REQ to ACT */
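+
+       /*
+        * clock_skip_update holds two bits: RQCF_REQ_SKIP (0x01) is set by
+        * paths requesting that the next update_rq_clock() be skipped; the
+        * shift promotes it to RQCF_ACT_SKIP (0x02), which update_rq_clock()
+        * actually honours, while clearing the request bit for the next
+        * schedule.
+        */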
 
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
-               } else {
-                       deactivate_task(rq, prev, DEQUEUE_SLEEP);
-                       prev->on_rq = 0;
-
-                       /*
-                        * If a worker went to sleep, notify and ask workqueue
-                        * whether it wants to wake up a task to maintain
-                        * concurrency.
-                        */
-                       if (prev->flags & PF_WQ_WORKER) {
-                               struct task_struct *to_wakeup;
-
-                               to_wakeup = wq_worker_sleeping(prev, cpu);
-                               if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup);
-                       }
-               }
-               switch_count = &prev->nvcsw;
-       }
-
-       pre_schedule(rq, prev);
-
-       if (unlikely(!rq->nr_running))
-               idle_balance(cpu, rq);
-
-       put_prev_task(rq, prev);
-       next = pick_next_task(rq);
-       clear_tsk_need_resched(prev);
-       rq->skip_clock_update = 0;
-
-       if (likely(prev != next)) {
-               rq->nr_switches++;
-               rq->curr = next;
-               ++*switch_count;
-
-               context_switch(rq, prev, next); /* unlocks the rq */
-               /*
-                * The context switch have flipped the stack from under us
-                * and restored the local variables which were saved when
-                * this task called schedule() in the past. prev == current
-                * is still correct, but it can be moved to another cpu/rq.
-                */
-               cpu = smp_processor_id();
-               rq = cpu_rq(cpu);
-       } else
-               raw_spin_unlock_irq(&rq->lock);
-
-       post_schedule(rq);
-
-       sched_preempt_enable_no_resched();
-       if (need_resched())
-               goto need_resched;
-}
-
-static inline void sched_submit_work(struct task_struct *tsk)
-{
-       if (!tsk->state || tsk_is_pi_blocked(tsk))
-               return;
-       /*
-        * If we are going to sleep and we have plugged IO queued,
-        * make sure to submit it to avoid deadlocks.
-        */
-       if (blk_needs_flush_plug(tsk))
-               blk_schedule_flush_plug(tsk);
-}
-
-asmlinkage void __sched schedule(void)
-{
-       struct task_struct *tsk = current;
-
-       sched_submit_work(tsk);
-       __schedule();
-}
-EXPORT_SYMBOL(schedule);
-
-#ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
-{
-       /*
-        * If we come here after a random call to set_need_resched(),
-        * or we have been woken up remotely but the IPI has not yet arrived,
-        * we haven't yet exited the RCU idle mode. Do it here manually until
-        * we find a better solution.
-        */
-       user_exit();
-       schedule();
-       user_enter();
-}
-#endif
-
-/**
- * schedule_preempt_disabled - called with preemption disabled
- *
- * Returns with preemption disabled. Note: preempt_count must be 1
- */
-void __sched schedule_preempt_disabled(void)
-{
-       sched_preempt_enable_no_resched();
-       schedule();
-       preempt_disable();
-}
-
-#ifdef CONFIG_PREEMPT
-/*
- * this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
- * occur there and call schedule directly.
- */
-asmlinkage void __sched notrace preempt_schedule(void)
-{
-       struct thread_info *ti = current_thread_info();
-
-       /*
-        * If there is a non-zero preempt_count or interrupts are disabled,
-        * we do not want to preempt the current task. Just return..
-        */
-       if (likely(ti->preempt_count || irqs_disabled()))
-               return;
-
-       do {
-               add_preempt_count_notrace(PREEMPT_ACTIVE);
-               __schedule();
-               sub_preempt_count_notrace(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
-       } while (need_resched());
-}
-EXPORT_SYMBOL(preempt_schedule);
-
-/*
- * this is the entry point to schedule() from kernel preemption
- * off of irq context.
- * Note, that this is called and return with irqs disabled. This will
- * protect us against recursive calling from irq.
- */
-asmlinkage void __sched preempt_schedule_irq(void)
-{
-       struct thread_info *ti = current_thread_info();
-       enum ctx_state prev_state;
-
-       /* Catch callers which need to be fixed */
-       BUG_ON(ti->preempt_count || !irqs_disabled());
-
-       prev_state = exception_enter();
-
-       do {
-               add_preempt_count(PREEMPT_ACTIVE);
-               local_irq_enable();
-               __schedule();
-               local_irq_disable();
-               sub_preempt_count(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
-       } while (need_resched());
-
-       exception_exit(prev_state);
-}
-
-#endif /* CONFIG_PREEMPT */
-
-int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
-                         void *key)
-{
-       return try_to_wake_up(curr->private, mode, wake_flags);
-}
-EXPORT_SYMBOL(default_wake_function);
-
-/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
- *
- * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
- * zero in this (rare) case, and we handle it by continuing to scan the queue.
- */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, int wake_flags, void *key)
-{
-       wait_queue_t *curr, *next;
-
-       list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
-               unsigned flags = curr->flags;
-
-               if (curr->func(curr, mode, wake_flags, key) &&
-                               (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
-                       break;
-       }
-}
-
-/**
- * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, void *key)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, 0, key);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(__wake_up);
-
-/*
- * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
- */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
-{
-       __wake_up_common(q, mode, nr, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked);
-
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
-{
-       __wake_up_common(q, mode, 1, 0, key);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked_key);
-
-/**
- * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: opaque value to be passed to wakeup targets
- *
- * The sync wakeup differs that the waker knows that it will schedule
- * away soon, so while the target thread will be woken up, it will not
- * be migrated to another CPU - ie. the two threads are 'synchronized'
- * with each other. This can prevent needless bouncing between CPUs.
- *
- * On UP it can prevent extra preemption.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, void *key)
-{
-       unsigned long flags;
-       int wake_flags = WF_SYNC;
-
-       if (unlikely(!q))
-               return;
-
-       if (unlikely(!nr_exclusive))
-               wake_flags = 0;
-
-       spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync_key);
-
-/*
- * __wake_up_sync - see __wake_up_sync_key()
- */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
-{
-       __wake_up_sync_key(q, mode, nr_exclusive, NULL);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
-
-/**
- * complete: - signals a single thread waiting on this completion
- * @x:  holds the state of this particular completion
- *
- * This will wake up a single thread waiting on this completion. Threads will be
- * awakened in the same order in which they were queued.
- *
- * See also complete_all(), wait_for_completion() and related routines.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete(struct completion *x)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done++;
-       __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete);
-
-/**
- * complete_all: - signals all threads waiting on this completion
- * @x:  holds the state of this particular completion
- *
- * This will wake up all threads waiting on this particular completion event.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete_all(struct completion *x)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done += UINT_MAX/2;
-       __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete_all);
+               } else {
+                       deactivate_task(rq, prev, DEQUEUE_SLEEP);
+                       prev->on_rq = 0;
 
-static inline long __sched
-do_wait_for_common(struct completion *x,
-                  long (*action)(long), long timeout, int state)
-{
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
+                       /*
+                        * If a worker went to sleep, notify and ask workqueue
+                        * whether it wants to wake up a task to maintain
+                        * concurrency.
+                        */
+                       if (prev->flags & PF_WQ_WORKER) {
+                               struct task_struct *to_wakeup;
 
-               __add_wait_queue_tail_exclusive(&x->wait, &wait);
-               do {
-                       if (signal_pending_state(state, current)) {
-                               timeout = -ERESTARTSYS;
-                               break;
+                               to_wakeup = wq_worker_sleeping(prev, cpu);
+                               if (to_wakeup)
+                                       try_to_wake_up_local(to_wakeup);
                        }
-                       __set_current_state(state);
-                       spin_unlock_irq(&x->wait.lock);
-                       timeout = action(timeout);
-                       spin_lock_irq(&x->wait.lock);
-               } while (!x->done && timeout);
-               __remove_wait_queue(&x->wait, &wait);
-               if (!x->done)
-                       return timeout;
+               }
+               switch_count = &prev->nvcsw;
        }
-       x->done--;
-       return timeout ?: 1;
-}
 
-static inline long __sched
-__wait_for_common(struct completion *x,
-                 long (*action)(long), long timeout, int state)
-{
-       might_sleep();
+       if (task_on_rq_queued(prev))
+               update_rq_clock(rq);
 
-       spin_lock_irq(&x->wait.lock);
-       timeout = do_wait_for_common(x, action, timeout, state);
-       spin_unlock_irq(&x->wait.lock);
-       return timeout;
-}
+       next = pick_next_task(rq, prev);
+       clear_tsk_need_resched(prev);
+       clear_preempt_need_resched();
+       rq->clock_skip_update = 0;
 
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
-{
-       return __wait_for_common(x, schedule_timeout, timeout, state);
-}
+       if (likely(prev != next)) {
+               rq->nr_switches++;
+               rq->curr = next;
+               ++*switch_count;
 
-static long __sched
-wait_for_common_io(struct completion *x, long timeout, int state)
-{
-       return __wait_for_common(x, io_schedule_timeout, timeout, state);
-}
+               rq = context_switch(rq, prev, next); /* unlocks the rq */
+               cpu = cpu_of(rq);
+       } else {
+               lockdep_unpin_lock(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
+       }
 
-/**
- * wait_for_completion: - waits for completion of a task
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout.
- *
- * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
- * and interrupt capability. Also see complete().
- */
-void __sched wait_for_completion(struct completion *x)
-{
-       wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+       balance_callback(rq);
 }
-EXPORT_SYMBOL(wait_for_completion);
 
-/**
- * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
+static inline void sched_submit_work(struct task_struct *tsk)
 {
-       return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
+       if (!tsk->state || tsk_is_pi_blocked(tsk))
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
 }
-EXPORT_SYMBOL(wait_for_completion_timeout);
 
-/**
- * wait_for_completion_io: - waits for completion of a task
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout. The caller is accounted as waiting
- * for IO.
- */
-void __sched wait_for_completion_io(struct completion *x)
+asmlinkage __visible void __sched schedule(void)
 {
-       wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_io);
+       struct task_struct *tsk = current;
 
-/**
- * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible. The caller is accounted as waiting for IO.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
-{
-       return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+       sched_submit_work(tsk);
+       do {
+               preempt_disable();
+               __schedule();
+               sched_preempt_enable_no_resched();
+       } while (need_resched());
 }
-EXPORT_SYMBOL(wait_for_completion_io_timeout);
+EXPORT_SYMBOL(schedule);
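For orientation, a minimal sketch of the sleeper pattern this entry point serves; `my_wq` and `my_cond` are hypothetical, and the point is that schedule() flushes plugged IO via sched_submit_work() before blocking:

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical */
static bool my_cond;			/* hypothetical */

static void my_wait_for_cond(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();	/* sched_submit_work() runs before __schedule() */
	}
	finish_wait(&my_wq, &wait);
}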
 
-/**
- * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
- * @x:  holds the state of this particular completion
- *
- * This waits for completion of a specific task to be signaled. It is
- * interruptible.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
-int __sched wait_for_completion_interruptible(struct completion *x)
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage __visible void __sched schedule_user(void)
 {
-       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
-       if (t == -ERESTARTSYS)
-               return t;
-       return 0;
+       /*
+        * If we come here after a random call to set_need_resched(),
+        * or we have been woken up remotely but the IPI has not yet arrived,
+        * we haven't yet exited the RCU idle mode. Do it here manually until
+        * we find a better solution.
+        *
+        * NB: There are buggy callers of this function.  Ideally we
+        * should warn if prev_state != CONTEXT_USER, but that will trigger
+        * too frequently to make sense yet.
+        */
+       enum ctx_state prev_state = exception_enter();
+       schedule();
+       exception_exit(prev_state);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+#endif
 
 /**
- * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ * schedule_preempt_disabled - called with preemption disabled
  *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Returns with preemption disabled. Note: preempt_count must be 1.
  */
-long __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
-                                         unsigned long timeout)
+void __sched schedule_preempt_disabled(void)
 {
-       return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
+       sched_preempt_enable_no_resched();
+       schedule();
+       preempt_disable();
 }
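A hedged sketch of a typical call site (compare kthread startup): the caller enters and leaves with preempt_count == 1:

	preempt_disable();
	__set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_preempt_disabled();	/* briefly enables preemption, sleeps */
	/* preemption is disabled again here */
	preempt_enable();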
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-/**
- * wait_for_completion_killable: - waits for completion of a task (killable)
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It can be
- * interrupted by a kill signal.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
-int __sched wait_for_completion_killable(struct completion *x)
+static void __sched notrace preempt_schedule_common(void)
 {
-       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
-       if (t == -ERESTARTSYS)
-               return t;
-       return 0;
-}
-EXPORT_SYMBOL(wait_for_completion_killable);
+       do {
+               preempt_active_enter();
+               __schedule();
+               preempt_active_exit();
 
-/**
- * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be
- * signaled or for a specified timeout to expire. It can be
- * interrupted by a kill signal. The timeout is in jiffies.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
- */
-long __sched
-wait_for_completion_killable_timeout(struct completion *x,
-                                    unsigned long timeout)
-{
-       return wait_for_common(x, timeout, TASK_KILLABLE);
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+       } while (need_resched());
 }
-EXPORT_SYMBOL(wait_for_completion_killable_timeout);
 
-/**
- *     try_wait_for_completion - try to decrement a completion without blocking
- *     @x:     completion structure
- *
- *     Returns: 0 if a decrement cannot be done without blocking
- *              1 if a decrement succeeded.
- *
- *     If a completion is being used as a counting completion,
- *     attempt to decrement the counter without blocking. This
- *     enables us to avoid waiting if the resource the completion
- *     is protecting is not available.
+#ifdef CONFIG_PREEMPT
+/*
+ * This is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable(). Preemption off the return-from-interrupt
+ * path does not come through here; it goes via preempt_schedule_irq().
  */
-bool try_wait_for_completion(struct completion *x)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
-       unsigned long flags;
-       int ret = 1;
+       /*
+        * If there is a non-zero preempt_count or interrupts are disabled,
+        * we do not want to preempt the current task. Just return.
+        */
+       if (likely(!preemptible()))
+               return;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
-       if (!x->done)
-               ret = 0;
-       else
-               x->done--;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-       return ret;
+       preempt_schedule_common();
 }
-EXPORT_SYMBOL(try_wait_for_completion);
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
 
 /**
- *     completion_done - Test to see if a completion has any waiters
- *     @x:     completion structure
+ * preempt_schedule_notrace - preempt_schedule called by tracing
  *
- *     Returns: 0 if there are waiters (wait_for_completion() in progress)
- *              1 if there are no waiters.
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
  *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
  */
-bool completion_done(struct completion *x)
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 {
-       unsigned long flags;
-       int ret = 1;
+       enum ctx_state prev_ctx;
 
-       spin_lock_irqsave(&x->wait.lock, flags);
-       if (!x->done)
-               ret = 0;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-       return ret;
-}
-EXPORT_SYMBOL(completion_done);
+       if (likely(!preemptible()))
+               return;
 
-static long __sched
-sleep_on_common(wait_queue_head_t *q, int state, long timeout)
-{
-       unsigned long flags;
-       wait_queue_t wait;
+       do {
+               /*
+                * Use the raw __preempt_count_add/sub() ops that don't
+                * call out to other functions: we can't call functions
+                * before preemption is disabled, because disabling
+                * preemption is what guards against tracing recursion here.
+                */
+               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+               barrier();
+               /*
+                * Needs preempt disabled in case user_exit() is traced
+                * and the tracer calls preempt_enable_notrace() causing
+                * an infinite recursion.
+                */
+               prev_ctx = exception_enter();
+               __schedule();
+               exception_exit(prev_ctx);
 
-       init_waitqueue_entry(&wait, current);
+               barrier();
+               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+       } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
-       __set_current_state(state);
+#endif /* CONFIG_PREEMPT */
 
-       spin_lock_irqsave(&q->lock, flags);
-       __add_wait_queue(q, &wait);
-       spin_unlock(&q->lock);
-       timeout = schedule_timeout(timeout);
-       spin_lock_irq(&q->lock);
-       __remove_wait_queue(q, &wait);
-       spin_unlock_irqrestore(&q->lock, flags);
+/*
+ * This is the entry point to schedule() from kernel preemption
+ * off of IRQ context.
+ * Note that this is called and returns with IRQs disabled, which
+ * protects us against recursive calls from IRQ context.
+ */
+asmlinkage __visible void __sched preempt_schedule_irq(void)
+{
+       enum ctx_state prev_state;
 
-       return timeout;
-}
+       /* Catch callers which need to be fixed */
+       BUG_ON(preempt_count() || !irqs_disabled());
 
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
-{
-       sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-EXPORT_SYMBOL(interruptible_sleep_on);
+       prev_state = exception_enter();
 
-long __sched
-interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
-{
-       return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
-}
-EXPORT_SYMBOL(interruptible_sleep_on_timeout);
+       do {
+               preempt_active_enter();
+               local_irq_enable();
+               __schedule();
+               local_irq_disable();
+               preempt_active_exit();
+       } while (need_resched());
 
-void __sched sleep_on(wait_queue_head_t *q)
-{
-       sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+       exception_exit(prev_state);
 }
-EXPORT_SYMBOL(sleep_on);
 
-long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
+                         void *key)
 {
-       return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
+       return try_to_wake_up(curr->private, mode, wake_flags);
 }
-EXPORT_SYMBOL(sleep_on_timeout);
+EXPORT_SYMBOL(default_wake_function);
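Sketch of a custom wake callback that defers to this default; my_key_matches() is a hypothetical filter of the kind keyed wakeups use:

static int my_wake(wait_queue_t *curr, unsigned mode, int flags, void *key)
{
	if (key && !my_key_matches(key))	/* hypothetical predicate */
		return 0;			/* not a match, keep scanning */
	return default_wake_function(curr, mode, flags, key);
}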
 
 #ifdef CONFIG_RT_MUTEXES
 
@@ -3609,15 +3327,16 @@ EXPORT_SYMBOL(sleep_on_timeout);
  * This function changes the 'effective' priority of a task. It does
  * not touch ->normal_prio like __setscheduler().
  *
- * Used by the rt_mutex code to implement priority inheritance logic.
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. The call site only calls us if the task's priority actually changed.
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       int oldprio, on_rq, running;
+       int oldprio, queued, running, enqueue_flag = 0;
        struct rq *rq;
        const struct sched_class *prev_class;
 
-       BUG_ON(prio < 0 || prio > MAX_PRIO);
+       BUG_ON(prio > MAX_PRIO);
 
        rq = __task_rq_lock(p);
 
@@ -3642,37 +3361,69 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        trace_sched_pi_setprio(p, prio);
        oldprio = p->prio;
        prev_class = p->sched_class;
-       on_rq = p->on_rq;
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
+               put_prev_task(rq, p);
 
-       if (rt_prio(prio))
+       /*
+        * Boosting conditions are:
+        * 1. -rt task is running and holds mutex A
+        *      --> -dl task blocks on mutex A
+        *
+        * 2. -dl task is running and holds mutex A
+        *      --> -dl task blocks on mutex A and could preempt the
+        *          running task
+        */
+       if (dl_prio(prio)) {
+               struct task_struct *pi_task = rt_mutex_get_top_task(p);
+               if (!dl_prio(p->normal_prio) ||
+                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+                       p->dl.dl_boosted = 1;
+                       enqueue_flag = ENQUEUE_REPLENISH;
+               } else
+                       p->dl.dl_boosted = 0;
+               p->sched_class = &dl_sched_class;
+       } else if (rt_prio(prio)) {
+               if (dl_prio(oldprio))
+                       p->dl.dl_boosted = 0;
+               if (oldprio < prio)
+                       enqueue_flag = ENQUEUE_HEAD;
                p->sched_class = &rt_sched_class;
-       else
+       } else {
+               if (dl_prio(oldprio))
+                       p->dl.dl_boosted = 0;
+               if (rt_prio(oldprio))
+                       p->rt.timeout = 0;
                p->sched_class = &fair_sched_class;
+       }
 
        p->prio = prio;
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
-               enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
+       if (queued)
+               enqueue_task(rq, p, enqueue_flag);
 
        check_class_changed(rq, p, prev_class, oldprio);
 out_unlock:
+       preempt_disable(); /* prevent rq from going away on us */
        __task_rq_unlock(rq);
+
+       balance_callback(rq);
+       preempt_enable();
 }
 #endif
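To make the branch ordering above concrete, a sketch of the prio ranges involved (constants as in this kernel's headers, to the best of my reading):

/*
 *   dl_prio(prio):  prio < MAX_DL_PRIO (0),   i.e. prio == -1 here
 *   rt_prio(prio):  prio < MAX_RT_PRIO (100)
 *   fair otherwise: prio in [100, 140)
 *
 * Note that rt_prio() is also true for deadline prios, which is why
 * dl_prio() must be tested first in rt_mutex_setprio().
 */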
+
 void set_user_nice(struct task_struct *p, long nice)
 {
-       int old_prio, delta, on_rq;
+       int old_prio, delta, queued;
        unsigned long flags;
        struct rq *rq;
 
-       if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+       if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
                return;
        /*
         * We have to be careful, if called from sys_setpriority(),
@@ -3683,14 +3434,14 @@ void set_user_nice(struct task_struct *p, long nice)
         * The RT priorities are set via sched_setscheduler(), but we still
         * allow the 'normal' nice value to be set - but as expected
         * it won't have any effect on scheduling until the task is
-        * SCHED_FIFO/SCHED_RR:
+        * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
         */
-       if (task_has_rt_policy(p)) {
+       if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
                p->static_prio = NICE_TO_PRIO(nice);
                goto out_unlock;
        }
-       on_rq = p->on_rq;
-       if (on_rq)
+       queued = task_on_rq_queued(p);
+       if (queued)
                dequeue_task(rq, p, 0);
 
        p->static_prio = NICE_TO_PRIO(nice);
@@ -3699,14 +3450,14 @@ void set_user_nice(struct task_struct *p, long nice)
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;
 
-       if (on_rq) {
+       if (queued) {
                enqueue_task(rq, p, 0);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 out_unlock:
        task_rq_unlock(rq, p, &flags);
@@ -3721,7 +3472,7 @@ EXPORT_SYMBOL(set_user_nice);
 int can_nice(const struct task_struct *p, const int nice)
 {
        /* convert nice value [19,-20] to rlimit style value [1,40] */
-       int nice_rlim = 20 - nice;
+       int nice_rlim = nice_to_rlimit(nice);
 
        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
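For reference, a sketch of the mapping nice_to_rlimit() performs, consistent with the comment above:

static inline long nice_to_rlimit_sketch(long nice)
{
	/* nice 19 -> 1, nice 0 -> 20, nice -20 -> 40 (MAX_NICE == 19) */
	return MAX_NICE - nice + 1;
}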
@@ -3745,17 +3496,10 @@ SYSCALL_DEFINE1(nice, int, increment)
         * We don't have to worry. Conceptually one call occurs first
         * and we have a single winner.
         */
-       if (increment < -40)
-               increment = -40;
-       if (increment > 40)
-               increment = 40;
-
-       nice = TASK_NICE(current) + increment;
-       if (nice < -20)
-               nice = -20;
-       if (nice > 19)
-               nice = 19;
+       increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
+       nice = task_nice(current) + increment;
 
+       nice = clamp_val(nice, MIN_NICE, MAX_NICE);
        if (increment < 0 && !can_nice(current, nice))
                return -EPERM;
 
@@ -3773,7 +3517,7 @@ SYSCALL_DEFINE1(nice, int, increment)
  * task_prio - return the priority value of a given task.
  * @p: the task in question.
  *
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
  * RT tasks are offset by -200. Normal tasks are centered
  * around 0, value goes from -16 to +15.
  */
@@ -3782,19 +3526,11 @@ int task_prio(const struct task_struct *p)
        return p->prio - MAX_RT_PRIO;
 }
 
-/**
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- */
-int task_nice(const struct task_struct *p)
-{
-       return TASK_NICE(p);
-}
-EXPORT_SYMBOL(task_nice);
-
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
  */
 int idle_cpu(int cpu)
 {
@@ -3817,6 +3553,8 @@ int idle_cpu(int cpu)
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
  */
 struct task_struct *idle_task(int cpu)
 {
@@ -3826,26 +3564,159 @@ struct task_struct *idle_task(int cpu)
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
  */
 static struct task_struct *find_process_by_pid(pid_t pid)
 {
        return pid ? find_task_by_vpid(pid) : current;
 }
 
-/* Actually do priority change: must hold rq lock. */
+/*
+ * This function initializes the sched_dl_entity of a newly becoming
+ * SCHED_DEADLINE task.
+ *
+ * Only the static values are considered here, the actual runtime and the
+ * absolute deadline will be properly calculated when the task is enqueued
+ * for the first time with its new policy.
+ */
 static void
-__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
+__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       dl_se->dl_runtime = attr->sched_runtime;
+       dl_se->dl_deadline = attr->sched_deadline;
+       dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
+       dl_se->flags = attr->sched_flags;
+       dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point. This would include retaining the task_struct until that time
+        * and change dl_overflow() to not immediately decrement the current
+        * amount.
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time; so admission control is
+        * not on the safe side. It does however guarantee tasks will never
+        * consume more than promised.
+        */
+}
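A worked example of the bandwidth computation above, assuming to_ratio() yields a 20-bit fixed-point ratio as elsewhere in this file:

/*
 * runtime = 10ms, period = 100ms:
 *   dl_bw = to_ratio(100000000, 10000000)
 *         = (10000000 << 20) / 100000000
 *        ~= 0.1 * 2^20 = 104857, i.e. roughly 10% of one CPU
 */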
+
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY        -1
+
+static void __setscheduler_params(struct task_struct *p,
+               const struct sched_attr *attr)
+{
+       int policy = attr->sched_policy;
+
+       if (policy == SETPARAM_POLICY)
+               policy = p->policy;
+
        p->policy = policy;
-       p->rt_priority = prio;
+
+       if (dl_policy(policy))
+               __setparam_dl(p, attr);
+       else if (fair_policy(policy))
+               p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+
+       /*
+        * __sched_setscheduler() ensures attr->sched_priority == 0 when
+        * !rt_policy. Always setting this ensures that things like
+        * getparam()/getattr() don't report silly values for !rt tasks.
+        */
+       p->rt_priority = attr->sched_priority;
        p->normal_prio = normal_prio(p);
-       /* we are holding p->pi_lock already */
-       p->prio = rt_mutex_getprio(p);
-       if (rt_prio(p->prio))
+       set_load_weight(p);
+}
+
+/* Actually do priority change: must hold pi & rq lock. */
+static void __setscheduler(struct rq *rq, struct task_struct *p,
+                          const struct sched_attr *attr, bool keep_boost)
+{
+       __setscheduler_params(p, attr);
+
+       /*
+        * Keep a potential priority boosting if called from
+        * sched_setscheduler().
+        */
+       if (keep_boost)
+               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+       else
+               p->prio = normal_prio(p);
+
+       if (dl_prio(p->prio))
+               p->sched_class = &dl_sched_class;
+       else if (rt_prio(p->prio))
                p->sched_class = &rt_sched_class;
        else
                p->sched_class = &fair_sched_class;
-       set_load_weight(p);
+}
+
+static void
+__getparam_dl(struct task_struct *p, struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       attr->sched_priority = p->rt_priority;
+       attr->sched_runtime = dl_se->dl_runtime;
+       attr->sched_deadline = dl_se->dl_deadline;
+       attr->sched_period = dl_se->dl_period;
+       attr->sched_flags = dl_se->flags;
+}
+
+/*
+ * This function validates the new parameters of a -deadline task.
+ * We require the deadline to be non-zero and greater than or equal
+ * to the runtime, and the period to be either zero or greater than
+ * or equal to the deadline. Furthermore, we have to be sure that
+ * user parameters are above the internal resolution of 1us (we
+ * check sched_runtime only since it is always the smaller one) and
+ * below 2^63 ns (we have to check both sched_deadline and
+ * sched_period, as the latter can be zero).
+ */
+static bool
+__checkparam_dl(const struct sched_attr *attr)
+{
+       /* deadline != 0 */
+       if (attr->sched_deadline == 0)
+               return false;
+
+       /*
+        * Since we truncate DL_SCALE bits, make sure we're at least
+        * that big.
+        */
+       if (attr->sched_runtime < (1ULL << DL_SCALE))
+               return false;
+
+       /*
+        * Since we use the MSB for wrap-around and sign issues, make
+        * sure it's not set (mind that period can be equal to zero).
+        */
+       if (attr->sched_deadline & (1ULL << 63) ||
+           attr->sched_period & (1ULL << 63))
+               return false;
+
+       /* runtime <= deadline <= period (if period != 0) */
+       if ((attr->sched_period != 0 &&
+            attr->sched_period < attr->sched_deadline) ||
+           attr->sched_deadline < attr->sched_runtime)
+               return false;
+
+       return true;
 }
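A hedged user-space sketch of parameters that satisfy these checks. The raw syscall is used since libc wrappers may be missing (SYS_sched_setattr needs recent headers); the local struct mirrors the uapi sched_attr layout, and SCHED_DEADLINE is 6 in uapi headers of this era:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr_user {		/* mirrors struct sched_attr (uapi) */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr_user attr = {
		.size		= sizeof(attr),
		.sched_policy	= 6,			/* SCHED_DEADLINE */
		.sched_runtime	=  10 * 1000 * 1000,	/* 10ms, >= 1us   */
		.sched_deadline	=  30 * 1000 * 1000,	/* >= runtime     */
		.sched_period	= 100 * 1000 * 1000,	/* >= deadline    */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0))	/* pid 0 == self */
		perror("sched_setattr");
	return 0;
}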
 
 /*
@@ -3864,10 +3735,28 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
-static int __sched_setscheduler(struct task_struct *p, int policy,
-                               const struct sched_param *param, bool user)
+static bool dl_param_changed(struct task_struct *p,
+               const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       if (dl_se->dl_runtime != attr->sched_runtime ||
+               dl_se->dl_deadline != attr->sched_deadline ||
+               dl_se->dl_period != attr->sched_period ||
+               dl_se->flags != attr->sched_flags)
+               return true;
+
+       return false;
+}
+
+static int __sched_setscheduler(struct task_struct *p,
+                               const struct sched_attr *attr,
+                               bool user, bool pi)
 {
-       int retval, oldprio, oldpolicy = -1, on_rq, running;
+       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
+                     MAX_RT_PRIO - 1 - attr->sched_priority;
+       int retval, oldprio, oldpolicy = -1, queued, running;
+       int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
@@ -3881,31 +3770,40 @@ recheck:
                reset_on_fork = p->sched_reset_on_fork;
                policy = oldpolicy = p->policy;
        } else {
-               reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
-               policy &= ~SCHED_RESET_ON_FORK;
+               reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK);
 
-               if (policy != SCHED_FIFO && policy != SCHED_RR &&
+               if (policy != SCHED_DEADLINE &&
+                               policy != SCHED_FIFO && policy != SCHED_RR &&
                                policy != SCHED_NORMAL && policy != SCHED_BATCH &&
                                policy != SCHED_IDLE)
                        return -EINVAL;
        }
 
+       if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK))
+               return -EINVAL;
+
        /*
         * Valid priorities for SCHED_FIFO and SCHED_RR are
         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
         * SCHED_BATCH and SCHED_IDLE is 0.
         */
-       if (param->sched_priority < 0 ||
-           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
-           (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
+       if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
+           (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
                return -EINVAL;
-       if (rt_policy(policy) != (param->sched_priority != 0))
+       if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
+           (rt_policy(policy) != (attr->sched_priority != 0)))
                return -EINVAL;
 
        /*
         * Allow unprivileged RT tasks to decrease priority:
         */
        if (user && !capable(CAP_SYS_NICE)) {
+               if (fair_policy(policy)) {
+                       if (attr->sched_nice < task_nice(p) &&
+                           !can_nice(p, attr->sched_nice))
+                               return -EPERM;
+               }
+
                if (rt_policy(policy)) {
                        unsigned long rlim_rtprio =
                                        task_rlimit(p, RLIMIT_RTPRIO);
@@ -3915,17 +3813,26 @@ recheck:
                                return -EPERM;
 
                        /* can't increase priority */
-                       if (param->sched_priority > p->rt_priority &&
-                           param->sched_priority > rlim_rtprio)
+                       if (attr->sched_priority > p->rt_priority &&
+                           attr->sched_priority > rlim_rtprio)
                                return -EPERM;
                }
 
+               /*
+                * Can't set/change SCHED_DEADLINE policy at all for now
+                * (safest behavior); in the future we would like to allow
+                * unprivileged DL tasks to increase their relative deadline
+                * or reduce their runtime (both of which reduce utilization).
+                */
+               if (dl_policy(policy))
+                       return -EPERM;
+
                /*
                 * Treat SCHED_IDLE as nice 20. Only allow a switch to
                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
-                       if (!can_nice(p, TASK_NICE(p)))
+                       if (!can_nice(p, task_nice(p)))
                                return -EPERM;
                }
 
@@ -3962,16 +3869,25 @@ recheck:
        }
 
        /*
-        * If not changing anything there's no need to proceed further:
+        * If not changing anything there's no need to proceed further,
+        * but store a possible modification of reset_on_fork.
         */
-       if (unlikely(policy == p->policy && (!rt_policy(policy) ||
-                       param->sched_priority == p->rt_priority))) {
+       if (unlikely(policy == p->policy)) {
+               if (fair_policy(policy) && attr->sched_nice != task_nice(p))
+                       goto change;
+               if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
+                       goto change;
+               if (dl_policy(policy) && dl_param_changed(p, attr))
+                       goto change;
+
+               p->sched_reset_on_fork = reset_on_fork;
                task_rq_unlock(rq, p, &flags);
                return 0;
        }
+change:
 
-#ifdef CONFIG_RT_GROUP_SCHED
        if (user) {
+#ifdef CONFIG_RT_GROUP_SCHED
                /*
                 * Do not allow realtime tasks into groups that have no runtime
                 * assigned.
@@ -3982,8 +3898,24 @@ recheck:
                        task_rq_unlock(rq, p, &flags);
                        return -EPERM;
                }
-       }
 #endif
+#ifdef CONFIG_SMP
+               if (dl_bandwidth_enabled() && dl_policy(policy)) {
+                       cpumask_t *span = rq->rd->span;
+
+                       /*
+                        * Don't allow tasks with an affinity mask smaller than
+                        * the entire root_domain to become SCHED_DEADLINE. We
+                        * will also fail if there's no bandwidth available.
+                        */
+                       if (!cpumask_subset(span, &p->cpus_allowed) ||
+                           rq->rd->dl_bw.bw == 0) {
+                               task_rq_unlock(rq, p, &flags);
+                               return -EPERM;
+                       }
+               }
+#endif
+       }
 
        /* recheck policy now with rq lock held */
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
@@ -3991,47 +3923,113 @@ recheck:
                task_rq_unlock(rq, p, &flags);
                goto recheck;
        }
-       on_rq = p->on_rq;
+
+       /*
+        * If setscheduling to SCHED_DEADLINE (or changing the parameters
+        * of a SCHED_DEADLINE task) we need to check if enough bandwidth
+        * is available.
+        */
+       if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
+               task_rq_unlock(rq, p, &flags);
+               return -EBUSY;
+       }
+
+       p->sched_reset_on_fork = reset_on_fork;
+       oldprio = p->prio;
+
+       if (pi) {
+               /*
+                * Take priority boosted tasks into account. If the new
+                * effective priority is unchanged, we just store the new
+                * normal parameters and do not touch the scheduler class and
+                * the runqueue. This will be done when the task deboosts
+                * itself.
+                */
+               new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+               if (new_effective_prio == oldprio) {
+                       __setscheduler_params(p, attr);
+                       task_rq_unlock(rq, p, &flags);
+                       return 0;
+               }
+       }
+
+       queued = task_on_rq_queued(p);
        running = task_current(rq, p);
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, p, 0);
        if (running)
-               p->sched_class->put_prev_task(rq, p);
-
-       p->sched_reset_on_fork = reset_on_fork;
+               put_prev_task(rq, p);
 
-       oldprio = p->prio;
        prev_class = p->sched_class;
-       __setscheduler(rq, p, policy, param->sched_priority);
+       __setscheduler(rq, p, attr, pi);
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
-               enqueue_task(rq, p, 0);
+       if (queued) {
+               /*
+                * We enqueue to tail when the priority of a task is
+                * increased (user space view).
+                */
+               enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
+       }
 
        check_class_changed(rq, p, prev_class, oldprio);
+       preempt_disable(); /* prevent rq from going away on us */
        task_rq_unlock(rq, p, &flags);
 
-       rt_mutex_adjust_pi(p);
+       if (pi)
+               rt_mutex_adjust_pi(p);
+
+       /*
+        * Run balance callbacks after we've adjusted the PI chain.
+        */
+       balance_callback(rq);
+       preempt_enable();
 
        return 0;
 }
 
+static int _sched_setscheduler(struct task_struct *p, int policy,
+                              const struct sched_param *param, bool check)
+{
+       struct sched_attr attr = {
+               .sched_policy   = policy,
+               .sched_priority = param->sched_priority,
+               .sched_nice     = PRIO_TO_NICE(p->static_prio),
+       };
+
+       /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+       if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
+               attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+               policy &= ~SCHED_RESET_ON_FORK;
+               attr.sched_policy = policy;
+       }
+
+       return __sched_setscheduler(p, &attr, check, true);
+}
 /**
  * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  * @p: the task in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  *
+ * Return: 0 on success. An error code otherwise.
+ *
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
                       const struct sched_param *param)
 {
-       return __sched_setscheduler(p, policy, param, true);
+       return _sched_setscheduler(p, policy, param, true);
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+       return __sched_setscheduler(p, attr, true, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr);
+
 /**
  * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
  * @p: the task in question.
@@ -4042,11 +4040,13 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * current context has permission.  For example, this is needed in
  * stop_machine(): we create temporary high priority worker threads,
  * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
                               const struct sched_param *param)
 {
-       return __sched_setscheduler(p, policy, param, false);
+       return _sched_setscheduler(p, policy, param, false);
 }
 
 static int
@@ -4071,11 +4071,84 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
        return retval;
 }
 
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr,
+                          struct sched_attr *attr)
+{
+       u32 size;
+       int ret;
+
+       if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
+               return -EFAULT;
+
+       /*
+        * Zero the full structure, so that a short copy leaves the tail cleared.
+        */
+       memset(attr, 0, sizeof(*attr));
+
+       ret = get_user(size, &uattr->size);
+       if (ret)
+               return ret;
+
+       if (size > PAGE_SIZE)   /* silly large */
+               goto err_size;
+
+       if (!size)              /* abi compat */
+               size = SCHED_ATTR_SIZE_VER0;
+
+       if (size < SCHED_ATTR_SIZE_VER0)
+               goto err_size;
+
+       /*
+        * If we're handed a bigger struct than we know of,
+        * ensure all the unknown bits are 0 - i.e. new
+        * user-space does not rely on any kernel feature
+        * extensions we don't know about yet.
+        */
+       if (size > sizeof(*attr)) {
+               unsigned char __user *addr;
+               unsigned char __user *end;
+               unsigned char val;
+
+               addr = (void __user *)uattr + sizeof(*attr);
+               end  = (void __user *)uattr + size;
+
+               for (; addr < end; addr++) {
+                       ret = get_user(val, addr);
+                       if (ret)
+                               return ret;
+                       if (val)
+                               goto err_size;
+               }
+               size = sizeof(*attr);
+       }
+
+       ret = copy_from_user(attr, uattr, size);
+       if (ret)
+               return -EFAULT;
+
+       /*
+        * XXX: do we want to be lenient like existing syscalls; or do we want
+        * to be strict and return an error on out-of-bounds values?
+        */
+       attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
+
+       return 0;
+
+err_size:
+       put_user(sizeof(*attr), &uattr->size);
+       return -E2BIG;
+}
+
 /**
  * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  * @pid: the pid in question.
  * @policy: new policy.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
                struct sched_param __user *, param)
@@ -4091,15 +4164,53 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  * sys_sched_setparam - set/change the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
-       return do_sched_setscheduler(pid, -1, param);
+       return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
+}
+
+/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+                              unsigned int, flags)
+{
+       struct sched_attr attr;
+       struct task_struct *p;
+       int retval;
+
+       if (!uattr || pid < 0 || flags)
+               return -EINVAL;
+
+       retval = sched_copy_attr(uattr, &attr);
+       if (retval)
+               return retval;
+
+       if ((int)attr.sched_policy < 0)
+               return -EINVAL;
+
+       rcu_read_lock();
+       retval = -ESRCH;
+       p = find_process_by_pid(pid);
+       if (p != NULL)
+               retval = sched_setattr(p, &attr);
+       rcu_read_unlock();
+
+       return retval;
 }
 
 /**
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
  */
 SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
@@ -4126,10 +4237,13 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  * sys_sched_getparam - get the RT priority of a thread
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
  */
 SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
-       struct sched_param lp;
+       struct sched_param lp = { .sched_priority = 0 };
        struct task_struct *p;
        int retval;
 
@@ -4146,7 +4260,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (retval)
                goto out_unlock;
 
-       lp.sched_priority = p->rt_priority;
+       if (task_has_rt_policy(p))
+               lp.sched_priority = p->rt_priority;
        rcu_read_unlock();
 
        /*
@@ -4161,19 +4276,103 @@ out_unlock:
        return retval;
 }
 
+static int sched_read_attr(struct sched_attr __user *uattr,
+                          struct sched_attr *attr,
+                          unsigned int usize)
+{
+       int ret;
+
+       if (!access_ok(VERIFY_WRITE, uattr, usize))
+               return -EFAULT;
+
+       /*
+        * If we're handed a smaller struct than we know of,
+        * ensure all the unknown bits are 0 - i.e. old
+        * user-space does not get incomplete information.
+        */
+       if (usize < sizeof(*attr)) {
+               unsigned char *addr;
+               unsigned char *end;
+
+               addr = (void *)attr + usize;
+               end  = (void *)attr + sizeof(*attr);
+
+               for (; addr < end; addr++) {
+                       if (*addr)
+                               return -EFBIG;
+               }
+
+               attr->size = usize;
+       }
+
+       ret = copy_to_user(uattr, attr, attr->size);
+       if (ret)
+               return -EFAULT;
+
+       return 0;
+}
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+               unsigned int, size, unsigned int, flags)
+{
+       struct sched_attr attr = {
+               .size = sizeof(struct sched_attr),
+       };
+       struct task_struct *p;
+       int retval;
+
+       if (!uattr || pid < 0 || size > PAGE_SIZE ||
+           size < SCHED_ATTR_SIZE_VER0 || flags)
+               return -EINVAL;
+
+       rcu_read_lock();
+       p = find_process_by_pid(pid);
+       retval = -ESRCH;
+       if (!p)
+               goto out_unlock;
+
+       retval = security_task_getscheduler(p);
+       if (retval)
+               goto out_unlock;
+
+       attr.sched_policy = p->policy;
+       if (p->sched_reset_on_fork)
+               attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+       if (task_has_dl_policy(p))
+               __getparam_dl(p, &attr);
+       else if (task_has_rt_policy(p))
+               attr.sched_priority = p->rt_priority;
+       else
+               attr.sched_nice = task_nice(p);
+
+       rcu_read_unlock();
+
+       retval = sched_read_attr(uattr, &attr, size);
+       return retval;
+
+out_unlock:
+       rcu_read_unlock();
+       return retval;
+}
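The read-back counterpart, again hedged and using the raw syscall with the same struct as in the setter sketch above:

	struct sched_attr_user attr;

	if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0))
		perror("sched_getattr");
	else
		printf("policy=%u rt_prio=%u nice=%d\n",
		       attr.sched_policy, attr.sched_priority, attr.sched_nice);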
+
 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 {
        cpumask_var_t cpus_allowed, new_mask;
        struct task_struct *p;
        int retval;
 
-       get_online_cpus();
        rcu_read_lock();
 
        p = find_process_by_pid(pid);
        if (!p) {
                rcu_read_unlock();
-               put_online_cpus();
                return -ESRCH;
        }
 
@@ -4198,19 +4397,38 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
                rcu_read_lock();
                if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
                        rcu_read_unlock();
-                       goto out_unlock;
+                       goto out_free_new_mask;
                }
                rcu_read_unlock();
        }
 
        retval = security_task_setscheduler(p);
        if (retval)
-               goto out_unlock;
+               goto out_free_new_mask;
+
 
        cpuset_cpus_allowed(p, cpus_allowed);
        cpumask_and(new_mask, in_mask, cpus_allowed);
+
+       /*
+        * Since bandwidth control happens on root_domain basis,
+        * if admission test is enabled, we only admit -deadline
+        * tasks allowed to run on all the CPUs in the task's
+        * root_domain.
+        */
+#ifdef CONFIG_SMP
+       if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
+               rcu_read_lock();
+               if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
+                       retval = -EBUSY;
+                       rcu_read_unlock();
+                       goto out_free_new_mask;
+               }
+               rcu_read_unlock();
+       }
+#endif
 again:
-       retval = set_cpus_allowed_ptr(p, new_mask);
+       retval = __set_cpus_allowed_ptr(p, new_mask, true);
 
        if (!retval) {
                cpuset_cpus_allowed(p, cpus_allowed);
@@ -4224,13 +4442,12 @@ again:
                        goto again;
                }
        }
-out_unlock:
+out_free_new_mask:
        free_cpumask_var(new_mask);
 out_free_cpus_allowed:
        free_cpumask_var(cpus_allowed);
 out_put_task:
        put_task_struct(p);
-       put_online_cpus();
        return retval;
 }
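A user-space sketch of this path; per the check above, a -deadline task would typically see -EBUSY unless the new mask covers its whole root domain:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pin_self_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	/* pid 0 means the calling task */
	return sched_setaffinity(0, sizeof(set), &set);
}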
 
@@ -4250,6 +4467,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -4273,7 +4492,6 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        unsigned long flags;
        int retval;
 
-       get_online_cpus();
        rcu_read_lock();
 
        retval = -ESRCH;
@@ -4286,12 +4504,11 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
                goto out_unlock;
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
-       cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+       cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 out_unlock:
        rcu_read_unlock();
-       put_online_cpus();
 
        return retval;
 }
@@ -4301,6 +4518,8 @@ out_unlock:
  * @pid: pid of the process
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -4335,6 +4554,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  *
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
  */
 SYSCALL_DEFINE0(sched_yield)
 {
@@ -4357,22 +4578,10 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
 }
 
-static inline int should_resched(void)
-{
-       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}
-
-static void __cond_resched(void)
-{
-       add_preempt_count(PREEMPT_ACTIVE);
-       __schedule();
-       sub_preempt_count(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
-       if (should_resched()) {
-               __cond_resched();
+       if (should_resched(0)) {
+               preempt_schedule_common();
                return 1;
        }
        return 0;
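For orientation, the usual call-site shape that funnels into _cond_resched(); process_item() is hypothetical:

	/* Hypothetical long-running loop in process context. */
	for (i = 0; i < nr_items; i++) {
		process_item(i);
		cond_resched();	/* may end up in _cond_resched() -> __schedule() */
	}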
@@ -4389,7 +4598,7 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       int resched = should_resched();
+       int resched = should_resched(PREEMPT_LOCK_OFFSET);
        int ret = 0;
 
        lockdep_assert_held(lock);
@@ -4397,7 +4606,7 @@ int __cond_resched_lock(spinlock_t *lock)
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
-                       __cond_resched();
+                       preempt_schedule_common();
                else
                        cpu_relax();
                ret = 1;
@@ -4411,9 +4620,9 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (should_resched()) {
+       if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
                local_bh_enable();
-               __cond_resched();
+               preempt_schedule_common();
                local_bh_disable();
                return 1;
        }
@@ -4460,12 +4669,12 @@ EXPORT_SYMBOL(yield);
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
  *
- * Returns:
+ * Return:
  *     true (>0) if we indeed boosted the target task.
  *     false (0) if we failed to boost the target.
  *     -ESRCH if there's no task to yield to.
  */
-bool __sched yield_to(struct task_struct *p, bool preempt)
+int __sched yield_to(struct task_struct *p, bool preempt)
 {
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
@@ -4487,7 +4696,7 @@ again:
        }
 
        double_rq_lock(rq, p_rq);
-       while (task_rq(p) != p_rq) {
+       if (task_rq(p) != p_rq) {
                double_rq_unlock(rq, p_rq);
                goto again;
        }
@@ -4509,7 +4718,7 @@ again:
                 * fairness.
                 */
                if (preempt && rq != p_rq)
-                       resched_task(p_rq->curr);
+                       resched_curr(p_rq);
        }
 
 out_unlock:
@@ -4528,43 +4737,34 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       blk_schedule_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
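A hedged caller sketch: the task must set its sleep state first, since schedule_timeout() honours current->state:

	long left;

	set_current_state(TASK_UNINTERRUPTIBLE);
	left = io_schedule_timeout(HZ);		/* wait up to one second */
	if (!left)
		pr_warn("hypothetical IO wait timed out\n");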
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
@@ -4575,6 +4775,7 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
        case SCHED_RR:
                ret = MAX_USER_RT_PRIO-1;
                break;
+       case SCHED_DEADLINE:
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
@@ -4588,8 +4789,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  * sys_sched_get_priority_min - return minimum RT priority.
  * @policy: scheduling class.
  *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
  */
 SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
@@ -4600,6 +4802,7 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
        case SCHED_RR:
                ret = 1;
                break;
+       case SCHED_DEADLINE:
        case SCHED_NORMAL:
        case SCHED_BATCH:
        case SCHED_IDLE:
@@ -4615,6 +4818,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  *
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
  */
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
@@ -4640,7 +4846,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                goto out_unlock;
 
        rq = task_rq_lock(p, &flags);
-       time_slice = p->sched_class->get_rr_interval(rq, p);
+       time_slice = 0;
+       if (p->sched_class->get_rr_interval)
+               time_slice = p->sched_class->get_rr_interval(rq, p);
        task_rq_unlock(rq, p, &flags);
 
        rcu_read_unlock();
@@ -4659,9 +4867,10 @@ void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        int ppid;
-       unsigned state;
+       unsigned long state = p->state;
 
-       state = p->state ? __ffs(p->state) + 1 : 0;
+       if (state)
+               state = __ffs(state) + 1;
        printk(KERN_INFO "%-15.15s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
@@ -4678,8 +4887,10 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
+       ppid = 0;
        rcu_read_lock();
-       ppid = task_pid_nr(rcu_dereference(p->real_parent));
+       if (pid_alive(p))
+               ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();
        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), ppid,
@@ -4701,7 +4912,7 @@ void show_state_filter(unsigned long state_filter)
                "  task                        PC stack   pid father\n");
 #endif
        rcu_read_lock();
-       do_each_thread(g, p) {
+       for_each_process_thread(g, p) {
                /*
                 * reset the NMI-timeout; listing all tasks on a slow
                 * console might take a lot of time:
@@ -4709,7 +4920,7 @@ void show_state_filter(unsigned long state_filter)
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
-       } while_each_thread(g, p);
+       }
 
        touch_all_softlockup_watchdogs();
 
@@ -4724,7 +4935,7 @@ void show_state_filter(unsigned long state_filter)
                debug_show_all_locks();
 }
 
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+void init_idle_bootup_task(struct task_struct *idle)
 {
        idle->sched_class = &idle_sched_class;
 }
@@ -4737,18 +4948,27 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&idle->pi_lock, flags);
+       raw_spin_lock(&rq->lock);
 
-       __sched_fork(idle);
+       __sched_fork(0, idle);
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-       do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+       /*
+        * It's possible that init_idle() gets called multiple times on a task;
+        * in that case do_set_cpus_allowed() will not do the right thing.
+        *
+        * And since this is boot, we can forgo the serialization.
+        */
+       set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
        /*
         * We're having a chicken-and-egg problem: even though we are
         * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4764,167 +4984,156 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP)
+       idle->on_rq = TASK_ON_RQ_QUEUED;
+#ifdef CONFIG_SMP
        idle->on_cpu = 1;
 #endif
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
-       task_thread_info(idle)->preempt_count = 0;
+       init_idle_preempt_count(idle, cpu);
 
        /*
         * The idle tasks have their own, simple scheduling class:
-        */
-       idle->sched_class = &idle_sched_class;
-       ftrace_graph_init_idle_task(idle, cpu);
-       vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
-       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-#endif
-}
-
-#ifdef CONFIG_SMP
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-       if (p->sched_class && p->sched_class->set_cpus_allowed)
-               p->sched_class->set_cpus_allowed(p, new_mask);
-
-       cpumask_copy(&p->cpus_allowed, new_mask);
-       p->nr_cpus_allowed = cpumask_weight(new_mask);
-}
-
-/*
- * This is how migration works:
- *
- * 1) we invoke migration_cpu_stop() on the target CPU using
- *    stop_one_cpu().
- * 2) stopper starts to run (implicitly forcing the migrated thread
- *    off the CPU)
- * 3) it checks whether the migrated task is still in the wrong runqueue.
- * 4) if it's in the wrong runqueue then the migration thread removes
- *    it and puts it into the right queue.
- * 5) stopper completes and stop_one_cpu() returns and the migration
- *    is done.
- */
-
-/*
- * Change a given task's CPU affinity. Migrate the thread to a
- * proper CPU and schedule it away if the CPU it's executing on
- * is removed from the allowed bitmask.
- *
- * NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
- * call is not atomic; no spinlocks may be held.
- */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+        */
+       idle->sched_class = &idle_sched_class;
+       ftrace_graph_init_idle_task(idle, cpu);
+       vtime_init_idle(idle, cpu);
+#ifdef CONFIG_SMP
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
+}
+
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+                             const struct cpumask *trial)
 {
+       int ret = 1, trial_cpus;
+       struct dl_bw *cur_dl_b;
        unsigned long flags;
-       struct rq *rq;
-       unsigned int dest_cpu;
-       int ret = 0;
 
-       rq = task_rq_lock(p, &flags);
+       if (!cpumask_weight(cur))
+               return ret;
 
-       if (cpumask_equal(&p->cpus_allowed, new_mask))
-               goto out;
+       rcu_read_lock_sched();
+       cur_dl_b = dl_bw_of(cpumask_any(cur));
+       trial_cpus = cpumask_weight(trial);
 
-       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+       raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+       if (cur_dl_b->bw != -1 &&
+           cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+               ret = 0;
+       raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+       rcu_read_unlock_sched();
+
+       return ret;
+}
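/*
 * A worked example of the check above, with hypothetical numbers: with
 * the default 95% deadline bandwidth cap, bw corresponds to 0.95 CPUs
 * of utilization per CPU. If deadline tasks worth total_bw = 1.8 CPUs
 * have been admitted in a cpuset, shrinking it to trial_cpus = 2 is
 * allowed (0.95 * 2 = 1.9 >= 1.8), while shrinking it to a single CPU
 * is refused (0.95 * 1 = 0.95 < 1.8) and the function returns 0.
 */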
+
+int task_can_attach(struct task_struct *p,
+                   const struct cpumask *cs_cpus_allowed)
+{
+       int ret = 0;
+
+       /*
+        * Kthreads which disallow setaffinity shouldn't be moved
+        * to a new cpuset; we don't want to change their cpu
+        * affinity and isolating such threads by their set of
+        * allowed nodes is unnecessary.  Thus, cpusets are not
+        * applicable for such threads.  This also avoids having to
+        * check the success of set_cpus_allowed_ptr() on all attached
+        * tasks before cpus_allowed may be changed.
+        */
+       if (p->flags & PF_NO_SETAFFINITY) {
                ret = -EINVAL;
                goto out;
        }
 
-       do_set_cpus_allowed(p, new_mask);
+#ifdef CONFIG_SMP
+       if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
+                                             cs_cpus_allowed)) {
+               unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
+                                                       cs_cpus_allowed);
+               struct dl_bw *dl_b;
+               bool overflow;
+               int cpus;
+               unsigned long flags;
 
-       /* Can the task run on the task's current CPU? If so, we're done */
-       if (cpumask_test_cpu(task_cpu(p), new_mask))
-               goto out;
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(dest_cpu);
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(dest_cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+               if (overflow)
+                       ret = -EBUSY;
+               else {
+                       /*
+                        * We reserve space for this task in the destination
+                        * root_domain, as we can't fail after this point.
+                        * We will free resources in the source root_domain
+                        * later on (see set_cpus_allowed_dl()).
+                        */
+                       __dl_add(dl_b, p->dl.dl_bw);
+               }
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+               rcu_read_unlock_sched();
 
-       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
-               struct migration_arg arg = { p, dest_cpu };
-               /* Need help from migration thread: drop lock and wait. */
-               task_rq_unlock(rq, p, &flags);
-               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-               tlb_migrate_finish(p->mm);
-               return 0;
        }
+#endif
 out:
-       task_rq_unlock(rq, p, &flags);
-
        return ret;
 }
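/*
 * In rough form, the admission test used above requires that the
 * destination root_domain satisfies
 *
 *	total_bw + p->dl.dl_bw <= bw * cpus
 *
 * i.e. the already-admitted deadline bandwidth plus the incoming task's
 * bandwidth must fit within the per-CPU cap times the CPU count.
 * __dl_add() reserves the space immediately because the attach cannot
 * fail once this function has returned 0.
 */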
-EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
-/*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
- * this because either it can't run here any more (set_cpus_allowed()
- * away from this CPU, or CPU going down), or because we're
- * attempting to rebalance this task on exec (sched_exec).
- *
- * So we race with normal scheduler movements, but that's OK, as long
- * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
- */
-static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+#ifdef CONFIG_SMP
+
+#ifdef CONFIG_NUMA_BALANCING
+/* Migrate current task p to target_cpu */
+int migrate_task_to(struct task_struct *p, int target_cpu)
 {
-       struct rq *rq_dest, *rq_src;
-       int ret = 0;
+       struct migration_arg arg = { p, target_cpu };
+       int curr_cpu = task_cpu(p);
 
-       if (unlikely(!cpu_active(dest_cpu)))
-               return ret;
+       if (curr_cpu == target_cpu)
+               return 0;
 
-       rq_src = cpu_rq(src_cpu);
-       rq_dest = cpu_rq(dest_cpu);
+       if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+               return -EINVAL;
 
-       raw_spin_lock(&p->pi_lock);
-       double_rq_lock(rq_src, rq_dest);
-       /* Already moved. */
-       if (task_cpu(p) != src_cpu)
-               goto done;
-       /* Affinity changed (again). */
-       if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-               goto fail;
+       /* TODO: This is not properly updating schedstats */
 
-       /*
-        * If we're not on a rq, the next wake-up will ensure we're
-        * placed properly.
-        */
-       if (p->on_rq) {
-               dequeue_task(rq_src, p, 0);
-               set_task_cpu(p, dest_cpu);
-               enqueue_task(rq_dest, p, 0);
-               check_preempt_curr(rq_dest, p, 0);
-       }
-done:
-       ret = 1;
-fail:
-       double_rq_unlock(rq_src, rq_dest);
-       raw_spin_unlock(&p->pi_lock);
-       return ret;
+       trace_sched_move_numa(p, curr_cpu, target_cpu);
+       return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
 }
 
 /*
- * migration_cpu_stop - this will be executed by a highprio stopper thread
- * and performs thread migration by bumping thread off CPU then
- * 'pushing' onto another runqueue.
+ * Requeue a task on a given node and accurately track the number of NUMA
+ * tasks on the runqueues
  */
-static int migration_cpu_stop(void *data)
+void sched_setnuma(struct task_struct *p, int nid)
 {
-       struct migration_arg *arg = data;
+       struct rq *rq;
+       unsigned long flags;
+       bool queued, running;
 
-       /*
-        * The original target cpu might have gone down and we might
-        * be on another cpu but it doesn't matter.
-        */
-       local_irq_disable();
-       __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
-       local_irq_enable();
-       return 0;
+       rq = task_rq_lock(p, &flags);
+       queued = task_on_rq_queued(p);
+       running = task_current(rq, p);
+
+       if (queued)
+               dequeue_task(rq, p, 0);
+       if (running)
+               put_prev_task(rq, p);
+
+       p->numa_preferred_nid = nid;
+
+       if (running)
+               p->sched_class->set_curr_task(rq);
+       if (queued)
+               enqueue_task(rq, p, 0);
+       task_rq_unlock(rq, p, &flags);
 }
+#endif /* CONFIG_NUMA_BALANCING */
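/*
 * sched_setnuma() above follows the standard pattern for changing a
 * scheduling property of a possibly queued and/or running task. A
 * generic sketch (the property update itself is the only hypothetical
 * part; the caller must hold task_rq_lock()):
 */
static void change_task_property(struct rq *rq, struct task_struct *p)
{
	bool queued = task_on_rq_queued(p);
	bool running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, 0);		/* take p off its runqueue */
	if (running)
		put_prev_task(rq, p);		/* stop treating p as current */

	/* ... update the property while p is quiescent ... */

	if (running)
		p->sched_class->set_curr_task(rq); /* re-establish as current */
	if (queued)
		enqueue_task(rq, p, 0);		/* requeue under the new value */
}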
 
 #ifdef CONFIG_HOTPLUG_CPU
-
 /*
  * Ensures that the idle task is using init_mm right before its cpu goes
  * offline.
@@ -4935,8 +5144,10 @@ void idle_task_exit(void)
 
        BUG_ON(cpu_online(smp_processor_id()));
 
-       if (mm != &init_mm)
+       if (mm != &init_mm) {
                switch_mm(mm, &init_mm, current);
+               finish_arch_post_lock_switch();
+       }
        mmdrop(mm);
 }
 
@@ -4954,6 +5165,22 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static const struct sched_class fake_sched_class = {
+       .put_prev_task = put_prev_task_fake,
+};
+
+static struct task_struct fake_task = {
+       /*
+        * Avoid pull_{rt,dl}_task()
+        */
+       .prio = MAX_PRIO + 1,
+       .sched_class = &fake_sched_class,
+};
+
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -4962,9 +5189,9 @@ static void calc_load_migrate(struct rq *rq)
  * there's no concurrency possible, we hold the required locks anyway
  * because of lock validation efforts.
  */
-static void migrate_tasks(unsigned int dead_cpu)
+static void migrate_tasks(struct rq *dead_rq)
 {
-       struct rq *rq = cpu_rq(dead_cpu);
+       struct rq *rq = dead_rq;
        struct task_struct *next, *stop = rq->stop;
        int dest_cpu;
 
@@ -4979,7 +5206,14 @@ static void migrate_tasks(unsigned int dead_cpu)
         */
        rq->stop = NULL;
 
-       for ( ; ; ) {
+       /*
+        * The put_prev_task() and pick_next_task() sched class
+        * methods both need an up-to-date value of rq->clock[_task].
+        */
+       update_rq_clock(rq);
+
+       for (;;) {
                /*
                 * There's this thread running, bail when that's the only
                 * remaining thread.
@@ -4987,22 +5221,52 @@ static void migrate_tasks(unsigned int dead_cpu)
                if (rq->nr_running == 1)
                        break;
 
-               next = pick_next_task(rq);
+               /*
+                * pick_next_task() assumes rq->lock is pinned.
+                */
+               lockdep_pin_lock(&rq->lock);
+               next = pick_next_task(rq, &fake_task);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
 
-               /* Find suitable destination for @next, with force if needed. */
-               dest_cpu = select_fallback_rq(dead_cpu, next);
+               /*
+                * The rules for changing task_struct::cpus_allowed require
+                * holding both pi_lock and rq->lock; holding either one
+                * stabilizes the mask.
+                *
+                * Dropping rq->lock is not quite as disastrous as it usually
+                * is because !cpu_active at this point, which means
+                * load-balance will not interfere. Also, we're inside
+                * stop-machine.
+                */
+               lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock(&rq->lock);
+               raw_spin_lock(&next->pi_lock);
+               raw_spin_lock(&rq->lock);
 
-               __migrate_task(next, dead_cpu, dest_cpu);
+               /*
+                * Since we're inside stop-machine, _nothing_ should have
+                * changed the task, WARN if weird stuff happened, because in
+                * that case the above rq->lock drop is a fail too.
+                */
+               if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) {
+                       raw_spin_unlock(&next->pi_lock);
+                       continue;
+               }
 
-               raw_spin_lock(&rq->lock);
+               /* Find suitable destination for @next, with force if needed. */
+               dest_cpu = select_fallback_rq(dead_rq->cpu, next);
+
+               rq = __migrate_task(rq, next, dest_cpu);
+               if (rq != dead_rq) {
+                       raw_spin_unlock(&rq->lock);
+                       rq = dead_rq;
+                       raw_spin_lock(&rq->lock);
+               }
+               raw_spin_unlock(&next->pi_lock);
        }
 
        rq->stop = stop;
 }
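/*
 * The unlock/relock sequence in the loop above exists purely for lock
 * ordering: p->pi_lock nests outside rq->lock, and rq->lock is already
 * held, so it has to be released before pi_lock can be acquired:
 *
 *	raw_spin_unlock(&rq->lock);	// can't take pi_lock under rq->lock
 *	raw_spin_lock(&next->pi_lock);	// outer lock first
 *	raw_spin_lock(&rq->lock);	// then re-take the inner lock
 *
 * The WARN_ON() afterwards re-validates the task, since it was briefly
 * unprotected by rq->lock across that window.
 */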
-
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -5077,7 +5341,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-       struct ctl_table *table = sd_alloc_ctl_entry(13);
+       struct ctl_table *table = sd_alloc_ctl_entry(14);
 
        if (table == NULL)
                return NULL;
@@ -5105,14 +5369,17 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[10], "flags", &sd->flags,
                sizeof(int), 0644, proc_dointvec_minmax, false);
-       set_table_entry(&table[11], "name", sd->name,
+       set_table_entry(&table[11], "max_newidle_lb_cost",
+               &sd->max_newidle_lb_cost,
+               sizeof(long), 0644, proc_doulongvec_minmax, false);
+       set_table_entry(&table[12], "name", sd->name,
                CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-       /* &table[12] is terminator */
+       /* &table[13] is terminator */
 
        return table;
 }
 
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
        struct ctl_table *entry, *table;
        struct sched_domain *sd;
@@ -5165,8 +5432,7 @@ static void register_sched_domain_sysctl(void)
 /* may be called multiple times per register */
 static void unregister_sched_domain_sysctl(void)
 {
-       if (sd_sysctl_header)
-               unregister_sysctl_table(sd_sysctl_header);
+       unregister_sysctl_table(sd_sysctl_header);
        sd_sysctl_header = NULL;
        if (sd_ctl_dir[0].child)
                sd_free_ctl_entry(&sd_ctl_dir[0].child);
@@ -5178,7 +5444,7 @@ static void register_sched_domain_sysctl(void)
 static void unregister_sched_domain_sysctl(void)
 {
 }
-#endif
+#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */
 
 static void set_rq_online(struct rq *rq)
 {
@@ -5214,7 +5480,7 @@ static void set_rq_offline(struct rq *rq)
  * migration_call - callback that gets triggered when a CPU is added.
  * Here we can start up the necessary migration thread for the new CPU.
  */
-static int __cpuinit
+static int
 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
        int cpu = (long)hcpu;
@@ -5247,7 +5513,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               migrate_tasks(cpu);
+               migrate_tasks(rq);
                BUG_ON(rq->nr_running != 1); /* the migration thread */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
@@ -5268,16 +5534,33 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  * happens before everything else.  This has to be lower priority than
  * the notifier in the perf_event subsystem, though.
  */
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
        .notifier_call = migration_call,
        .priority = CPU_PRI_MIGRATION,
 };
 
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+static void set_cpu_rq_start_time(void)
+{
+       int cpu = smp_processor_id();
+       struct rq *rq = cpu_rq(cpu);
+
+       rq->age_stamp = sched_clock_cpu(cpu);
+}
+
+static int sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
+               set_cpu_rq_start_time();
+               return NOTIFY_OK;
+       case CPU_ONLINE:
+               /*
+                * At this point a starting CPU has marked itself as online via
+                * set_cpu_online(). But it might not yet have marked itself
+                * as active, which is essential from here on.
+                *
+                * Thus, fall-through and help the starting CPU along.
+                */
        case CPU_DOWN_FAILED:
                set_cpu_active((long)hcpu, true);
                return NOTIFY_OK;
@@ -5286,7 +5569,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
        }
 }
 
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+static int sched_cpu_inactive(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        switch (action & ~CPU_TASKS_FROZEN) {
@@ -5316,9 +5599,6 @@ static int __init migration_init(void)
        return 0;
 }
 early_initcall(migration_init);
-#endif
-
-#ifdef CONFIG_SMP
 
 static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
 
@@ -5343,9 +5623,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                                  struct cpumask *groupmask)
 {
        struct sched_group *group = sd->groups;
-       char str[256];
 
-       cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
        cpumask_clear(groupmask);
 
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5358,7 +5636,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                return -1;
        }
 
-       printk(KERN_CONT "span %s level %s\n", str, sd->name);
+       printk(KERN_CONT "span %*pbl level %s\n",
+              cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
                printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5377,18 +5656,6 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               /*
-                * Even though we initialize ->power to something semi-sane,
-                * we leave power_orig unset. This allows us to detect if
-                * domain iteration is still funny without causing /0 traps.
-                */
-               if (!group->sgp->power_orig) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: domain->cpu_power not "
-                                       "set\n");
-                       break;
-               }
-
                if (!cpumask_weight(sched_group_cpus(group))) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: empty group\n");
@@ -5404,12 +5671,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-               cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
-               printk(KERN_CONT " %s", str);
-               if (group->sgp->power != SCHED_POWER_SCALE) {
-                       printk(KERN_CONT " (cpu_power = %d)",
-                               group->sgp->power);
+               printk(KERN_CONT " %*pbl",
+                      cpumask_pr_args(sched_group_cpus(group)));
+               if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
+                       printk(KERN_CONT " (cpu_capacity = %d)",
+                               group->sgc->capacity);
                }
 
                group = group->next;
@@ -5467,8 +5733,9 @@ static int sd_degenerate(struct sched_domain *sd)
                         SD_BALANCE_NEWIDLE |
                         SD_BALANCE_FORK |
                         SD_BALANCE_EXEC |
-                        SD_SHARE_CPUPOWER |
-                        SD_SHARE_PKG_RESOURCES)) {
+                        SD_SHARE_CPUCAPACITY |
+                        SD_SHARE_PKG_RESOURCES |
+                        SD_SHARE_POWERDOMAIN)) {
                if (sd->groups != sd->groups->next)
                        return 0;
        }
@@ -5497,8 +5764,10 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
                                SD_BALANCE_NEWIDLE |
                                SD_BALANCE_FORK |
                                SD_BALANCE_EXEC |
-                               SD_SHARE_CPUPOWER |
-                               SD_SHARE_PKG_RESOURCES);
+                               SD_SHARE_CPUCAPACITY |
+                               SD_SHARE_PKG_RESOURCES |
+                               SD_PREFER_SIBLING |
+                               SD_SHARE_POWERDOMAIN);
                if (nr_node_ids == 1)
                        pflags &= ~SD_SERIALIZE;
        }
@@ -5513,6 +5782,8 @@ static void free_rootdomain(struct rcu_head *rcu)
        struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
 
        cpupri_cleanup(&rd->cpupri);
+       cpudl_cleanup(&rd->cpudl);
+       free_cpumask_var(rd->dlo_mask);
        free_cpumask_var(rd->rto_mask);
        free_cpumask_var(rd->online);
        free_cpumask_var(rd->span);
@@ -5535,7 +5806,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                cpumask_clear_cpu(rq->cpu, old_rd->span);
 
                /*
-                * If we dont want to free the old_rt yet then
+                * If we don't want to free the old_rd yet then
                 * set old_rd to NULL to skip the freeing later
                 * in this function:
                 */
@@ -5564,8 +5835,14 @@ static int init_rootdomain(struct root_domain *rd)
                goto out;
        if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
                goto free_span;
-       if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+       if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
                goto free_online;
+       if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+               goto free_dlo_mask;
+
+       init_dl_bw(&rd->dl_bw);
+       if (cpudl_init(&rd->cpudl) != 0)
+               goto free_dlo_mask;
 
        if (cpupri_init(&rd->cpupri) != 0)
                goto free_rto_mask;
@@ -5573,6 +5850,8 @@ static int init_rootdomain(struct root_domain *rd)
 
 free_rto_mask:
        free_cpumask_var(rd->rto_mask);
+free_dlo_mask:
+       free_cpumask_var(rd->dlo_mask);
 free_online:
        free_cpumask_var(rd->online);
 free_span:
@@ -5610,7 +5889,7 @@ static struct root_domain *alloc_rootdomain(void)
        return rd;
 }
 
-static void free_sched_groups(struct sched_group *sg, int free_sgp)
+static void free_sched_groups(struct sched_group *sg, int free_sgc)
 {
        struct sched_group *tmp, *first;
 
@@ -5621,8 +5900,8 @@ static void free_sched_groups(struct sched_group *sg, int free_sgp)
        do {
                tmp = sg->next;
 
-               if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
-                       kfree(sg->sgp);
+               if (free_sgc && atomic_dec_and_test(&sg->sgc->ref))
+                       kfree(sg->sgc);
 
                kfree(sg);
                sg = tmp;
@@ -5640,7 +5919,7 @@ static void free_sched_domain(struct rcu_head *rcu)
        if (sd->flags & SD_OVERLAP) {
                free_sched_groups(sd->groups, 1);
        } else if (atomic_dec_and_test(&sd->groups->ref)) {
-               kfree(sd->groups->sgp);
+               kfree(sd->groups->sgc);
                kfree(sd->groups);
        }
        kfree(sd);
@@ -5667,19 +5946,36 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * two cpus are in the same cache domain, see cpus_share_cache().
  */
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain *, sd_numa);
+DEFINE_PER_CPU(struct sched_domain *, sd_busy);
+DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
        struct sched_domain *sd;
+       struct sched_domain *busy_sd = NULL;
        int id = cpu;
+       int size = 1;
 
        sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-       if (sd)
+       if (sd) {
                id = cpumask_first(sched_domain_span(sd));
+               size = cpumask_weight(sched_domain_span(sd));
+               busy_sd = sd->parent; /* sd_busy */
+       }
+       rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+       per_cpu(sd_llc_size, cpu) = size;
        per_cpu(sd_llc_id, cpu) = id;
+
+       sd = lowest_flag_domain(cpu, SD_NUMA);
+       rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
+
+       sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
+       rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
 }
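/*
 * Consumers pair the rcu_assign_pointer() calls above with
 * rcu_dereference() under rcu_read_lock(). A minimal hypothetical
 * reader of sd_llc (the same value is in fact cached in sd_llc_size):
 */
static int llc_span_size(int cpu)
{
	struct sched_domain *sd;
	int size = 1;

	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	if (sd)
		size = cpumask_weight(sched_domain_span(sd));
	rcu_read_unlock();

	return size;
}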
 
 /*
@@ -5702,6 +5998,13 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
                        tmp->parent = parent->parent;
                        if (parent->parent)
                                parent->parent->child = tmp;
+                       /*
+                        * Transfer SD_PREFER_SIBLING down in case of a
+                        * degenerate parent; the spans match for this
+                        * so the property transfers.
+                        */
+                       if (parent->flags & SD_PREFER_SIBLING)
+                               tmp->flags |= SD_PREFER_SIBLING;
                        destroy_sched_domain(parent, cpu);
                } else
                        tmp = tmp->parent;
@@ -5725,9 +6028,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        update_top_cache_domain(cpu);
 }
 
-/* cpus with isolated domains */
-static cpumask_var_t cpu_isolated_map;
-
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
@@ -5738,17 +6038,6 @@ static int __init isolated_cpu_setup(char *str)
 
 __setup("isolcpus=", isolated_cpu_setup);
 
-static const struct cpumask *cpu_cpu_mask(int cpu)
-{
-       return cpumask_of_node(cpu_to_node(cpu));
-}
-
-struct sd_data {
-       struct sched_domain **__percpu sd;
-       struct sched_group **__percpu sg;
-       struct sched_group_power **__percpu sgp;
-};
-
 struct s_data {
        struct sched_domain ** __percpu sd;
        struct root_domain      *rd;
@@ -5761,21 +6050,6 @@ enum s_alloc {
        sa_none,
 };
 
-struct sched_domain_topology_level;
-
-typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-
-#define SDTL_OVERLAP   0x01
-
-struct sched_domain_topology_level {
-       sched_domain_init_f init;
-       sched_domain_mask_f mask;
-       int                 flags;
-       int                 numa_level;
-       struct sd_data      data;
-};
-
 /*
  * Build an iteration mask that can exclude certain CPUs from the upwards
  * domain traversal.
@@ -5821,7 +6095,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
        const struct cpumask *span = sched_domain_span(sd);
        struct cpumask *covered = sched_domains_tmpmask;
        struct sd_data *sdd = sd->private;
-       struct sched_domain *child;
+       struct sched_domain *sibling;
        int i;
 
        cpumask_clear(covered);
@@ -5832,10 +6106,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               child = *per_cpu_ptr(sdd->sd, i);
+               sibling = *per_cpu_ptr(sdd->sd, i);
 
                /* See the comment near build_group_mask(). */
-               if (!cpumask_test_cpu(i, sched_domain_span(child)))
+               if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
                        continue;
 
                sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5845,24 +6119,23 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
                        goto fail;
 
                sg_span = sched_group_cpus(sg);
-               if (child->child) {
-                       child = child->child;
-                       cpumask_copy(sg_span, sched_domain_span(child));
-               } else
+               if (sibling->child)
+                       cpumask_copy(sg_span, sched_domain_span(sibling->child));
+               else
                        cpumask_set_cpu(i, sg_span);
 
                cpumask_or(covered, covered, sg_span);
 
-               sg->sgp = *per_cpu_ptr(sdd->sgp, i);
-               if (atomic_inc_return(&sg->sgp->ref) == 1)
+               sg->sgc = *per_cpu_ptr(sdd->sgc, i);
+               if (atomic_inc_return(&sg->sgc->ref) == 1)
                        build_group_mask(sd, sg);
 
                /*
-                * Initialize sgp->power such that even if we mess up the
+                * Initialize sgc->capacity such that even if we mess up the
                 * domains and no possible iteration will get us here, we won't
                 * die on a /0 trap.
                 */
-               sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+               sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
 
                /*
                 * Make sure the first group of this domain contains the
@@ -5900,8 +6173,8 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 
        if (sg) {
                *sg = *per_cpu_ptr(sdd->sg, cpu);
-               (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
-               atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+               (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+               atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */
        }
 
        return cpu;
@@ -5910,7 +6183,7 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 /*
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
- * and ->cpu_power to 0.
+ * and ->cpu_capacity to 0.
  *
  * Assumes the sched_domain tree is fully constructed
  */
@@ -5926,7 +6199,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
        get_group(cpu, sdd, &sd->groups);
        atomic_inc(&sd->groups->ref);
 
-       if (cpu != cpumask_first(sched_domain_span(sd)))
+       if (cpu != cpumask_first(span))
                return 0;
 
        lockdep_assert_held(&sched_domains_mutex);
@@ -5936,14 +6209,12 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 
        for_each_cpu(i, span) {
                struct sched_group *sg;
-               int group = get_group(i, sdd, &sg);
-               int j;
+               int group, j;
 
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               cpumask_clear(sched_group_cpus(sg));
-               sg->sgp->power = 0;
+               group = get_group(i, sdd, &sg);
                cpumask_setall(sched_group_mask(sg));
 
                for_each_cpu(j, span) {
@@ -5966,20 +6237,20 @@ build_sched_groups(struct sched_domain *sd, int cpu)
 }
 
 /*
- * Initialize sched groups cpu_power.
+ * Initialize sched groups cpu_capacity.
  *
- * cpu_power indicates the capacity of sched group, which is used while
+ * cpu_capacity indicates the capacity of a sched group, which is used while
  * distributing the load between different sched groups in a sched domain.
- * Typically cpu_power for all the groups in a sched domain will be same unless
- * there are asymmetries in the topology. If there are asymmetries, group
- * having more cpu_power will pickup more load compared to the group having
- * less cpu_power.
+ * Typically cpu_capacity for all the groups in a sched domain will be the
+ * same unless there are asymmetries in the topology. If there are
+ * asymmetries, the group having more cpu_capacity will pick up more load
+ * than the group having less cpu_capacity.
  */
-static void init_sched_groups_power(int cpu, struct sched_domain *sd)
+static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 {
        struct sched_group *sg = sd->groups;
 
-       WARN_ON(!sd || !sg);
+       WARN_ON(!sg);
 
        do {
                sg->group_weight = cpumask_weight(sched_group_cpus(sg));
@@ -5989,13 +6260,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
        if (cpu != group_balance_cpu(sg))
                return;
 
-       update_group_power(sd, cpu);
-       atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
-}
-
-int __weak arch_sd_sibling_asym_packing(void)
-{
-       return 0*SD_ASYM_PACKING;
+       update_group_capacity(sd, cpu);
+       atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
 /*
@@ -6003,34 +6269,6 @@ int __weak arch_sd_sibling_asym_packing(void)
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  */
 
-#ifdef CONFIG_SCHED_DEBUG
-# define SD_INIT_NAME(sd, type)                sd->name = #type
-#else
-# define SD_INIT_NAME(sd, type)                do { } while (0)
-#endif
-
-#define SD_INIT_FUNC(type)                                             \
-static noinline struct sched_domain *                                  \
-sd_init_##type(struct sched_domain_topology_level *tl, int cpu)        \
-{                                                                      \
-       struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);       \
-       *sd = SD_##type##_INIT;                                         \
-       SD_INIT_NAME(sd, type);                                         \
-       sd->private = &tl->data;                                        \
-       return sd;                                                      \
-}
-
-SD_INIT_FUNC(CPU)
-#ifdef CONFIG_SCHED_SMT
- SD_INIT_FUNC(SIBLING)
-#endif
-#ifdef CONFIG_SCHED_MC
- SD_INIT_FUNC(MC)
-#endif
-#ifdef CONFIG_SCHED_BOOK
- SD_INIT_FUNC(BOOK)
-#endif
-
 static int default_relax_domain_level = -1;
 int sched_domain_level_max;
 
@@ -6114,97 +6352,161 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
        if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
                *per_cpu_ptr(sdd->sg, cpu) = NULL;
 
-       if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
-               *per_cpu_ptr(sdd->sgp, cpu) = NULL;
-}
-
-#ifdef CONFIG_SCHED_SMT
-static const struct cpumask *cpu_smt_mask(int cpu)
-{
-       return topology_thread_cpumask(cpu);
+       if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref))
+               *per_cpu_ptr(sdd->sgc, cpu) = NULL;
 }
-#endif
-
-/*
- * Topology list, bottom-up.
- */
-static struct sched_domain_topology_level default_topology[] = {
-#ifdef CONFIG_SCHED_SMT
-       { sd_init_SIBLING, cpu_smt_mask, },
-#endif
-#ifdef CONFIG_SCHED_MC
-       { sd_init_MC, cpu_coregroup_mask, },
-#endif
-#ifdef CONFIG_SCHED_BOOK
-       { sd_init_BOOK, cpu_book_mask, },
-#endif
-       { sd_init_CPU, cpu_cpu_mask, },
-       { NULL, },
-};
-
-static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
 #ifdef CONFIG_NUMA
-
 static int sched_domains_numa_levels;
+enum numa_topology_type sched_numa_topology_type;
 static int *sched_domains_numa_distance;
+int sched_max_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
+#endif
 
-static inline int sd_local_flags(int level)
-{
-       if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
-               return 0;
-
-       return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
-}
+/*
+ * SD_flags allowed in topology descriptions.
+ *
+ * SD_SHARE_CPUCAPACITY   - describes SMT topologies
+ * SD_SHARE_PKG_RESOURCES - describes shared caches
+ * SD_NUMA                - describes NUMA topologies
+ * SD_SHARE_POWERDOMAIN   - describes shared power domain
+ *
+ * Odd one out:
+ * SD_ASYM_PACKING        - describes SMT quirks
+ */
+#define TOPOLOGY_SD_FLAGS              \
+       (SD_SHARE_CPUCAPACITY |         \
+        SD_SHARE_PKG_RESOURCES |       \
+        SD_NUMA |                      \
+        SD_ASYM_PACKING |              \
+        SD_SHARE_POWERDOMAIN)
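/*
 * Each topology level supplies such flags through its ->sd_flags
 * callback; e.g. an SMT level's callback, modeled on cpu_smt_flags(),
 * would look roughly like:
 *
 *	static inline int example_smt_flags(void)
 *	{
 *		return SD_SHARE_CPUCAPACITY | SD_SHARE_POWERDOMAIN;
 *	}
 */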
 
 static struct sched_domain *
-sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
+sd_init(struct sched_domain_topology_level *tl, int cpu)
 {
        struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
-       int level = tl->numa_level;
-       int sd_weight = cpumask_weight(
-                       sched_domains_numa_masks[level][cpu_to_node(cpu)]);
+       int sd_weight, sd_flags = 0;
+
+#ifdef CONFIG_NUMA
+       /*
+        * Ugly hack to pass state to sd_numa_mask()...
+        */
+       sched_domains_curr_level = tl->numa_level;
+#endif
+
+       sd_weight = cpumask_weight(tl->mask(cpu));
+
+       if (tl->sd_flags)
+               sd_flags = (*tl->sd_flags)();
+       if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS,
+                       "wrong sd_flags in topology description\n"))
+               sd_flags &= ~TOPOLOGY_SD_FLAGS;
 
        *sd = (struct sched_domain){
                .min_interval           = sd_weight,
                .max_interval           = 2*sd_weight,
                .busy_factor            = 32,
                .imbalance_pct          = 125,
-               .cache_nice_tries       = 2,
-               .busy_idx               = 3,
-               .idle_idx               = 2,
+
+               .cache_nice_tries       = 0,
+               .busy_idx               = 0,
+               .idle_idx               = 0,
                .newidle_idx            = 0,
                .wake_idx               = 0,
                .forkexec_idx           = 0,
 
                .flags                  = 1*SD_LOAD_BALANCE
                                        | 1*SD_BALANCE_NEWIDLE
-                                       | 0*SD_BALANCE_EXEC
-                                       | 0*SD_BALANCE_FORK
+                                       | 1*SD_BALANCE_EXEC
+                                       | 1*SD_BALANCE_FORK
                                        | 0*SD_BALANCE_WAKE
-                                       | 0*SD_WAKE_AFFINE
-                                       | 0*SD_SHARE_CPUPOWER
+                                       | 1*SD_WAKE_AFFINE
+                                       | 0*SD_SHARE_CPUCAPACITY
                                        | 0*SD_SHARE_PKG_RESOURCES
-                                       | 1*SD_SERIALIZE
+                                       | 0*SD_SERIALIZE
                                        | 0*SD_PREFER_SIBLING
-                                       | sd_local_flags(level)
+                                       | 0*SD_NUMA
+                                       | sd_flags
                                        ,
+
                .last_balance           = jiffies,
                .balance_interval       = sd_weight,
+               .smt_gain               = 0,
+               .max_newidle_lb_cost    = 0,
+               .next_decay_max_lb_cost = jiffies,
+#ifdef CONFIG_SCHED_DEBUG
+               .name                   = tl->name,
+#endif
        };
-       SD_INIT_NAME(sd, NUMA);
+
+       /*
+        * Convert topological properties into behaviour.
+        */
+
+       if (sd->flags & SD_SHARE_CPUCAPACITY) {
+               sd->flags |= SD_PREFER_SIBLING;
+               sd->imbalance_pct = 110;
+               sd->smt_gain = 1178; /* ~15% */
+
+       } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+               sd->imbalance_pct = 117;
+               sd->cache_nice_tries = 1;
+               sd->busy_idx = 2;
+
+#ifdef CONFIG_NUMA
+       } else if (sd->flags & SD_NUMA) {
+               sd->cache_nice_tries = 2;
+               sd->busy_idx = 3;
+               sd->idle_idx = 2;
+
+               sd->flags |= SD_SERIALIZE;
+               if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) {
+                       sd->flags &= ~(SD_BALANCE_EXEC |
+                                      SD_BALANCE_FORK |
+                                      SD_WAKE_AFFINE);
+               }
+
+#endif
+       } else {
+               sd->flags |= SD_PREFER_SIBLING;
+               sd->cache_nice_tries = 1;
+               sd->busy_idx = 2;
+               sd->idle_idx = 1;
+       }
+
        sd->private = &tl->data;
 
-       /*
-        * Ugly hack to pass state to sd_numa_mask()...
-        */
-       sched_domains_curr_level = tl->numa_level;
+       return sd;
+}
+
+/*
+ * Topology list, bottom-up.
+ */
+static struct sched_domain_topology_level default_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
+struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
-       return sd;
+#define for_each_sd_topology(tl)                       \
+       for (tl = sched_domain_topology; tl->mask; tl++)
+
+void set_sched_topology(struct sched_domain_topology_level *tl)
+{
+       sched_domain_topology = tl;
 }
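/*
 * An architecture overrides the default table by handing its own
 * bottom-up ordered, NULL-terminated list to set_sched_topology()
 * before the scheduler builds its domains. A hedged sketch reusing the
 * generic mask/flags helpers (only the names here are made up):
 */
static struct sched_domain_topology_level example_arch_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init example_init_topology(void)
{
	/* must run before sched_init_smp() builds the domains */
	set_sched_topology(example_arch_topology);
}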
 
+#ifdef CONFIG_NUMA
+
 static const struct cpumask *sd_numa_mask(int cpu)
 {
        return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
@@ -6231,7 +6533,7 @@ static void sched_numa_warn(const char *str)
        printk(KERN_WARNING "\n");
 }
 
-static bool find_numa_distance(int distance)
+bool find_numa_distance(int distance)
 {
        int i;
 
@@ -6246,6 +6548,58 @@ static bool find_numa_distance(int distance)
        return false;
 }
 
+/*
+ * A system can have three types of NUMA topology:
+ * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system
+ * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes
+ * NUMA_BACKPLANE: nodes can reach other nodes through a backplane
+ *
+ * The difference between a glueless mesh topology and a backplane
+ * topology lies in whether communication between not directly
+ * connected nodes goes through intermediary nodes (where programs
+ * could run), or through backplane controllers. This affects
+ * placement of programs.
+ *
+ * The type of topology can be discerned with the following tests:
+ * - If the maximum distance between any nodes is 1 hop, the system
+ *   is directly connected.
+ * - If for two nodes A and B, located N > 1 hops away from each other,
+ *   there is an intermediary node C, which is < N hops away from both
+ *   nodes A and B, the system is a glueless mesh.
+ * - Otherwise, the system is a backplane topology: its furthest nodes
+ *   can only reach each other through backplane controllers.
+ */
+static void init_numa_topology_type(void)
+{
+       int a, b, c, n;
+
+       n = sched_max_numa_distance;
+
+       if (sched_domains_numa_levels <= 1) {
+               sched_numa_topology_type = NUMA_DIRECT;
+               return;
+       }
+
+       for_each_online_node(a) {
+               for_each_online_node(b) {
+                       /* Find two nodes furthest removed from each other. */
+                       if (node_distance(a, b) < n)
+                               continue;
+
+                       /* Is there an intermediary node between a and b? */
+                       for_each_online_node(c) {
+                               if (node_distance(a, c) < n &&
+                                   node_distance(b, c) < n) {
+                                       sched_numa_topology_type =
+                                                       NUMA_GLUELESS_MESH;
+                                       return;
+                               }
+                       }
+
+                       sched_numa_topology_type = NUMA_BACKPLANE;
+                       return;
+               }
+       }
+}
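/*
 * A worked example with hypothetical 4-node distance tables, where
 * n = sched_max_numa_distance = 30:
 *
 *	glueless mesh			backplane
 *	  10 20 20 30			  10 20 30 30
 *	  20 10 20 20			  20 10 30 30
 *	  20 20 10 20			  30 30 10 20
 *	  30 20 20 10			  30 30 20 10
 *
 * On the left, nodes 0 and 3 are 30 apart but node 1 is 20 from both,
 * so traffic between them can pass through a node that also runs
 * programs: NUMA_GLUELESS_MESH. On the right, nodes 0 and 2 are 30
 * apart and no node is closer than 30 to both of them, so the system
 * is classified as NUMA_BACKPLANE.
 */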
+
 static void sched_init_numa(void)
 {
        int next_distance, curr_distance = node_distance(0, 0);
@@ -6299,6 +6653,10 @@ static void sched_init_numa(void)
                if (!sched_debug())
                        break;
        }
+
+       if (!level)
+               return;
+
        /*
         * 'level' contains the number of unique distances, excluding the
         * identity distance node_distance(i,i).
@@ -6348,7 +6706,10 @@ static void sched_init_numa(void)
                }
        }
 
-       tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
+       /* Compute default topology size */
+       for (i = 0; sched_domain_topology[i].mask; i++);
+
+       tl = kzalloc((i + level + 1) *
                        sizeof(struct sched_domain_topology_level), GFP_KERNEL);
        if (!tl)
                return;
@@ -6356,24 +6717,28 @@ static void sched_init_numa(void)
        /*
         * Copy the default topology bits..
         */
-       for (i = 0; default_topology[i].init; i++)
-               tl[i] = default_topology[i];
+       for (i = 0; sched_domain_topology[i].mask; i++)
+               tl[i] = sched_domain_topology[i];
 
        /*
         * .. and append 'j' levels of NUMA goodness.
         */
        for (j = 0; j < level; i++, j++) {
                tl[i] = (struct sched_domain_topology_level){
-                       .init = sd_numa_init,
                        .mask = sd_numa_mask,
+                       .sd_flags = cpu_numa_flags,
                        .flags = SDTL_OVERLAP,
                        .numa_level = j,
+                       SD_INIT_NAME(NUMA)
                };
        }
 
        sched_domain_topology = tl;
 
        sched_domains_numa_levels = level;
+       sched_max_numa_distance = sched_domains_numa_distance[level - 1];
+
+       init_numa_topology_type();
 }
 
 static void sched_domains_numa_masks_set(int cpu)
@@ -6441,7 +6806,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
        struct sched_domain_topology_level *tl;
        int j;
 
-       for (tl = sched_domain_topology; tl->init; tl++) {
+       for_each_sd_topology(tl) {
                struct sd_data *sdd = &tl->data;
 
                sdd->sd = alloc_percpu(struct sched_domain *);
@@ -6452,16 +6817,16 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                if (!sdd->sg)
                        return -ENOMEM;
 
-               sdd->sgp = alloc_percpu(struct sched_group_power *);
-               if (!sdd->sgp)
+               sdd->sgc = alloc_percpu(struct sched_group_capacity *);
+               if (!sdd->sgc)
                        return -ENOMEM;
 
                for_each_cpu(j, cpu_map) {
                        struct sched_domain *sd;
                        struct sched_group *sg;
-                       struct sched_group_power *sgp;
+                       struct sched_group_capacity *sgc;
 
-                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+                       sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
                        if (!sd)
                                return -ENOMEM;
@@ -6477,12 +6842,12 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
                        *per_cpu_ptr(sdd->sg, j) = sg;
 
-                       sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
+                       sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),
                                        GFP_KERNEL, cpu_to_node(j));
-                       if (!sgp)
+                       if (!sgc)
                                return -ENOMEM;
 
-                       *per_cpu_ptr(sdd->sgp, j) = sgp;
+                       *per_cpu_ptr(sdd->sgc, j) = sgc;
                }
        }
 
@@ -6494,7 +6859,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
        struct sched_domain_topology_level *tl;
        int j;
 
-       for (tl = sched_domain_topology; tl->init; tl++) {
+       for_each_sd_topology(tl) {
                struct sd_data *sdd = &tl->data;
 
                for_each_cpu(j, cpu_map) {
@@ -6509,24 +6874,23 @@ static void __sdt_free(const struct cpumask *cpu_map)
 
                        if (sdd->sg)
                                kfree(*per_cpu_ptr(sdd->sg, j));
-                       if (sdd->sgp)
-                               kfree(*per_cpu_ptr(sdd->sgp, j));
+                       if (sdd->sgc)
+                               kfree(*per_cpu_ptr(sdd->sgc, j));
                }
                free_percpu(sdd->sd);
                sdd->sd = NULL;
                free_percpu(sdd->sg);
                sdd->sg = NULL;
-               free_percpu(sdd->sgp);
-               sdd->sgp = NULL;
+               free_percpu(sdd->sgc);
+               sdd->sgc = NULL;
        }
 }
 
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
-               struct s_data *d, const struct cpumask *cpu_map,
-               struct sched_domain_attr *attr, struct sched_domain *child,
-               int cpu)
+               const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+               struct sched_domain *child, int cpu)
 {
-       struct sched_domain *sd = tl->init(tl, cpu);
+       struct sched_domain *sd = sd_init(tl, cpu);
        if (!sd)
                return child;
 
@@ -6535,8 +6899,22 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                sd->level = child->level + 1;
                sched_domain_level_max = max(sched_domain_level_max, sd->level);
                child->parent = sd;
+               sd->child = child;
+
+               if (!cpumask_subset(sched_domain_span(child),
+                                   sched_domain_span(sd))) {
+                       pr_err("BUG: arch topology borken\n");
+#ifdef CONFIG_SCHED_DEBUG
+                       pr_err("     the %s domain not a subset of the %s domain\n",
+                                       child->name, sd->name);
+#endif
+                       /* Fixup, ensure @sd has at least @child cpus. */
+                       cpumask_or(sched_domain_span(sd),
+                                  sched_domain_span(sd),
+                                  sched_domain_span(child));
+               }
+
        }
-       sd->child = child;
        set_domain_attribute(sd, attr);
 
        return sd;
@@ -6549,7 +6927,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 static int build_sched_domains(const struct cpumask *cpu_map,
                               struct sched_domain_attr *attr)
 {
-       enum s_alloc alloc_state = sa_none;
+       enum s_alloc alloc_state;
        struct sched_domain *sd;
        struct s_data d;
        int i, ret = -ENOMEM;
@@ -6563,18 +6941,15 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                struct sched_domain_topology_level *tl;
 
                sd = NULL;
-               for (tl = sched_domain_topology; tl->init; tl++) {
-                       sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+               for_each_sd_topology(tl) {
+                       sd = build_sched_domain(tl, cpu_map, attr, sd, i);
+                       if (tl == sched_domain_topology)
+                               *per_cpu_ptr(d.sd, i) = sd;
                        if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
                                sd->flags |= SD_OVERLAP;
                        if (cpumask_equal(cpu_map, sched_domain_span(sd)))
                                break;
                }
-
-               while (sd->child)
-                       sd = sd->child;
-
-               *per_cpu_ptr(d.sd, i) = sd;
        }
 
        /* Build the groups for the domains */
@@ -6591,14 +6966,14 @@ static int build_sched_domains(const struct cpumask *cpu_map,
                }
        }
 
-       /* Calculate CPU power for physical packages and nodes */
+       /* Calculate CPU capacity for physical packages and nodes */
        for (i = nr_cpumask_bits-1; i >= 0; i--) {
                if (!cpumask_test_cpu(i, cpu_map))
                        continue;
 
                for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
                        claim_allocations(i, sd);
-                       init_sched_groups_power(i, sd);
+                       init_sched_groups_capacity(i, sd);
                }
        }
 
@@ -6633,7 +7008,7 @@ static cpumask_var_t fallback_doms;
  * cpu core maps. It is supposed to return 1 if the topology changed
  * or 0 if it stayed the same.
  */
-int __attribute__((weak)) arch_update_cpu_topology(void)
+int __weak arch_update_cpu_topology(void)
 {
        return 0;
 }
@@ -6769,8 +7144,9 @@ match1:
                ;
        }
 
+       n = ndoms_cur;
        if (doms_new == NULL) {
-               ndoms_cur = 0;
+               n = 0;
                doms_new = &fallback_doms;
                cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
                WARN_ON_ONCE(dattr_new);
@@ -6778,7 +7154,7 @@ match1:
 
        /* Build new domains */
        for (i = 0; i < ndoms_new; i++) {
-               for (j = 0; j < ndoms_cur && !new_topology; j++) {
+               for (j = 0; j < n && !new_topology; j++) {
                        if (cpumask_equal(doms_new[i], doms_cur[j])
                            && dattrs_equal(dattr_new, i, dattr_cur, j))
                                goto match2;
@@ -6838,7 +7214,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                 */
 
        case CPU_ONLINE:
-       case CPU_DOWN_FAILED:
                cpuset_update_active_cpus(true);
                break;
        default:
@@ -6850,8 +7225,26 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
 {
+       unsigned long flags;
+       long cpu = (long)hcpu;
+       struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
+
        switch (action) {
        case CPU_DOWN_PREPARE:
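+               /*
+                * Taking this CPU down would reduce the CPUs available to
+                * SCHED_DEADLINE tasks; bail out with -EBUSY if the
+                * bandwidth admitted so far could no longer be guaranteed.
+                */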
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, 0);
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+
+               if (overflow)
+                       return notifier_from_errno(-EBUSY);
                cpuset_update_active_cpus(false);
                break;
        case CPU_DOWN_PREPARE_FROZEN:
@@ -6873,22 +7266,22 @@ void __init sched_init_smp(void)
 
        sched_init_numa();
 
-       get_online_cpus();
+       /*
+        * There's no userspace yet to cause hotplug operations; hence all the
+        * CPU masks are stable and the blatant races in the code below cannot
+        * happen.
+        */
        mutex_lock(&sched_domains_mutex);
        init_sched_domains(cpu_active_mask);
        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
        if (cpumask_empty(non_isolated_cpus))
                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
        mutex_unlock(&sched_domains_mutex);
-       put_online_cpus();
 
        hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
        hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
        hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
-       /* RT runtime code needs to handle some hotplug events */
-       hotcpu_notifier(update_runtime, 0);
-
        init_hrtick();
 
        /* Move init over to a non-isolated CPU */
@@ -6898,6 +7291,7 @@ void __init sched_init_smp(void)
        free_cpumask_var(non_isolated_cpus);
 
        init_sched_rt_class();
+       init_sched_dl_class();
 }
 #else
 void __init sched_init_smp(void)
@@ -6906,8 +7300,6 @@ void __init sched_init_smp(void)
 }
 #endif /* CONFIG_SMP */
 
-const_debug unsigned int sysctl_timer_migration = 1;
-
 int in_sched_functions(unsigned long addr)
 {
        return in_lock_functions(addr) ||
@@ -6936,9 +7328,6 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        alloc_size += 2 * nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
 #endif
        if (alloc_size) {
                ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -6959,21 +7348,23 @@ void __init sched_init(void)
                ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+       }
 #ifdef CONFIG_CPUMASK_OFFSTACK
-               for_each_possible_cpu(i) {
-                       per_cpu(load_balance_mask, i) = (void *)ptr;
-                       ptr += cpumask_size();
-               }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
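+       /* Allocate each CPU's load_balance_mask on that CPU's own NUMA node. */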
+       for_each_possible_cpu(i) {
+               per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+                       cpumask_size(), GFP_KERNEL, cpu_to_node(i));
        }
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+       init_rt_bandwidth(&def_rt_bandwidth,
+                       global_rt_period(), global_rt_runtime());
+       init_dl_bandwidth(&def_dl_bandwidth,
+                       global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_SMP
        init_defrootdomain();
 #endif
 
-       init_rt_bandwidth(&def_rt_bandwidth,
-                       global_rt_period(), global_rt_runtime());
-
 #ifdef CONFIG_RT_GROUP_SCHED
        init_rt_bandwidth(&root_task_group.rt_bandwidth,
                        global_rt_period(), global_rt_runtime());
@@ -6996,7 +7387,8 @@ void __init sched_init(void)
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
                init_cfs_rq(&rq->cfs);
-               init_rt_rq(&rq->rt, rq);
+               init_rt_rq(&rq->rt);
+               init_dl_rq(&rq->dl);
 #ifdef CONFIG_FAIR_GROUP_SCHED
                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
@@ -7025,7 +7417,6 @@ void __init sched_init(void)
 
                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
-               INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
@@ -7037,8 +7428,8 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                rq->sd = NULL;
                rq->rd = NULL;
-               rq->cpu_power = SCHED_POWER_SCALE;
-               rq->post_schedule = 0;
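+               /* Start at full capacity: SCHED_CAPACITY_SCALE is one CPU's worth. */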
+               rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
+               rq->balance_callback = NULL;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
                rq->push_cpu = 0;
@@ -7046,6 +7437,7 @@ void __init sched_init(void)
                rq->online = 0;
                rq->idle_stamp = 0;
                rq->avg_idle = 2*sysctl_sched_migration_cost;
+               rq->max_idle_balance_cost = sysctl_sched_migration_cost;
 
                INIT_LIST_HEAD(&rq->cfs_tasks);
 
@@ -7067,16 +7459,17 @@ void __init sched_init(void)
        INIT_HLIST_HEAD(&init_task.preempt_notifiers);
 #endif
 
-#ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters);
-#endif
-
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current);
 
+       /*
+        * During early bootup we pretend to be a normal task:
+        */
+       current->sched_class = &fair_sched_class;
+
        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
@@ -7087,17 +7480,13 @@ void __init sched_init(void)
 
        calc_load_update = jiffies + LOAD_FREQ;
 
-       /*
-        * During early bootup we pretend to be a normal task:
-        */
-       current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
        /* May be allocated at isolcpus cmdline parse time */
        if (cpu_isolated_map == NULL)
                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
        idle_thread_set_boot_cpu();
+       set_cpu_rq_start_time();
 #endif
        init_sched_fair_class();
 
@@ -7113,11 +7502,30 @@ static inline int preempt_count_equals(int preempt_offset)
 }
 
 void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       /*
+        * Blocking primitives will set (and therefore destroy) current->state;
+        * since we will exit with TASK_RUNNING, make sure we enter with it,
+        * otherwise we would destroy that state.
+        */
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
+                       "do not call blocking ops when !TASK_RUNNING; "
+                       "state=%lx set at [<%p>] %pS\n",
+                       current->state,
+                       (void *)current->task_state_change,
+                       (void *)current->task_state_change);
+
+       ___might_sleep(file, line, preempt_offset);
+}
+EXPORT_SYMBOL(__might_sleep);
+
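+/*
+ * Bare variant of __might_sleep(), without the TASK_RUNNING check above;
+ * used by paths such as cond_resched() that may legitimately run with a
+ * task state already set.
+ */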
+void ___might_sleep(const char *file, int line, int preempt_offset)
 {
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
-       if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+       if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+            !is_idle_task(current)) ||
            system_state != SYSTEM_RUNNING || oops_in_progress)
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
@@ -7132,45 +7540,38 @@ void __might_sleep(const char *file, int line, int preempt_offset)
                        in_atomic(), irqs_disabled(),
                        current->pid, current->comm);
 
+       if (task_stack_end_corrupted(current))
+               printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT
+       if (!preempt_count_equals(preempt_offset)) {
+               pr_err("Preemption disabled at:");
+               print_ip_sym(current->preempt_disable_ip);
+               pr_cont("\n");
+       }
+#endif
        dump_stack();
 }
-EXPORT_SYMBOL(__might_sleep);
+EXPORT_SYMBOL(___might_sleep);
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void normalize_task(struct rq *rq, struct task_struct *p)
-{
-       const struct sched_class *prev_class = p->sched_class;
-       int old_prio = p->prio;
-       int on_rq;
-
-       on_rq = p->on_rq;
-       if (on_rq)
-               dequeue_task(rq, p, 0);
-       __setscheduler(rq, p, SCHED_NORMAL, 0);
-       if (on_rq) {
-               enqueue_task(rq, p, 0);
-               resched_task(rq->curr);
-       }
-
-       check_class_changed(rq, p, prev_class, old_prio);
-}
-
 void normalize_rt_tasks(void)
 {
        struct task_struct *g, *p;
-       unsigned long flags;
-       struct rq *rq;
+       struct sched_attr attr = {
+               .sched_policy = SCHED_NORMAL,
+       };
 
-       read_lock_irqsave(&tasklist_lock, flags);
-       do_each_thread(g, p) {
+       read_lock(&tasklist_lock);
+       for_each_process_thread(g, p) {
                /*
                 * Only normalize user tasks:
                 */
-               if (!p->mm)
+               if (p->flags & PF_KTHREAD)
                        continue;
 
                p->se.exec_start                = 0;
@@ -7180,26 +7581,19 @@ void normalize_rt_tasks(void)
                p->se.statistics.block_start    = 0;
 #endif
 
-               if (!rt_task(p)) {
+               if (!dl_task(p) && !rt_task(p)) {
                        /*
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
-                       if (TASK_NICE(p) < 0 && p->mm)
+                       if (task_nice(p) < 0)
                                set_user_nice(p, 0);
                        continue;
                }
 
-               raw_spin_lock(&p->pi_lock);
-               rq = __task_rq_lock(p);
-
-               normalize_task(rq, p);
-
-               __task_rq_unlock(rq);
-               raw_spin_unlock(&p->pi_lock);
-       } while_each_thread(g, p);
-
-       read_unlock_irqrestore(&tasklist_lock, flags);
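+               /*
+                * Kernel-internal call: "false, false" means no permission
+                * checks and no rt_mutex PI-chain adjustment.
+                */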
+               __sched_setscheduler(p, &attr, false, false);
+       }
+       read_unlock(&tasklist_lock);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
@@ -7220,6 +7614,8 @@ void normalize_rt_tasks(void)
  * @cpu: the processor in question.
  *
  * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
  */
 struct task_struct *curr_task(int cpu)
 {
@@ -7337,52 +7733,46 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
        struct task_group *tg;
-       int on_rq, running;
+       int queued, running;
        unsigned long flags;
        struct rq *rq;
 
        rq = task_rq_lock(tsk, &flags);
 
        running = task_current(rq, tsk);
-       on_rq = tsk->on_rq;
+       queued = task_on_rq_queued(tsk);
 
-       if (on_rq)
+       if (queued)
                dequeue_task(rq, tsk, 0);
        if (unlikely(running))
-               tsk->sched_class->put_prev_task(rq, tsk);
+               put_prev_task(rq, tsk);
 
-       tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
-                               lockdep_is_held(&tsk->sighand->siglock)),
+       /*
+        * All callers are synchronized by task_rq_lock(); we do not use RCU,
+        * which would be pointless here. Thus, we pass "true" to
+        * task_css_check() to prevent lockdep warnings.
+        */
+       tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk, on_rq);
+               tsk->sched_class->task_move_group(tsk, queued);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
-       if (on_rq)
+       if (queued)
                enqueue_task(rq, tsk, 0);
 
        task_rq_unlock(rq, tsk, &flags);
 }
 #endif /* CONFIG_CGROUP_SCHED */
 
-#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
-static unsigned long to_ratio(u64 period, u64 runtime)
-{
-       if (runtime == RUNTIME_INF)
-               return 1ULL << 20;
-
-       return div64_u64(runtime << 20, period);
-}
-#endif
-
 #ifdef CONFIG_RT_GROUP_SCHED
 /*
  * Ensure that the real time constraints are schedulable.
@@ -7394,10 +7784,16 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
-       do_each_thread(g, p) {
-               if (rt_task(p) && task_rq(p)->rt.tg == tg)
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
+       for_each_process_thread(g, p) {
+               if (rt_task(p) && task_group(p) == tg)
                        return 1;
-       } while_each_thread(g, p);
+       }
 
        return 0;
 }
@@ -7486,6 +7882,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing the root group RT runtime is BAD; it would prevent the
+        * kernel from creating (and operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* A zero period doesn't make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7535,16 +7942,13 @@ static long sched_group_rt_runtime(struct task_group *tg)
        return rt_runtime_us;
 }
 
-static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
        u64 rt_runtime, rt_period;
 
-       rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+       rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
@@ -7556,24 +7960,13 @@ static long sched_group_rt_period(struct task_group *tg)
        do_div(rt_period_us, NSEC_PER_USEC);
        return rt_period_us;
 }
+#endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_RT_GROUP_SCHED
 static int sched_rt_global_constraints(void)
 {
-       u64 runtime, period;
        int ret = 0;
 
-       if (sysctl_sched_rt_period <= 0)
-               return -EINVAL;
-
-       runtime = global_rt_runtime();
-       period = global_rt_period();
-
-       /*
-        * Sanity check on the sysctl variables.
-        */
-       if (runtime > period && runtime != RUNTIME_INF)
-               return -EINVAL;
-
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        ret = __rt_schedulable(NULL, 0, 0);
@@ -7596,17 +7989,7 @@ static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
 static int sched_rt_global_constraints(void)
 {
        unsigned long flags;
-       int i;
-
-       if (sysctl_sched_rt_period <= 0)
-               return -EINVAL;
-
-       /*
-        * There's always some RT tasks in the root group
-        * -- migration, kstopmachine etc..
-        */
-       if (sysctl_sched_rt_runtime == 0)
-               return -EBUSY;
+       int i, ret = 0;
 
        raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
@@ -7618,36 +8001,99 @@ static int sched_rt_global_constraints(void)
        }
        raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
-       return 0;
+       return ret;
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
-int sched_rr_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
+static int sched_dl_global_validate(void)
 {
-       int ret;
-       static DEFINE_MUTEX(mutex);
+       u64 runtime = global_rt_runtime();
+       u64 period = global_rt_period();
+       u64 new_bw = to_ratio(period, runtime);
+       struct dl_bw *dl_b;
+       int cpu, ret = 0;
+       unsigned long flags;
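+
+       /*
+        * to_ratio() returns runtime/period as a 20-bit fixed-point
+        * fraction, i.e. (runtime << 20) / period, with RUNTIME_INF
+        * mapping to 1 << 20 (100%).
+        */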
 
-       mutex_lock(&mutex);
-       ret = proc_dointvec(table, write, buffer, lenp, ppos);
-       /* make sure that internally we keep jiffies */
-       /* also, writing zero resets timeslice to default */
-       if (!ret && write) {
-               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
-                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+       /*
+        * Here we want to check that the new bandwidth is not smaller
+        * than the bandwidth currently allocated in any of the
+        * root_domains.
+        *
+        * FIXME: Cycling over all the CPUs is overkill, but simpler than
+        * cycling over the root_domains... Discussion of different/better
+        * solutions is welcome!
+        */
+       for_each_possible_cpu(cpu) {
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               if (new_bw < dl_b->total_bw)
+                       ret = -EBUSY;
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+
+               if (ret)
+                       break;
        }
-       mutex_unlock(&mutex);
+
        return ret;
 }
 
+static void sched_dl_do_global(void)
+{
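+       /* (u64)-1 (all ones) denotes unlimited deadline bandwidth. */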
+       u64 new_bw = -1;
+       struct dl_bw *dl_b;
+       int cpu;
+       unsigned long flags;
+
+       def_dl_bandwidth.dl_period = global_rt_period();
+       def_dl_bandwidth.dl_runtime = global_rt_runtime();
+
+       if (global_rt_runtime() != RUNTIME_INF)
+               new_bw = to_ratio(global_rt_period(), global_rt_runtime());
+
+       /*
+        * FIXME: As above...
+        */
+       for_each_possible_cpu(cpu) {
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
+
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               dl_b->bw = new_bw;
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+               rcu_read_unlock_sched();
+       }
+}
+
+static int sched_rt_global_validate(void)
+{
+       if (sysctl_sched_rt_period <= 0)
+               return -EINVAL;
+
+       if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+               (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
+               return -EINVAL;
+
+       return 0;
+}
+
+static void sched_rt_do_global(void)
+{
+       def_rt_bandwidth.rt_runtime = global_rt_runtime();
+       def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
-       int ret;
        int old_period, old_runtime;
        static DEFINE_MUTEX(mutex);
+       int ret;
 
        mutex_lock(&mutex);
        old_period = sysctl_sched_rt_period;
@@ -7656,40 +8102,68 @@ int sched_rt_handler(struct ctl_table *table, int write,
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
        if (!ret && write) {
+               ret = sched_rt_global_validate();
+               if (ret)
+                       goto undo;
+
+               ret = sched_dl_global_validate();
+               if (ret)
+                       goto undo;
+
                ret = sched_rt_global_constraints();
-               if (ret) {
-                       sysctl_sched_rt_period = old_period;
-                       sysctl_sched_rt_runtime = old_runtime;
-               } else {
-                       def_rt_bandwidth.rt_runtime = global_rt_runtime();
-                       def_rt_bandwidth.rt_period =
-                               ns_to_ktime(global_rt_period());
-               }
+               if (ret)
+                       goto undo;
+
+               sched_rt_do_global();
+               sched_dl_do_global();
+       }
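+       /*
+        * Reachable only via the undo: label above: proc_dointvec() has
+        * already stored the new values, so put the old ones back after a
+        * failed validation.
+        */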
+       if (0) {
+undo:
+               sysctl_sched_rt_period = old_period;
+               sysctl_sched_rt_runtime = old_runtime;
        }
        mutex_unlock(&mutex);
 
        return ret;
 }
 
+int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+       static DEFINE_MUTEX(mutex);
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       /*
+        * Internally the timeslice is kept in jiffies (userspace writes
+        * milliseconds); writing zero resets it to the RR_TIMESLICE default.
+        */
+       if (!ret && write) {
+               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+       }
+       mutex_unlock(&mutex);
+       return ret;
+}
+
 #ifdef CONFIG_CGROUP_SCHED
 
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
-       return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
-                           struct task_group, css);
+       return css ? container_of(css, struct task_group, css) : NULL;
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       struct task_group *tg, *parent;
+       struct task_group *parent = css_tg(parent_css);
+       struct task_group *tg;
 
-       if (!cgrp->parent) {
+       if (!parent) {
                /* This is early initialization for the top cgroup */
                return &root_task_group.css;
        }
 
-       parent = cgroup_tg(cgrp->parent);
        tg = sched_create_group(parent);
        if (IS_ERR(tg))
                return ERR_PTR(-ENOMEM);
@@ -7697,41 +8171,43 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
        return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
-       struct task_group *parent;
+       struct task_group *tg = css_tg(css);
+       struct task_group *parent = css_tg(css->parent);
 
-       if (!cgrp->parent)
-               return 0;
-
-       parent = cgroup_tg(cgrp->parent);
-       sched_online_group(tg, parent);
+       if (parent)
+               sched_online_group(tg, parent);
        return 0;
 }
 
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        sched_destroy_group(tg);
 }
 
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        sched_offline_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
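+/* cgroup 'fork' callback: move the new task into its task_group. */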
+static void cpu_cgroup_fork(struct task_struct *task, void *private)
+{
+       sched_move_task(task);
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
                                 struct cgroup_taskset *tset)
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, cgrp, tset) {
+       cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-               if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+               if (!sched_rt_can_attach(css_tg(css), task))
                        return -EINVAL;
 #else
                /* We don't support RT-tasks being in separate groups */
@@ -7742,18 +8218,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
        return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
                              struct cgroup_taskset *tset)
 {
        struct task_struct *task;
 
-       cgroup_taskset_for_each(task, cgrp, tset)
+       cgroup_taskset_for_each(task, tset)
                sched_move_task(task);
 }
 
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
-               struct task_struct *task)
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+                           struct cgroup_subsys_state *old_css,
+                           struct task_struct *task)
 {
        /*
         * cgroup_exit() is called in the copy_process() failure path.
@@ -7767,15 +8243,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-                               u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+                               struct cftype *cftype, u64 shareval)
 {
-       return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+       return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(css);
 
        return (u64) scale_load_down(tg->shares);
 }
@@ -7812,6 +8289,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        if (period > max_cfs_quota_period)
                return -EINVAL;
 
+       /*
+        * Prevent race between setting of cfs_rq->runtime_enabled and
+        * unthrottle_offline_cfs_rqs().
+        */
+       get_online_cpus();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -7819,21 +8301,23 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        runtime_enabled = quota != RUNTIME_INF;
        runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-       account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+       /*
+        * If we need to toggle cfs_bandwidth_used, off->on must occur
+        * before making related changes, and on->off must occur afterwards.
+        */
+       if (runtime_enabled && !runtime_was_enabled)
+               cfs_bandwidth_usage_inc();
        raw_spin_lock_irq(&cfs_b->lock);
        cfs_b->period = ns_to_ktime(period);
        cfs_b->quota = quota;
 
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
-       if (runtime_enabled && cfs_b->timer_active) {
-               /* force a reprogram */
-               cfs_b->timer_active = 0;
-               __start_cfs_bandwidth(cfs_b);
-       }
+       if (runtime_enabled)
+               start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
 
-       for_each_possible_cpu(i) {
+       for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;
 
@@ -7845,8 +8329,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                        unthrottle_cfs_rq(cfs_rq);
                raw_spin_unlock_irq(&rq->lock);
        }
+       if (runtime_was_enabled && !runtime_enabled)
+               cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
+       put_online_cpus();
 
        return ret;
 }
@@ -7897,26 +8384,28 @@ long tg_get_cfs_period(struct task_group *tg)
        return cfs_period_us;
 }
 
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+                                 struct cftype *cft)
 {
-       return tg_get_cfs_quota(cgroup_tg(cgrp));
+       return tg_get_cfs_quota(css_tg(css));
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
-                               s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+                                  struct cftype *cftype, s64 cfs_quota_us)
 {
-       return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+       return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
 }
 
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+                                  struct cftype *cft)
 {
-       return tg_get_cfs_period(cgroup_tg(cgrp));
+       return tg_get_cfs_period(css_tg(css));
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-                               u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+                                   struct cftype *cftype, u64 cfs_period_us)
 {
-       return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+       return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
 struct cfs_schedulable_data {
@@ -7960,7 +8449,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
 
                quota = normalize_cfs_quota(tg, d);
-               parent_quota = parent_b->hierarchal_quota;
+               parent_quota = parent_b->hierarchical_quota;
 
                /*
                 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -7971,7 +8460,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
                else if (parent_quota != RUNTIME_INF && quota > parent_quota)
                        return -EINVAL;
        }
-       cfs_b->hierarchal_quota = quota;
+       cfs_b->hierarchical_quota = quota;
 
        return 0;
 }
@@ -7997,15 +8486,14 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
        return ret;
 }
 
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
-               struct cgroup_map_cb *cb)
+static int cpu_stats_show(struct seq_file *sf, void *v)
 {
-       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *tg = css_tg(seq_css(sf));
        struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
-       cb->fill(cb, "nr_periods", cfs_b->nr_periods);
-       cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
-       cb->fill(cb, "throttled_time", cfs_b->throttled_time);
+       seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods);
+       seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled);
+       seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time);
 
        return 0;
 }
@@ -8013,26 +8501,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-                               s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+                               struct cftype *cft, s64 val)
 {
-       return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+       return sched_group_set_rt_runtime(css_tg(css), val);
 }
 
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+                              struct cftype *cft)
 {
-       return sched_group_rt_runtime(cgroup_tg(cgrp));
+       return sched_group_rt_runtime(css_tg(css));
 }
 
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-               u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+                                   struct cftype *cftype, u64 rt_period_us)
 {
-       return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+       return sched_group_set_rt_period(css_tg(css), rt_period_us);
 }
 
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+                                  struct cftype *cft)
 {
-       return sched_group_rt_period(cgroup_tg(cgrp));
+       return sched_group_rt_period(css_tg(css));
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
@@ -8057,7 +8547,7 @@ static struct cftype cpu_files[] = {
        },
        {
                .name = "stat",
-               .read_map = cpu_stats_show,
+               .seq_show = cpu_stats_show,
        },
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -8075,17 +8565,16 @@ static struct cftype cpu_files[] = {
        { }     /* terminate */
 };
 
-struct cgroup_subsys cpu_cgroup_subsys = {
-       .name           = "cpu",
+struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
        .css_online     = cpu_cgroup_css_online,
        .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
-       .subsys_id      = cpu_cgroup_subsys_id,
-       .base_cftypes   = cpu_files,
+       .legacy_cftypes = cpu_files,
        .early_init     = 1,
 };