Merge tag 'v3.18' into p/abusse/merge_upgrade
[projects/modsched/linux.git] kernel/sched/cfs/fair.c
index e64b079..ef2b104 100644
@@ -23,6 +23,7 @@
 #include <linux/latencytop.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpuidle.h>
 #include <linux/slab.h>
 #include <linux/profile.h>
 #include <linux/interrupt.h>
@@ -322,13 +323,13 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
        if (se->cfs_rq == pse->cfs_rq)
-               return 1;
+               return se->cfs_rq;
 
-       return 0;
+       return NULL;
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -336,17 +337,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
        return se->parent;
 }
 
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-       int depth = 0;
-
-       for_each_sched_entity(se)
-               depth++;
-
-       return depth;
-}
-
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -360,8 +350,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
         */
 
        /* First walk up until both entities are at same depth */
-       se_depth = depth_se(*se);
-       pse_depth = depth_se(*pse);
+       se_depth = (*se)->depth;
+       pse_depth = (*pse)->depth;
 
        while (se_depth > pse_depth) {
                se_depth--;
@@ -426,12 +416,6 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-       return 1;
-}
-
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
        return NULL;
@@ -682,6 +666,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #ifdef CONFIG_SMP
+static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 static inline void __update_task_entity_contrib(struct sched_entity *se);
@@ -741,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
        account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+       update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -819,14 +809,6 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
-/*
- * After skipping a page migration on a shared page, skip N more numa page
- * migrations unconditionally. This reduces the number of NUMA migrations
- * in shared memory workloads, and has the effect of pulling tasks towards
- * where their memory lives, over pulling the memory towards the task.
- */
-unsigned int sysctl_numa_balancing_migrate_deferred = 16;
-
 static unsigned int task_nr_scan_windows(struct task_struct *p)
 {
        unsigned long rss = 0;
@@ -851,11 +833,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
-       if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-               windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+       if (scan_size < MAX_SCAN_WINDOW)
+               windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;
 
        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
@@ -872,15 +855,6 @@ static unsigned int task_scan_max(struct task_struct *p)
        return max(smin, smax);
 }
 
-/*
- * Once a preferred node is selected the scheduler balancer will prefer moving
- * a task to that node for sysctl_numa_balancing_settle_count number of PTE
- * scans. This will give the process the chance to accumulate more faults on
- * the preferred node but still allow the scheduler to move the task again if
- * the nodes CPUs are overloaded.
- */
-unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
-
 static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
 {
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
@@ -902,10 +876,26 @@ struct numa_group {
        struct list_head task_list;
 
        struct rcu_head rcu;
+       nodemask_t active_nodes;
        unsigned long total_faults;
+       /*
+        * Faults_cpu is used to decide whether memory should move
+        * towards the CPU. As a consequence, these stats are weighted
+        * more by CPU use than by memory faults.
+        */
+       unsigned long *faults_cpu;
        unsigned long faults[0];
 };
 
+/* Shared or private faults. */
+#define NR_NUMA_HINT_FAULT_TYPES 2
+
+/* Memory and CPU locality */
+#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
+
+/* Averaged statistics, and temporary buffers. */
+#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
+
 pid_t task_numa_group_id(struct task_struct *p)
 {
        return p->numa_group ? p->numa_group->gid : 0;
@@ -913,16 +903,16 @@ pid_t task_numa_group_id(struct task_struct *p)
 
 static inline int task_faults_idx(int nid, int priv)
 {
-       return 2 * nid + priv;
+       return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
 }
 
 static inline unsigned long task_faults(struct task_struct *p, int nid)
 {
-       if (!p->numa_faults)
+       if (!p->numa_faults_memory)
                return 0;
 
-       return p->numa_faults[task_faults_idx(nid, 0)] +
-               p->numa_faults[task_faults_idx(nid, 1)];
+       return p->numa_faults_memory[task_faults_idx(nid, 0)] +
+               p->numa_faults_memory[task_faults_idx(nid, 1)];
 }
 
 static inline unsigned long group_faults(struct task_struct *p, int nid)
@@ -930,7 +920,14 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
        if (!p->numa_group)
                return 0;
 
-       return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
+       return p->numa_group->faults[task_faults_idx(nid, 0)] +
+               p->numa_group->faults[task_faults_idx(nid, 1)];
+}
+
+static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
+{
+       return group->faults_cpu[task_faults_idx(nid, 0)] +
+               group->faults_cpu[task_faults_idx(nid, 1)];
 }
 
 /*
@@ -943,7 +940,7 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
 {
        unsigned long total_faults;
 
-       if (!p->numa_faults)
+       if (!p->numa_faults_memory)
                return 0;
 
        total_faults = p->total_numa_faults;
@@ -962,10 +959,73 @@ static inline unsigned long group_weight(struct task_struct *p, int nid)
        return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
 }
 
+bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
+                               int src_nid, int dst_cpu)
+{
+       struct numa_group *ng = p->numa_group;
+       int dst_nid = cpu_to_node(dst_cpu);
+       int last_cpupid, this_cpupid;
+
+       this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+
+       /*
+        * Multi-stage node selection is used in conjunction with a periodic
+        * migration fault to build a temporal task<->page relation. By using
+        * a two-stage filter we remove short/unlikely relations.
+        *
+        * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
+        * a task's usage of a particular page (n_p) per total usage of this
+        * page (n_t) (in a given time-span) to a probability.
+        *
+        * Our periodic faults will sample this probability and getting the
+        * same result twice in a row, given these samples are fully
+        * independent, is then given by P(n)^2, provided our sample period
+        * is sufficiently short compared to the usage pattern.
+        *
+        * This quadratic squishes small probabilities, making it less likely we
+        * act on an unlikely task<->page relation.
+        */
+       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+       if (!cpupid_pid_unset(last_cpupid) &&
+                               cpupid_to_nid(last_cpupid) != dst_nid)
+               return false;
+
+       /* Always allow migrate on private faults */
+       if (cpupid_match_pid(p, last_cpupid))
+               return true;
+
+       /* A shared fault, but p->numa_group has not been set up yet. */
+       if (!ng)
+               return true;
+
+       /*
+        * Do not migrate if the destination is not a node that
+        * is actively used by this numa group.
+        */
+       if (!node_isset(dst_nid, ng->active_nodes))
+               return false;
+
+       /*
+        * Source is a node that is not actively used by this
+        * numa group, while the destination is. Migrate.
+        */
+       if (!node_isset(src_nid, ng->active_nodes))
+               return true;
+
+       /*
+        * Both source and destination are nodes in active
+        * use by this numa group. Maximize memory bandwidth
+        * by migrating from more heavily used groups, to less
+        * heavily used ones, spreading the load around.
+        * Use a 1/4 hysteresis to avoid spurious page movement.
+        */
+       return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
+}
+
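
As a rough numeric illustration of the two-stage filter explained in the comment above (a standalone userspace sketch with invented numbers, not part of the kernel): if a task generates 30% of the accesses to a page, two consecutive hinting faults agree on that task<->page relation only about 9% of the time, so short-lived relations rarely trigger a migration.

#include <stdio.h>

/* Illustration only: chance that two independent fault samples hit the task. */
static double two_stage_pass_probability(double task_share_of_accesses)
{
        /* P(n)^2, as in the comment above. */
        return task_share_of_accesses * task_share_of_accesses;
}

int main(void)
{
        printf("%.2f\n", two_stage_pass_probability(0.3));      /* prints 0.09 */
        return 0;
}
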
 static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long power_of(int cpu);
+static unsigned long capacity_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
@@ -974,11 +1034,11 @@ struct numa_stats {
        unsigned long load;
 
        /* Total compute capacity of CPUs on a node */
-       unsigned long power;
+       unsigned long compute_capacity;
 
        /* Approximate capacity in terms of runnable tasks on a node */
-       unsigned long capacity;
-       int has_capacity;
+       unsigned long task_capacity;
+       int has_free_capacity;
 };
 
 /*
@@ -986,7 +1046,8 @@ struct numa_stats {
  */
 static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-       int cpu, cpus = 0;
+       int smt, cpu, cpus = 0;
+       unsigned long capacity;
 
        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
@@ -994,7 +1055,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 
                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(cpu);
-               ns->power += power_of(cpu);
+               ns->compute_capacity += capacity_of(cpu);
 
                cpus++;
        }
@@ -1004,15 +1065,19 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
-        * We'll either bail at !has_capacity, or we'll detect a huge imbalance
-        * and bail there.
+        * We'll either bail at !has_free_capacity, or we'll detect a huge
+        * imbalance and bail there.
         */
        if (!cpus)
                return;
 
-       ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
-       ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
-       ns->has_capacity = (ns->nr_running < ns->capacity);
+       /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
+       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
+       capacity = cpus / smt; /* cores */
+
+       ns->task_capacity = min_t(unsigned, capacity,
+               DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
+       ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
 }
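
A self-contained sketch of the task-capacity estimate above, using hypothetical numbers (SCHED_CAPACITY_SCALE is 1024; the per-CPU capacities are invented for illustration): a node with 8 SMT siblings whose summed compute_capacity is about 4712 gives smt = 2 and therefore 4 task slots.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Illustration only: mirrors the smt/capacity arithmetic in the hunk above. */
static unsigned long node_task_capacity(unsigned long cpus,
                                        unsigned long compute_capacity)
{
        unsigned long smt, cores, by_capacity;

        /* smt := ceil(SCHED_CAPACITY_SCALE * cpus / compute_capacity) */
        smt = (SCHED_CAPACITY_SCALE * cpus + compute_capacity - 1) / compute_capacity;
        cores = cpus / smt;

        /* DIV_ROUND_CLOSEST(compute_capacity, SCHED_CAPACITY_SCALE) */
        by_capacity = (compute_capacity + SCHED_CAPACITY_SCALE / 2) / SCHED_CAPACITY_SCALE;

        return cores < by_capacity ? cores : by_capacity;
}

int main(void)
{
        /* 8 logical CPUs at ~589 capacity each (SMT-2): prints 4 */
        printf("%lu\n", node_task_capacity(8, 8 * 589));
        return 0;
}
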
 
 struct task_numa_env {
@@ -1023,7 +1088,7 @@ struct task_numa_env {
 
        struct numa_stats src_stats, dst_stats;
 
-       int imbalance_pct, idx;
+       int imbalance_pct;
 
        struct task_struct *best_task;
        long best_imp;
@@ -1043,6 +1108,50 @@ static void task_numa_assign(struct task_numa_env *env,
        env->best_cpu = env->dst_cpu;
 }
 
+static bool load_too_imbalanced(long src_load, long dst_load,
+                               struct task_numa_env *env)
+{
+       long imb, old_imb;
+       long orig_src_load, orig_dst_load;
+       long src_capacity, dst_capacity;
+
+       /*
+        * The load is corrected for the CPU capacity available on each node.
+        *
+        * src_load        dst_load
+        * ------------ vs ---------
+        * src_capacity    dst_capacity
+        */
+       src_capacity = env->src_stats.compute_capacity;
+       dst_capacity = env->dst_stats.compute_capacity;
+
+       /* We care about the slope of the imbalance, not the direction. */
+       if (dst_load < src_load)
+               swap(dst_load, src_load);
+
+       /* Is the difference below the threshold? */
+       imb = dst_load * src_capacity * 100 -
+             src_load * dst_capacity * env->imbalance_pct;
+       if (imb <= 0)
+               return false;
+
+       /*
+        * The imbalance is above the allowed threshold.
+        * Compare it with the old imbalance.
+        */
+       orig_src_load = env->src_stats.load;
+       orig_dst_load = env->dst_stats.load;
+
+       if (orig_dst_load < orig_src_load)
+               swap(orig_dst_load, orig_src_load);
+
+       old_imb = orig_dst_load * src_capacity * 100 -
+                 orig_src_load * dst_capacity * env->imbalance_pct;
+
+       /* Would this change make things worse? */
+       return (imb > old_imb);
+}
+
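
The cross-multiplication in load_too_imbalanced() compares the capacity-normalized loads without a division; a minimal sketch with made-up values (equal capacities of 1024 and imbalance_pct = 125, chosen only for illustration):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustration only: is the larger load/capacity ratio more than
 * imbalance_pct percent of the smaller one?
 */
static bool above_imbalance_threshold(long src_load, long dst_load,
                                      long src_capacity, long dst_capacity,
                                      long imbalance_pct)
{
        long tmp;

        /* As in the hunk above, only the loads are swapped. */
        if (dst_load < src_load) {
                tmp = dst_load;
                dst_load = src_load;
                src_load = tmp;
        }

        return dst_load * src_capacity * 100 >
               src_load * dst_capacity * imbalance_pct;
}

int main(void)
{
        /* 1300 vs 1000 on equal-capacity nodes exceeds a 25% allowance: prints 1 */
        printf("%d\n", above_imbalance_threshold(1000, 1300, 1024, 1024, 125));
        return 0;
}
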
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
@@ -1055,14 +1164,32 @@ static void task_numa_compare(struct task_numa_env *env,
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
-       long dst_load, src_load;
+       long src_load, dst_load;
        long load;
-       long imp = (groupimp > 0) ? groupimp : taskimp;
+       long imp = env->p->numa_group ? groupimp : taskimp;
+       long moveimp = imp;
 
        rcu_read_lock();
-       cur = ACCESS_ONCE(dst_rq->curr);
-       if (cur->pid == 0) /* idle */
+
+       raw_spin_lock_irq(&dst_rq->lock);
+       cur = dst_rq->curr;
+       /*
+        * No need to move the exiting task, and this ensures that ->curr
+        * wasn't reaped and thus get_task_struct() in task_numa_assign()
+        * is safe under RCU read lock.
+        * Note that rcu_read_lock() itself can't protect from the final
+        * put_task_struct() after the last schedule().
+        */
+       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       raw_spin_unlock_irq(&dst_rq->lock);
+
+       /*
+        * Because we have preemption enabled we can get migrated around and
+        * end up trying to select ourselves (current == env->p) as a swap candidate.
+        */
+       if (cur == env->p)
+               goto unlock;
 
        /*
         * "imp" is the fault differential for the source task between the
@@ -1095,11 +1222,6 @@ static void task_numa_compare(struct task_numa_env *env,
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
-                       if (env->p->numa_group)
-                               imp = groupimp;
-                       else
-                               imp = taskimp;
-
                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid) -
                                       group_weight(cur, env->dst_nid);
@@ -1109,33 +1231,47 @@ static void task_numa_compare(struct task_numa_env *env,
                }
        }
 
-       if (imp < env->best_imp)
+       if (imp <= env->best_imp && moveimp <= env->best_imp)
                goto unlock;
 
        if (!cur) {
                /* Is there capacity at our destination? */
-               if (env->src_stats.has_capacity &&
-                   !env->dst_stats.has_capacity)
+               if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
+                   !env->dst_stats.has_free_capacity)
                        goto unlock;
 
                goto balance;
        }
 
        /* Balance doesn't matter much if we're running a task per cpu */
-       if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+       if (imp > env->best_imp && src_rq->nr_running == 1 &&
+                       dst_rq->nr_running == 1)
                goto assign;
 
        /*
         * In the overloaded case, try and keep the load balanced.
         */
 balance:
-       dst_load = env->dst_stats.load;
-       src_load = env->src_stats.load;
-
-       /* XXX missing power terms */
        load = task_h_load(env->p);
-       dst_load += load;
-       src_load -= load;
+       dst_load = env->dst_stats.load + load;
+       src_load = env->src_stats.load - load;
+
+       if (moveimp > imp && moveimp > env->best_imp) {
+               /*
+                * If the improvement from just moving env->p direction is
+                * better than swapping tasks around, check if a move is
+                * possible. Store a slightly smaller score than moveimp,
+                * so an actually idle CPU will win.
+                */
+               if (!load_too_imbalanced(src_load, dst_load, env)) {
+                       imp = moveimp - 1;
+                       cur = NULL;
+                       goto assign;
+               }
+       }
+
+       if (imp <= env->best_imp)
+               goto unlock;
 
        if (cur) {
                load = task_h_load(cur);
@@ -1143,13 +1279,16 @@ balance:
                src_load += load;
        }
 
-       /* make src_load the smaller */
-       if (dst_load < src_load)
-               swap(dst_load, src_load);
-
-       if (src_load * env->imbalance_pct < dst_load * 100)
+       if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;
 
+       /*
+        * One idle CPU per node is evaluated for a task numa move.
+        * Call select_idle_sibling to maybe find a better one.
+        */
+       if (!cur)
+               env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+
 assign:
        task_numa_assign(env, cur, imp);
 unlock:
@@ -1211,7 +1350,7 @@ static int task_numa_migrate(struct task_struct *p)
         * elsewhere, so there is no point in (re)trying.
         */
        if (unlikely(!sd)) {
-               p->numa_preferred_nid = cpu_to_node(task_cpu(p));
+               p->numa_preferred_nid = task_node(p);
                return -EINVAL;
        }
 
@@ -1223,9 +1362,8 @@ static int task_numa_migrate(struct task_struct *p)
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
-       /* If the preferred nid has capacity, try to use it. */
-       if (env.dst_stats.has_capacity)
-               task_numa_find_cpu(&env, taskimp, groupimp);
+       /* Try to find a spot on the preferred nid. */
+       task_numa_find_cpu(&env, taskimp, groupimp);
 
        /* No space available on the preferred nid. Look elsewhere. */
        if (env.best_cpu == -1) {
@@ -1245,12 +1383,28 @@ static int task_numa_migrate(struct task_struct *p)
                }
        }
 
+       /*
+        * If the task is part of a workload that spans multiple NUMA nodes,
+        * and is migrating into one of the workload's active nodes, remember
+        * this node as the task's preferred numa node, so the workload can
+        * settle down.
+        * A task that migrated to a second choice node will be better off
+        * trying for a better one later. Do not set the preferred node here.
+        */
+       if (p->numa_group) {
+               if (env.best_cpu == -1)
+                       nid = env.src_nid;
+               else
+                       nid = env.dst_nid;
+
+               if (node_isset(nid, p->numa_group->active_nodes))
+                       sched_setnuma(p, env.dst_nid);
+       }
+
        /* No better CPU than the current one was found. */
        if (env.best_cpu == -1)
                return -EAGAIN;
 
-       sched_setnuma(p, env.dst_nid);
-
        /*
         * Reset the scan period if the task is being rescheduled on an
         * alternative node to recheck if the tasks is now properly placed.
@@ -1258,11 +1412,15 @@ static int task_numa_migrate(struct task_struct *p)
        p->numa_scan_period = task_scan_min(p);
 
        if (env.best_task == NULL) {
-               int ret = migrate_task_to(p, env.best_cpu);
+               ret = migrate_task_to(p, env.best_cpu);
+               if (ret != 0)
+                       trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }
 
        ret = migrate_swap(p, env.best_task);
+       if (ret != 0)
+               trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
        put_task_struct(env.best_task);
        return ret;
 }
@@ -1270,30 +1428,65 @@ static int task_numa_migrate(struct task_struct *p)
 /* Attempt to migrate a task to a CPU on the preferred node. */
 static void numa_migrate_preferred(struct task_struct *p)
 {
+       unsigned long interval = HZ;
+
        /* This task has no NUMA fault statistics yet */
-       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
                return;
 
        /* Periodically retry migrating the task to the preferred node */
-       p->numa_migrate_retry = jiffies + HZ;
+       interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
+       p->numa_migrate_retry = jiffies + interval;
 
        /* Success if task is already running on preferred CPU */
-       if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
+       if (task_node(p) == p->numa_preferred_nid)
                return;
 
        /* Otherwise, try migrate to a CPU on the preferred node */
        task_numa_migrate(p);
 }
 
+/*
+ * Find the nodes on which the workload is actively running. We do this by
+ * tracking the nodes from which NUMA hinting faults are triggered. This can
+ * be different from the set of nodes where the workload's memory is currently
+ * located.
+ *
+ * The bitmask is used to make smarter decisions on when to do NUMA page
+ * migrations. To prevent flip-flopping and excessive page migrations, nodes
+ * are added when they cause over 6/16 of the maximum number of faults, but
+ * only removed when they drop below 3/16.
+ */
+static void update_numa_active_node_mask(struct numa_group *numa_group)
+{
+       unsigned long faults, max_faults = 0;
+       int nid;
+
+       for_each_online_node(nid) {
+               faults = group_faults_cpu(numa_group, nid);
+               if (faults > max_faults)
+                       max_faults = faults;
+       }
+
+       for_each_online_node(nid) {
+               faults = group_faults_cpu(numa_group, nid);
+               if (!node_isset(nid, numa_group->active_nodes)) {
+                       if (faults > max_faults * 6 / 16)
+                               node_set(nid, numa_group->active_nodes);
+               } else if (faults < max_faults * 3 / 16)
+                       node_clear(nid, numa_group->active_nodes);
+       }
+}
+
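
A worked example of the 6/16 and 3/16 hysteresis above (fault counts invented for illustration): with max_faults = 1600, a node must exceed 600 CPU-side faults to enter active_nodes, but an already-active node is only dropped once it falls below 300.

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: hysteresis decision for a single node. */
static bool node_stays_active(bool currently_active, unsigned long faults,
                              unsigned long max_faults)
{
        if (!currently_active)
                return faults > max_faults * 6 / 16;    /* join threshold */

        return faults >= max_faults * 3 / 16;           /* stay threshold */
}

int main(void)
{
        printf("%d\n", node_stays_active(false, 500, 1600));    /* prints 0 */
        printf("%d\n", node_stays_active(true, 500, 1600));     /* prints 1 */
        return 0;
}
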
 /*
  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  * increments. The more local the fault statistics are, the higher the scan
- * period will be for the next scan window. If local/remote ratio is below
- * NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) the
- * scan period will decrease
+ * period will be for the next scan window. If local/(local+remote) ratio is
+ * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
+ * the scan period will decrease. Aim for 70% local accesses.
  */
 #define NUMA_PERIOD_SLOTS 10
-#define NUMA_PERIOD_THRESHOLD 3
+#define NUMA_PERIOD_THRESHOLD 7
 
 /*
  * Increase the scan period (slow down scanning) if the majority of
@@ -1350,8 +1543,7 @@ static void update_task_scan_period(struct task_struct *p,
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly
                 */
-               period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS);
-               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }
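
A quick numeric illustration of the ratio scaling above (hypothetical fault counts): with 3 private and 7 shared faults, ratio = DIV_ROUND_UP(3 * 10, 11) = 3, so only 3/10 of the computed period adjustment survives.

#include <stdio.h>

#define NUMA_PERIOD_SLOTS 10

/* Illustration only: damp a scan-period adjustment when shared faults dominate. */
static long scaled_adjustment(long diff, unsigned long private_faults,
                              unsigned long shared_faults)
{
        /* DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, private + shared + 1) */
        unsigned long ratio = (private_faults * NUMA_PERIOD_SLOTS +
                               private_faults + shared_faults) /
                              (private_faults + shared_faults + 1);

        return diff * (long)ratio / NUMA_PERIOD_SLOTS;
}

int main(void)
{
        printf("%ld\n", scaled_adjustment(1000, 3, 7));         /* prints 300 */
        return 0;
}
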
 
@@ -1360,11 +1552,41 @@ static void update_task_scan_period(struct task_struct *p,
        memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
 }
 
+/*
+ * Get the fraction of time the task has been running since the last
+ * NUMA placement cycle. The scheduler keeps similar statistics, but
+ * decays those on a 32ms period, which is orders of magnitude off
+ * from the dozens-of-seconds NUMA balancing period. Use the scheduler
+ * stats only if the task is so new there are no NUMA statistics yet.
+ */
+static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
+{
+       u64 runtime, delta, now;
+       /* Use the start of this time slice to avoid calculations. */
+       now = p->se.exec_start;
+       runtime = p->se.sum_exec_runtime;
+
+       if (p->last_task_numa_placement) {
+               delta = runtime - p->last_sum_exec_runtime;
+               *period = now - p->last_task_numa_placement;
+       } else {
+               delta = p->se.avg.runnable_avg_sum;
+               *period = p->se.avg.runnable_avg_period;
+       }
+
+       p->last_sum_exec_runtime = runtime;
+       p->last_task_numa_placement = now;
+
+       return delta;
+}
+
 static void task_numa_placement(struct task_struct *p)
 {
        int seq, nid, max_nid = -1, max_group_nid = -1;
        unsigned long max_faults = 0, max_group_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
+       unsigned long total_faults;
+       u64 runtime, period;
        spinlock_t *group_lock = NULL;
 
        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
@@ -1373,10 +1595,14 @@ static void task_numa_placement(struct task_struct *p)
        p->numa_scan_seq = seq;
        p->numa_scan_period_max = task_scan_max(p);
 
+       total_faults = p->numa_faults_locality[0] +
+                      p->numa_faults_locality[1];
+       runtime = numa_get_avg_runtime(p, &period);
+
        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
-               spin_lock(group_lock);
+               spin_lock_irq(group_lock);
        }
 
        /* Find the node with the highest number of faults */
@@ -1384,24 +1610,37 @@ static void task_numa_placement(struct task_struct *p)
                unsigned long faults = 0, group_faults = 0;
                int priv, i;
 
-               for (priv = 0; priv < 2; priv++) {
-                       long diff;
+               for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
+                       long diff, f_diff, f_weight;
 
                        i = task_faults_idx(nid, priv);
-                       diff = -p->numa_faults[i];
 
                        /* Decay existing window, copy faults since last scan */
-                       p->numa_faults[i] >>= 1;
-                       p->numa_faults[i] += p->numa_faults_buffer[i];
-                       fault_types[priv] += p->numa_faults_buffer[i];
-                       p->numa_faults_buffer[i] = 0;
+                       diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
+                       fault_types[priv] += p->numa_faults_buffer_memory[i];
+                       p->numa_faults_buffer_memory[i] = 0;
 
-                       faults += p->numa_faults[i];
-                       diff += p->numa_faults[i];
+                       /*
+                        * Normalize the faults_from, so all tasks in a group
+                        * count according to CPU use, instead of by the raw
+                        * number of faults. Tasks with little runtime have
+                        * little over-all impact on throughput, and thus their
+                        * faults are less important.
+                        */
+                       f_weight = div64_u64(runtime << 16, period + 1);
+                       f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
+                                  (total_faults + 1);
+                       f_diff = f_weight - p->numa_faults_cpu[i] / 2;
+                       p->numa_faults_buffer_cpu[i] = 0;
+
+                       p->numa_faults_memory[i] += diff;
+                       p->numa_faults_cpu[i] += f_diff;
+                       faults += p->numa_faults_memory[i];
                        p->total_numa_faults += diff;
                        if (p->numa_group) {
                                /* safe because we can only change our own group */
                                p->numa_group->faults[i] += diff;
+                               p->numa_group->faults_cpu[i] += f_diff;
                                p->numa_group->total_faults += diff;
                                group_faults += p->numa_group->faults[i];
                        }
@@ -1421,30 +1660,18 @@ static void task_numa_placement(struct task_struct *p)
        update_task_scan_period(p, fault_types[0], fault_types[1]);
 
        if (p->numa_group) {
-               /*
-                * If the preferred task and group nids are different,
-                * iterate over the nodes again to find the best place.
-                */
-               if (max_nid != max_group_nid) {
-                       unsigned long weight, max_weight = 0;
-
-                       for_each_online_node(nid) {
-                               weight = task_weight(p, nid) + group_weight(p, nid);
-                               if (weight > max_weight) {
-                                       max_weight = weight;
-                                       max_nid = nid;
-                               }
-                       }
-               }
-
-               spin_unlock(group_lock);
+               update_numa_active_node_mask(p->numa_group);
+               spin_unlock_irq(group_lock);
+               max_nid = max_group_nid;
        }
 
-       /* Preferred node as the node with the most faults */
-       if (max_faults && max_nid != p->numa_preferred_nid) {
-               /* Update the preferred nid and migrate task if possible */
-               sched_setnuma(p, max_nid);
-               numa_migrate_preferred(p);
+       if (max_faults) {
+               /* Set the new preferred node */
+               if (max_nid != p->numa_preferred_nid)
+                       sched_setnuma(p, max_nid);
+
+               if (task_node(p) != p->numa_preferred_nid)
+                       numa_migrate_preferred(p);
        }
 }
 
@@ -1470,7 +1697,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 
        if (unlikely(!p->numa_group)) {
                unsigned int size = sizeof(struct numa_group) +
-                                   2*nr_node_ids*sizeof(unsigned long);
+                                   4*nr_node_ids*sizeof(unsigned long);
 
                grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
                if (!grp)
@@ -1480,9 +1707,14 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                spin_lock_init(&grp->lock);
                INIT_LIST_HEAD(&grp->task_list);
                grp->gid = p->pid;
+               /* Second half of the array tracks nids where faults happen */
+               grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
+                                               nr_node_ids;
 
-               for (i = 0; i < 2*nr_node_ids; i++)
-                       grp->faults[i] = p->numa_faults[i];
+               node_set(task_node(current), grp->active_nodes);
+
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       grp->faults[i] = p->numa_faults_memory[i];
 
                grp->total_faults = p->total_numa_faults;
 
@@ -1537,11 +1769,12 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        if (!join)
                return;
 
-       double_lock(&my_grp->lock, &grp->lock);
+       BUG_ON(irqs_disabled());
+       double_lock_irq(&my_grp->lock, &grp->lock);
 
-       for (i = 0; i < 2*nr_node_ids; i++) {
-               my_grp->faults[i] -= p->numa_faults[i];
-               grp->faults[i] += p->numa_faults[i];
+       for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
+               my_grp->faults[i] -= p->numa_faults_memory[i];
+               grp->faults[i] += p->numa_faults_memory[i];
        }
        my_grp->total_faults -= p->total_numa_faults;
        grp->total_faults += p->total_numa_faults;
@@ -1551,7 +1784,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        grp->nr_tasks++;
 
        spin_unlock(&my_grp->lock);
-       spin_unlock(&grp->lock);
+       spin_unlock_irq(&grp->lock);
 
        rcu_assign_pointer(p->numa_group, grp);
 
@@ -1566,34 +1799,39 @@ no_join:
 void task_numa_free(struct task_struct *p)
 {
        struct numa_group *grp = p->numa_group;
+       void *numa_faults = p->numa_faults_memory;
+       unsigned long flags;
        int i;
-       void *numa_faults = p->numa_faults;
 
        if (grp) {
-               spin_lock(&grp->lock);
-               for (i = 0; i < 2*nr_node_ids; i++)
-                       grp->faults[i] -= p->numa_faults[i];
+               spin_lock_irqsave(&grp->lock, flags);
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       grp->faults[i] -= p->numa_faults_memory[i];
                grp->total_faults -= p->total_numa_faults;
 
                list_del(&p->numa_entry);
                grp->nr_tasks--;
-               spin_unlock(&grp->lock);
-               rcu_assign_pointer(p->numa_group, NULL);
+               spin_unlock_irqrestore(&grp->lock, flags);
+               RCU_INIT_POINTER(p->numa_group, NULL);
                put_numa_group(grp);
        }
 
-       p->numa_faults = NULL;
-       p->numa_faults_buffer = NULL;
+       p->numa_faults_memory = NULL;
+       p->numa_faults_buffer_memory = NULL;
+       p->numa_faults_cpu = NULL;
+       p->numa_faults_buffer_cpu = NULL;
        kfree(numa_faults);
 }
 
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int last_cpupid, int node, int pages, int flags)
+void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 {
        struct task_struct *p = current;
        bool migrated = flags & TNF_MIGRATED;
+       int cpu_node = task_node(current);
+       int local = !!(flags & TNF_FAULT_LOCAL);
        int priv;
 
        if (!numabalancing_enabled)
@@ -1603,21 +1841,25 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
        if (!p->mm)
                return;
 
-       /* Do not worry about placement if exiting */
-       if (p->state == TASK_DEAD)
-               return;
-
        /* Allocate buffer to track faults on a per-node basis */
-       if (unlikely(!p->numa_faults)) {
-               int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
+       if (unlikely(!p->numa_faults_memory)) {
+               int size = sizeof(*p->numa_faults_memory) *
+                          NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
 
-               /* numa_faults and numa_faults_buffer share the allocation */
-               p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
-               if (!p->numa_faults)
+               p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
+               if (!p->numa_faults_memory)
                        return;
 
-               BUG_ON(p->numa_faults_buffer);
-               p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
+               BUG_ON(p->numa_faults_buffer_memory);
+               /*
+                * The averaged statistics, shared & private, memory & cpu,
+                * occupy the first half of the array. The second half of the
+                * array is for current counters, which are averaged into the
+                * first set by task_numa_placement.
+                */
+               p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
+               p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
+               p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
                p->total_numa_faults = 0;
                memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
        }
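
For reference, a sketch of how the single allocation set up above is carved into four per-node regions (offsets in units of unsigned long, two slots per node as described in the comment; the helper and its output are purely illustrative):

#include <stdio.h>

/*
 * Illustration only: offsets into the single numa_faults_memory allocation
 * for a machine with nr_node_ids NUMA nodes. Two slots per node per region,
 * indexed as in task_faults_idx().
 */
static void print_layout(int nr_node_ids)
{
        printf("numa_faults_memory        at %d\n", 0);
        printf("numa_faults_cpu           at %d\n", 2 * nr_node_ids);
        printf("numa_faults_buffer_memory at %d\n", 4 * nr_node_ids);
        printf("numa_faults_buffer_cpu    at %d\n", 6 * nr_node_ids);
}

int main(void)
{
        print_layout(2);        /* e.g. a two-node machine */
        return 0;
}
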
@@ -1634,6 +1876,17 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
                        task_numa_group(p, last_cpupid, flags, &priv);
        }
 
+       /*
+        * If a workload spans multiple NUMA nodes, a shared fault that
+        * occurs wholly within the set of nodes that the workload is
+        * actively using should be counted as local. This allows the
+        * scan rate to slow down when a workload has settled down.
+        */
+       if (!priv && !local && p->numa_group &&
+                       node_isset(cpu_node, p->numa_group->active_nodes) &&
+                       node_isset(mem_node, p->numa_group->active_nodes))
+               local = 1;
+
        task_numa_placement(p);
 
        /*
@@ -1646,8 +1899,9 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
        if (migrated)
                p->numa_pages_migrated += pages;
 
-       p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
-       p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
+       p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
+       p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
+       p->numa_faults_locality[local] += pages;
 }
 
 static void reset_ptenuma_scan(struct task_struct *p)
@@ -1725,7 +1979,7 @@ void task_numa_work(struct callback_head *work)
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
-               if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
+               if (!vma_migratable(vma) || !vma_policy_mof(vma))
                        continue;
 
                /*
@@ -1762,6 +2016,8 @@ void task_numa_work(struct callback_head *work)
                        start = end;
                        if (pages <= 0)
                                goto out;
+
+                       cond_resched();
                } while (end != vma->vm_end);
        }
 
@@ -1988,8 +2244,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 
        /*
         * As y^PERIOD = 1/2, we can combine
-        *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
-        * With a look-up table which covers k^n (n<PERIOD)
+        *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+        * With a look-up table which covers y^n (n<PERIOD)
         *
         * To achieve constant time decay_load.
         */
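
To make the corrected identity concrete, a minimal floating-point sketch (illustration only; the kernel itself works in fixed point with a precomputed table): with y defined by y^32 = 1/2, y^n splits into a cheap halving per full period plus one lookup for the remainder.

#include <math.h>
#include <stdio.h>

#define PERIOD 32                       /* y^PERIOD == 1/2 */

static double y_pow[PERIOD];            /* y^0 .. y^(PERIOD-1) */

static void init_table(void)
{
        double y = pow(0.5, 1.0 / PERIOD);
        double v = 1.0;

        for (int i = 0; i < PERIOD; i++, v *= y)
                y_pow[i] = v;
}

/* y^n = (1/2)^(n/PERIOD) * y^(n%PERIOD): one scale-down plus one lookup. */
static double decay_factor(unsigned int n)
{
        return ldexp(y_pow[n % PERIOD], -(int)(n / PERIOD));
}

int main(void)
{
        init_table();
        printf("%.4f\n", decay_factor(64));     /* two half-lives: prints 0.2500 */
        return 0;
}
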
@@ -2154,6 +2410,9 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
        tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
        tg_contrib -= cfs_rq->tg_load_contrib;
 
+       if (!tg_contrib)
+               return;
+
        if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
                atomic_long_add(tg_contrib, &tg->load_avg);
                cfs_rq->tg_load_contrib += tg_contrib;
@@ -2222,13 +2481,20 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
                se->avg.load_avg_contrib >>= NICE_0_SHIFT;
        }
 }
-#else
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+       __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+       __update_tg_runnable_avg(&rq->avg, &rq->cfs);
+}
+#else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
                                                  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
@@ -2326,12 +2592,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
        __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
-       __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
-       __update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
-
 /* Add the load generated by se into cfs_rq's child load-average */
 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                                                  struct sched_entity *se,
@@ -2365,13 +2625,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                }
                wakeup = 0;
        } else {
-               /*
-                * Task re-woke on same cpu (or else migrate_task_rq_fair()
-                * would have made count negative); we must be careful to avoid
-                * double-accounting blocked time after synchronizing decays.
-                */
-               se->avg.last_runnable_update += __synchronize_entity_decay(se)
-                                                       << 20;
+               __synchronize_entity_decay(se);
        }
 
        /* migrated tasks did not contribute to our blocked load */
@@ -2425,7 +2679,10 @@ void idle_exit_fair(struct rq *this_rq)
        update_rq_runnable_avg(this_rq, 0);
 }
 
-#else
+static int idle_balance(struct rq *this_rq);
+
+#else /* CONFIG_SMP */
+
 static inline void update_entity_load_avg(struct sched_entity *se,
                                          int update_cfs_rq) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2437,7 +2694,13 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                              int force_update) {}
-#endif
+
+static inline int idle_balance(struct rq *rq)
+{
+       return 0;
+}
+
+#endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -2587,10 +2850,10 @@ static void __clear_buddies_last(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->last == se)
-                       cfs_rq->last = NULL;
-               else
+               if (cfs_rq->last != se)
                        break;
+
+               cfs_rq->last = NULL;
        }
 }
 
@@ -2598,10 +2861,10 @@ static void __clear_buddies_next(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->next == se)
-                       cfs_rq->next = NULL;
-               else
+               if (cfs_rq->next != se)
                        break;
+
+               cfs_rq->next = NULL;
        }
 }
 
@@ -2609,10 +2872,10 @@ static void __clear_buddies_skip(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->skip == se)
-                       cfs_rq->skip = NULL;
-               else
+               if (cfs_rq->skip != se)
                        break;
+
+               cfs_rq->skip = NULL;
        }
 }
 
@@ -2688,7 +2951,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -2712,7 +2975,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static void
@@ -2755,17 +3018,36 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
  * 3) pick the "last" process, for cache locality
  * 4) do not run the "skip" process, if something else is available
  */
-static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-       struct sched_entity *se = __pick_first_entity(cfs_rq);
-       struct sched_entity *left = se;
+       struct sched_entity *left = __pick_first_entity(cfs_rq);
+       struct sched_entity *se;
+
+       /*
+        * If curr is set we have to see if it's left of the leftmost entity
+        * still in the tree, provided there was anything in the tree at all.
+        */
+       if (!left || (curr && entity_before(curr, left)))
+               left = curr;
+
+       se = left; /* ideally we run the leftmost entity */
 
        /*
         * Avoid running the skip buddy, if running something else can
         * be done without getting too unfair.
         */
        if (cfs_rq->skip == se) {
-               struct sched_entity *second = __pick_next_entity(se);
+               struct sched_entity *second;
+
+               if (se == curr) {
+                       second = __pick_first_entity(cfs_rq);
+               } else {
+                       second = __pick_next_entity(se);
+                       if (!second || (curr && entity_before(curr, second)))
+                               second = curr;
+               }
+
                if (second && wakeup_preempt_entity(second, left) < 1)
                        se = second;
        }
@@ -2787,7 +3069,7 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
        return se;
 }
 
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
@@ -2833,7 +3115,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                return;
        }
        /*
@@ -2951,7 +3233,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
                 */
                if (!cfs_b->timer_active) {
                        __refill_cfs_bandwidth_runtime(cfs_b);
-                       __start_cfs_bandwidth(cfs_b);
+                       __start_cfs_bandwidth(cfs_b, false);
                }
 
                if (cfs_b->runtime > 0) {
@@ -2996,10 +3278,12 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * has not truly expired.
         *
         * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced.
+        * whether the global deadline has advanced. It is valid to compare
+        * cfs_b->runtime_expires without any locks since we only care about
+        * exact equality, so a partial write will still work.
         */
 
-       if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
+       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@ -3022,7 +3306,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -3123,14 +3407,18 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        }
 
        if (!se)
-               rq->nr_running -= task_delta;
+               sub_nr_running(rq, task_delta);
 
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
-       list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+       /*
+        * Add to the _head_ of the list, so that an already-started
+        * distribute_cfs_runtime will not see us
+        */
+       list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
        if (!cfs_b->timer_active)
-               __start_cfs_bandwidth(cfs_b);
+               __start_cfs_bandwidth(cfs_b, false);
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3174,18 +3462,19 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        }
 
        if (!se)
-               rq->nr_running += task_delta;
+               add_nr_running(rq, task_delta);
 
        /* determine whether we need to wake up potentially idle cpu */
        if (rq->curr == rq->idle && rq->cfs.nr_running)
-               resched_task(rq->curr);
+               resched_curr(rq);
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                u64 remaining, u64 expires)
 {
        struct cfs_rq *cfs_rq;
-       u64 runtime = remaining;
+       u64 runtime;
+       u64 starting_runtime = remaining;
 
        rcu_read_lock();
        list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -3216,7 +3505,7 @@ next:
        }
        rcu_read_unlock();
 
-       return remaining;
+       return starting_runtime - remaining;
 }
 
 /*
@@ -3228,21 +3517,21 @@ next:
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 {
        u64 runtime, runtime_expires;
-       int idle = 1, throttled;
+       int throttled;
 
-       raw_spin_lock(&cfs_b->lock);
        /* no need to continue the timer with no bandwidth constraint */
        if (cfs_b->quota == RUNTIME_INF)
-               goto out_unlock;
+               goto out_deactivate;
 
        throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-       /* idle depends on !throttled (for the case of a large deficit) */
-       idle = cfs_b->idle && !throttled;
        cfs_b->nr_periods += overrun;
 
-       /* if we're going inactive then everything else can be deferred */
-       if (idle)
-               goto out_unlock;
+       /*
+        * idle depends on !throttled (for the case of a large deficit), and if
+        * we're going inactive then everything else can be deferred
+        */
+       if (cfs_b->idle && !throttled)
+               goto out_deactivate;
 
        /*
         * if we have relooped after returning idle once, we need to update our
@@ -3256,28 +3545,23 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        if (!throttled) {
                /* mark as potentially idle for the upcoming period */
                cfs_b->idle = 1;
-               goto out_unlock;
+               return 0;
        }
 
        /* account preceding periods in which throttling occurred */
        cfs_b->nr_throttled += overrun;
 
-       /*
-        * There are throttled entities so we must first use the new bandwidth
-        * to unthrottle them before making it generally available.  This
-        * ensures that all existing debts will be paid before a new cfs_rq is
-        * allowed to run.
-        */
-       runtime = cfs_b->runtime;
        runtime_expires = cfs_b->runtime_expires;
-       cfs_b->runtime = 0;
 
        /*
-        * This check is repeated as we are holding onto the new bandwidth
-        * while we unthrottle.  This can potentially race with an unthrottled
-        * group trying to acquire new bandwidth from the global pool.
+        * This check is repeated as we are holding onto the new bandwidth while
+        * we unthrottle. This can potentially race with an unthrottled group
+        * trying to acquire new bandwidth from the global pool. This can result
+        * in us over-using our runtime if it is all used during this loop, but
+        * only by limited amounts in that extreme case.
         */
-       while (throttled && runtime > 0) {
+       while (throttled && cfs_b->runtime > 0) {
+               runtime = cfs_b->runtime;
                raw_spin_unlock(&cfs_b->lock);
                /* we can't nest cfs_b->lock while distributing bandwidth */
                runtime = distribute_cfs_runtime(cfs_b, runtime,
@@ -3285,10 +3569,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
                raw_spin_lock(&cfs_b->lock);
 
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        }
 
-       /* return (any) remaining runtime */
-       cfs_b->runtime = runtime;
        /*
         * While we are ensured activity in the period following an
         * unthrottle, this also covers the case in which the new bandwidth is
@@ -3296,12 +3580,12 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
         * timer to remain active while there are any throttled entities.)
         */
        cfs_b->idle = 0;
-out_unlock:
-       if (idle)
-               cfs_b->timer_active = 0;
-       raw_spin_unlock(&cfs_b->lock);
 
-       return idle;
+       return 0;
+
+out_deactivate:
+       cfs_b->timer_active = 0;
+       return 1;
 }
 
 /* a cfs_rq won't donate quota below this amount */
@@ -3399,10 +3683,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
                return;
        }
 
-       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
                runtime = cfs_b->runtime;
-               cfs_b->runtime = 0;
-       }
+
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -3413,7 +3696,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
        raw_spin_lock(&cfs_b->lock);
        if (expires == cfs_b->runtime_expires)
-               cfs_b->runtime = runtime;
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3442,22 +3725,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
        if (!cfs_bandwidth_used())
-               return;
+               return false;
 
        if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
-               return;
+               return false;
 
        /*
         * it's possible for a throttled entity to be forced into a running
         * state (e.g. set_curr_task), in this case we're finished.
         */
        if (cfs_rq_throttled(cfs_rq))
-               return;
+               return true;
 
        throttle_cfs_rq(cfs_rq);
+       return true;
 }
 
 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
@@ -3477,6 +3761,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
        int overrun;
        int idle = 0;
 
+       raw_spin_lock(&cfs_b->lock);
        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, cfs_b->period);
@@ -3486,6 +3771,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 
                idle = do_sched_cfs_period_timer(cfs_b, overrun);
        }
+       raw_spin_unlock(&cfs_b->lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -3511,7 +3797,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 }
 
 /* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 {
        /*
         * The timer may be active because we're trying to set a new bandwidth
@@ -3526,7 +3812,7 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
                cpu_relax();
                raw_spin_lock(&cfs_b->lock);
                /* if someone else restarted the timer then we're done */
-               if (cfs_b->timer_active)
+               if (!force && cfs_b->timer_active)
                        return;
        }
 
@@ -3540,13 +3826,24 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
        for_each_leaf_cfs_rq(rq, cfs_rq) {
-               struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+               struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+
+               raw_spin_lock(&cfs_b->lock);
+               cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
+               raw_spin_unlock(&cfs_b->lock);
+       }
+}
+
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+{
+       struct cfs_rq *cfs_rq;
 
+       for_each_leaf_cfs_rq(rq, cfs_rq) {
                if (!cfs_rq->runtime_enabled)
                        continue;
 
@@ -3554,7 +3851,13 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
                 * clock_task is not advancing so we just need to make sure
                 * there's some valid quota amount
                 */
-               cfs_rq->runtime_remaining = cfs_b->quota;
+               cfs_rq->runtime_remaining = 1;
+               /*
+                * Offline rq is schedulable till cpu is completely disabled
+                * in take_cpu_down(), so we prevent new cfs throttling here.
+                */
+               cfs_rq->runtime_enabled = 0;
+
                if (cfs_rq_throttled(cfs_rq))
                        unthrottle_cfs_rq(cfs_rq);
        }
@@ -3567,7 +3870,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 }
 
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
@@ -3598,6 +3901,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
        return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+static inline void update_runtime_enabled(struct rq *rq) {}
 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -3621,17 +3925,9 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
                if (delta < 0) {
                        if (rq->curr == p)
-                               resched_task(p);
+                               resched_curr(rq);
                        return;
                }
-
-               /*
-                * Don't schedule slices shorter than 10000ns, that just
-                * doesn't make sense. Rely on vruntime for fairness.
-                */
-               if (rq->curr != p)
-                       delta = max_t(s64, 10000LL, delta);
-
                hrtick_start(rq, delta);
        }
 }
@@ -3705,7 +4001,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
        if (!se) {
                update_rq_runnable_avg(rq, rq->nr_running);
-               inc_nr_running(rq);
+               add_nr_running(rq, 1);
        }
        hrtick_update(rq);
 }
@@ -3765,7 +4061,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        }
 
        if (!se) {
-               dec_nr_running(rq);
+               sub_nr_running(rq, 1);
                update_rq_runnable_avg(rq, 1);
        }
        hrtick_update(rq);
@@ -3811,15 +4107,15 @@ static unsigned long target_load(int cpu, int type)
        return max(rq->cpu_load[type-1], total);
 }
 
-static unsigned long power_of(int cpu)
+static unsigned long capacity_of(int cpu)
 {
-       return cpu_rq(cpu)->cpu_power;
+       return cpu_rq(cpu)->cpu_capacity;
 }
 
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+       unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
        unsigned long load_avg = rq->cfs.runnable_load_avg;
 
        if (nr_running)
@@ -3835,8 +4131,8 @@ static void record_wakee(struct task_struct *p)
         * about the boundary, a really active task won't care
         * about the loss.
         */
-       if (jiffies > current->wakee_flip_decay_ts + HZ) {
-               current->wakee_flips = 0;
+       if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
+               current->wakee_flips >>= 1;
                current->wakee_flip_decay_ts = jiffies;
        }
 
@@ -4008,8 +4304,8 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
        s64 this_load, load;
+       s64 this_eff_load, prev_eff_load;
        int idx, this_cpu, prev_cpu;
-       unsigned long tl_per_task;
        struct task_group *tg;
        unsigned long weight;
        int balanced;
@@ -4052,47 +4348,30 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       if (this_load > 0) {
-               s64 this_eff_load, prev_eff_load;
+       this_eff_load = 100;
+       this_eff_load *= capacity_of(prev_cpu);
 
-               this_eff_load = 100;
-               this_eff_load *= power_of(prev_cpu);
-               this_eff_load *= this_load +
+       prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+       prev_eff_load *= capacity_of(this_cpu);
+
+       if (this_load > 0) {
+               this_eff_load *= this_load +
                        effective_load(tg, this_cpu, weight, weight);
 
-               prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-               prev_eff_load *= power_of(this_cpu);
                prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+       }
 
-               balanced = this_eff_load <= prev_eff_load;
-       } else
-               balanced = true;
-
-       /*
-        * If the currently running task will sleep within
-        * a reasonable amount of time then attract this newly
-        * woken task:
-        */
-       if (sync && balanced)
-               return 1;
+       balanced = this_eff_load <= prev_eff_load;
 
        schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
-       tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       if (balanced ||
-           (this_load <= load &&
-            this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
-               /*
-                * This domain has SD_WAKE_AFFINE and
-                * p is cache cold in this domain, and
-                * there is no bad imbalance.
-                */
-               schedstat_inc(sd, ttwu_move_affine);
-               schedstat_inc(p, se.statistics.nr_wakeups_affine);
+       if (!balanced)
+               return 0;
 
-               return 1;
-       }
-       return 0;
+       schedstat_inc(sd, ttwu_move_affine);
+       schedstat_inc(p, se.statistics.nr_wakeups_affine);
+
+       return 1;
 }
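
A minimal user-space sketch of the balance test above, under the assumption that the effective_load() contributions are already folded into the two load arguments: the waking side is weighted by the previous CPU's capacity, the previous side by the waking CPU's capacity plus half the domain's imbalance percentage, and the wakeup is pulled only when the former does not exceed the latter.

#include <stdio.h>

/* Toy model of the wake_affine() comparison; capacities, loads and the
 * imbalance percentage are made-up example values. */
static int wake_affine_balanced(long long this_load, long long prev_load,
                                unsigned long this_capacity,
                                unsigned long prev_capacity,
                                unsigned int imbalance_pct)
{
        long long this_eff_load = 100;
        long long prev_eff_load = 100 + (imbalance_pct - 100) / 2;

        this_eff_load *= prev_capacity;
        prev_eff_load *= this_capacity;

        if (this_load > 0) {
                this_eff_load *= this_load;
                prev_eff_load *= prev_load;
        }

        return this_eff_load <= prev_eff_load;
}

int main(void)
{
        /* imbalance_pct = 125 gives the waking CPU a ~12% advantage */
        printf("balanced: %d\n",
               wake_affine_balanced(1024, 1200, 1024, 1024, 125));
        return 0;
}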
 
 /*
@@ -4101,12 +4380,16 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
  */
 static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
-                 int this_cpu, int load_idx)
+                 int this_cpu, int sd_flag)
 {
        struct sched_group *idlest = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
+       int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+       if (sd_flag & SD_BALANCE_WAKE)
+               load_idx = sd->wake_idx;
+
        do {
                unsigned long load, avg_load;
                int local_group;
@@ -4133,8 +4416,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                        avg_load += load;
                }
 
-               /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
+               /* Adjust by relative CPU capacity of the group */
+               avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
 
                if (local_group) {
                        this_load = avg_load;
@@ -4156,20 +4439,46 @@ static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
        unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
+       unsigned int min_exit_latency = UINT_MAX;
+       u64 latest_idle_timestamp = 0;
+       int least_loaded_cpu = this_cpu;
+       int shallowest_idle_cpu = -1;
        int i;
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
+               if (idle_cpu(i)) {
+                       struct rq *rq = cpu_rq(i);
+                       struct cpuidle_state *idle = idle_get_state(rq);
+                       if (idle && idle->exit_latency < min_exit_latency) {
+                               /*
+                                * We give priority to a CPU whose idle state
+                                * has the smallest exit latency irrespective
+                                * of any idle timestamp.
+                                */
+                               min_exit_latency = idle->exit_latency;
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+                                  rq->idle_stamp > latest_idle_timestamp) {
+                               /*
+                                * If equal or no active idle state, then
+                                * the most recently idled CPU might have
+                                * a warmer cache.
+                                */
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       }
+               } else {
+                       load = weighted_cpuload(i);
+                       if (load < min_load || (load == min_load && i == this_cpu)) {
+                               min_load = load;
+                               least_loaded_cpu = i;
+                       }
                }
        }
 
-       return idlest;
+       return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
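
Roughly, the new selection order is: shallowest idle state first, most recently idled CPU as the tie-breaker, and the least loaded CPU only when nothing is idle. A stand-alone sketch of that policy over a made-up CPU table (the "no cpuidle state" case is approximated here by an exit latency of 0):

#include <stdio.h>

struct cpu_sample {
        int idle;                       /* is the CPU idle right now? */
        unsigned int exit_latency;      /* idle-state exit latency, 0 = polling */
        unsigned long long idle_stamp;  /* when it went idle */
        unsigned long load;             /* weighted runqueue load */
};

static int pick_idlest(const struct cpu_sample *cpu, int nr, int this_cpu)
{
        unsigned int min_exit_latency = ~0u;
        unsigned long long latest_idle_timestamp = 0;
        unsigned long min_load = ~0ul;
        int least_loaded_cpu = this_cpu;
        int shallowest_idle_cpu = -1;
        int i;

        for (i = 0; i < nr; i++) {
                if (cpu[i].idle) {
                        if (cpu[i].exit_latency < min_exit_latency) {
                                /* cheaper to wake: always preferred */
                                min_exit_latency = cpu[i].exit_latency;
                                latest_idle_timestamp = cpu[i].idle_stamp;
                                shallowest_idle_cpu = i;
                        } else if (cpu[i].exit_latency == min_exit_latency &&
                                   cpu[i].idle_stamp > latest_idle_timestamp) {
                                /* same depth: prefer the warmer cache */
                                latest_idle_timestamp = cpu[i].idle_stamp;
                                shallowest_idle_cpu = i;
                        }
                } else if (cpu[i].load < min_load ||
                           (cpu[i].load == min_load && i == this_cpu)) {
                        min_load = cpu[i].load;
                        least_loaded_cpu = i;
                }
        }

        return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
}

int main(void)
{
        struct cpu_sample cpus[] = {
                { 0, 0, 0, 512 },       /* busy, light load */
                { 1, 100, 40, 0 },      /* idle, deep state */
                { 1, 10, 30, 0 },       /* idle, shallow state -> picked */
        };
        printf("picked cpu %d\n", pick_idlest(cpus, 3, 0));
        return 0;
}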
 
 /*
@@ -4218,13 +4527,14 @@ done:
 }
 
 /*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
+ * select_task_rq_fair: Select target runqueue for the waking task in domains
+ * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
+ * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
  *
- * Balance, ie. select the least loaded group.
+ * Balances load by selecting the idlest cpu in the idlest group, or under
+ * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
  *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
+ * Returns the target cpu number.
  *
  * preempt must be disabled.
  */
@@ -4240,11 +4550,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
-       if (sd_flag & SD_BALANCE_WAKE) {
-               if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
-                       want_affine = 1;
-               new_cpu = prev_cpu;
-       }
+       if (sd_flag & SD_BALANCE_WAKE)
+               want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
 
        rcu_read_lock();
        for_each_domain(cpu, tmp) {
@@ -4265,16 +4572,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        sd = tmp;
        }
 
-       if (affine_sd) {
-               if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
-                       prev_cpu = cpu;
+       if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+               prev_cpu = cpu;
 
+       if (sd_flag & SD_BALANCE_WAKE) {
                new_cpu = select_idle_sibling(p, prev_cpu);
                goto unlock;
        }
 
        while (sd) {
-               int load_idx = sd->forkexec_idx;
                struct sched_group *group;
                int weight;
 
@@ -4283,10 +4589,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        continue;
                }
 
-               if (sd_flag & SD_BALANCE_WAKE)
-                       load_idx = sd->wake_idx;
-
-               group = find_idlest_group(sd, p, cpu, load_idx);
+               group = find_idlest_group(sd, p, cpu, sd_flag);
                if (!group) {
                        sd = sd->child;
                        continue;
@@ -4340,6 +4643,9 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
                atomic_long_add(se->avg.load_avg_contrib,
                                                &cfs_rq->removed_load);
        }
+
+       /* We have migrated, no longer consider this task hot */
+       se->exec_start = 0;
 }
 #endif /* CONFIG_SMP */
 
@@ -4432,7 +4738,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
                return;
 
        /*
-        * This is possible from callers such as move_task(), in which we
+        * This is possible from callers such as attach_tasks(), in which we
         * unconditionally check_preempt_curr() after an enqueue (which may have
         * led to a throttle).  This both saves work and prevents false
         * next-buddy nomination below.
@@ -4486,7 +4792,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        return;
 
 preempt:
-       resched_task(curr);
+       resched_curr(rq);
        /*
         * Only set the backward buddy when the current task is still
         * on the rq. This can happen when a wakeup gets interleaved
@@ -4503,26 +4809,124 @@ preempt:
                set_last_buddy(se);
 }
 
-static struct task_struct *pick_next_task_fair(struct rq *rq)
+static struct task_struct *
+pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 {
-       struct task_struct *p;
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;
+       struct task_struct *p;
+       int new_tasks;
 
+again:
+#ifdef CONFIG_FAIR_GROUP_SCHED
        if (!cfs_rq->nr_running)
-               return NULL;
+               goto idle;
+
+       if (prev->sched_class != &fair_sched_class)
+               goto simple;
+
+       /*
+        * Because of the set_next_buddy() in dequeue_task_fair() it is rather
+        * likely that a next task is from the same cgroup as the current.
+        *
+        * Therefore attempt to avoid putting and setting the entire cgroup
+        * hierarchy, only change the part that actually changes.
+        */
 
        do {
-               se = pick_next_entity(cfs_rq);
+               struct sched_entity *curr = cfs_rq->curr;
+
+               /*
+                * Since we got here without doing put_prev_entity() we also
+                * have to consider cfs_rq->curr. If it is still a runnable
+                * entity, update_curr() will update its vruntime, otherwise
+                * forget we've ever seen it.
+                */
+               if (curr && curr->on_rq)
+                       update_curr(cfs_rq);
+               else
+                       curr = NULL;
+
+               /*
+                * This call to check_cfs_rq_runtime() will do the throttle and
+                * dequeue its entity in the parent(s). Therefore the 'simple'
+                * nr_running test will indeed be correct.
+                */
+               if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+                       goto simple;
+
+               se = pick_next_entity(cfs_rq, curr);
+               cfs_rq = group_cfs_rq(se);
+       } while (cfs_rq);
+
+       p = task_of(se);
+
+       /*
+        * Since we haven't yet done put_prev_entity() and the selected task
+        * may differ from the one we started out with, try to touch as few
+        * cfs_rqs as possible.
+        */
+       if (prev != p) {
+               struct sched_entity *pse = &prev->se;
+
+               while (!(cfs_rq = is_same_group(se, pse))) {
+                       int se_depth = se->depth;
+                       int pse_depth = pse->depth;
+
+                       if (se_depth <= pse_depth) {
+                               put_prev_entity(cfs_rq_of(pse), pse);
+                               pse = parent_entity(pse);
+                       }
+                       if (se_depth >= pse_depth) {
+                               set_next_entity(cfs_rq_of(se), se);
+                               se = parent_entity(se);
+                       }
+               }
+
+               put_prev_entity(cfs_rq, pse);
+               set_next_entity(cfs_rq, se);
+       }
+
+       if (hrtick_enabled(rq))
+               hrtick_start_fair(rq, p);
+
+       return p;
+simple:
+       cfs_rq = &rq->cfs;
+#endif
+
+       if (!cfs_rq->nr_running)
+               goto idle;
+
+       put_prev_task(rq, prev);
+
+       do {
+               se = pick_next_entity(cfs_rq, NULL);
                set_next_entity(cfs_rq, se);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);
 
        p = task_of(se);
+
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
 
        return p;
+
+idle:
+       new_tasks = idle_balance(rq);
+       /*
+        * Because idle_balance() releases (and re-acquires) rq->lock, it is
+        * possible for any higher priority task to appear. In that case we
+        * must re-start the pick_next_entity() loop.
+        */
+       if (new_tasks < 0)
+               return RETRY_TASK;
+
+       if (new_tasks > 0)
+               goto again;
+
+       return NULL;
 }
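
The put/set walk above only touches the part of the hierarchy where the previous and next entities actually diverge. A small user-space sketch of the depth-matching idea, with hypothetical entity nodes and put/set reduced to prints:

#include <stdio.h>

struct entity {
        const char *name;
        int depth;
        struct entity *parent;
};

static void put_prev(struct entity *e) { printf("put %s\n", e->name); }
static void set_next(struct entity *e) { printf("set %s\n", e->name); }

/* Walk both hierarchies upward until they land in the same group, putting
 * the old path and setting the new one; everything above the meeting point
 * is left untouched, which is the point of the optimisation. */
static void switch_entities(struct entity *se, struct entity *pse)
{
        while (se->parent != pse->parent) {
                int se_depth = se->depth;
                int pse_depth = pse->depth;

                if (se_depth <= pse_depth) {
                        put_prev(pse);
                        pse = pse->parent;
                }
                if (se_depth >= pse_depth) {
                        set_next(se);
                        se = se->parent;
                }
        }
        put_prev(pse);
        set_next(se);
}

int main(void)
{
        /* two-level hierarchy: root <- A <- task a, root <- B <- task b */
        struct entity A = { "group A", 0, NULL }, B = { "group B", 0, NULL };
        struct entity a = { "task a", 1, &A }, b = { "task b", 1, &B };

        switch_entities(&a, &b);        /* prev = b, next = a */
        return 0;
}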
 
 /*
@@ -4616,14 +5020,14 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
  *
  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
  *
- * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
+ * C_i is the compute capacity of cpu i, typically it is the
  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
  * can also include other factors [XXX].
  *
  * To achieve this balance we define a measure of imbalance which follows
  * directly from (1):
  *
- *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
+ *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
  *
  * We then move tasks around to minimize the imbalance. In the continuous
  * function space it is obvious this converges, in the discrete case we get
@@ -4742,28 +5146,18 @@ struct lb_env {
        unsigned int            loop_max;
 
        enum fbq_type           fbq_type;
+       struct list_head        tasks;
 };
 
-/*
- * move_task - move a task from one runqueue to another runqueue.
- * Both runqueues must be locked.
- */
-static void move_task(struct task_struct *p, struct lb_env *env)
-{
-       deactivate_task(env->src_rq, p, 0);
-       set_task_cpu(p, env->dst_cpu);
-       activate_task(env->dst_rq, p, 0);
-       check_preempt_curr(env->dst_rq, p, 0);
-}
-
 /*
  * Is this task likely cache-hot:
  */
-static int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+static int task_hot(struct task_struct *p, struct lb_env *env)
 {
        s64 delta;
 
+       lockdep_assert_held(&env->src_rq->lock);
+
        if (p->sched_class != &fair_sched_class)
                return 0;
 
@@ -4773,7 +5167,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+       if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
@@ -4783,7 +5177,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
        if (sysctl_sched_migration_cost == 0)
                return 0;
 
-       delta = now - p->se.exec_start;
+       delta = rq_clock_task(env->src_rq) - p->se.exec_start;
 
        return delta < (s64)sysctl_sched_migration_cost;
 }
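
The hotness test itself is an age check of the task's last execution timestamp against sysctl_sched_migration_cost, and the migration hook earlier in this patch clears se->exec_start so a freshly migrated task never looks hot. A trivial sketch of the check, with all values passed in as parameters:

#include <stdio.h>

typedef long long s64;

/* A task is cache-hot if it ran on the source CPU within the migration
 * cost window; a cost of 0 disables the check, matching the sysctl
 * handling visible above. All values are nanoseconds. */
static int task_is_hot(s64 now, s64 exec_start, s64 migration_cost)
{
        s64 delta;

        if (migration_cost == 0)
                return 0;

        delta = now - exec_start;
        return delta < migration_cost;
}

int main(void)
{
        printf("%d\n", task_is_hot(1000000, 800000, 500000));  /* 1: ran 0.2 ms ago */
        printf("%d\n", task_is_hot(1000000, 100000, 500000));  /* 0: ran 0.9 ms ago */
        return 0;
}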
@@ -4792,9 +5186,10 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 /* Returns true if the destination node has incurred more faults */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
+       struct numa_group *numa_group = rcu_dereference(p->numa_group);
        int src_nid, dst_nid;
 
-       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
+       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
            !(env->sd->flags & SD_NUMA)) {
                return false;
        }
@@ -4805,27 +5200,35 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       /* Always encourage migration to the preferred node. */
-       if (dst_nid == p->numa_preferred_nid)
-               return true;
+       if (numa_group) {
+               /* Task is already in the group's interleave set. */
+               if (node_isset(src_nid, numa_group->active_nodes))
+                       return false;
+
+               /* Task is moving into the group's interleave set. */
+               if (node_isset(dst_nid, numa_group->active_nodes))
+                       return true;
+
+               return group_faults(p, dst_nid) > group_faults(p, src_nid);
+       }
 
-       /* If both task and group weight improve, this move is a winner. */
-       if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
-           group_weight(p, dst_nid) > group_weight(p, src_nid))
+       /* Encourage migration to the preferred node. */
+       if (dst_nid == p->numa_preferred_nid)
                return true;
 
-       return false;
+       return task_faults(p, dst_nid) > task_faults(p, src_nid);
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
+       struct numa_group *numa_group = rcu_dereference(p->numa_group);
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
                return false;
 
-       if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
+       if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
                return false;
 
        src_nid = cpu_to_node(env->src_cpu);
@@ -4834,16 +5237,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
+       if (numa_group) {
+               /* Task is moving within/into the group's interleave set. */
+               if (node_isset(dst_nid, numa_group->active_nodes))
+                       return false;
+
+               /* Task is moving out of the group's interleave set. */
+               if (node_isset(src_nid, numa_group->active_nodes))
+                       return true;
+
+               return group_faults(p, dst_nid) < group_faults(p, src_nid);
+       }
+
        /* Migrating away from the preferred node is always bad. */
        if (src_nid == p->numa_preferred_nid)
                return true;
 
-       /* If either task or group weight get worse, don't do it. */
-       if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
-           group_weight(p, dst_nid) < group_weight(p, src_nid))
-               return true;
-
-       return false;
+       return task_faults(p, dst_nid) < task_faults(p, src_nid);
 }
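
One way to read the two NUMA predicates: with a numa_group, membership of the source and destination nodes in the group's active_nodes mask decides first and group fault counts break the tie; without a group, the preferred node and per-task fault counts decide. A condensed user-space sketch of the "improves locality" side (node masks shrunk to a bitmask, fault counts passed in directly):

#include <stdio.h>

/* Toy version of migrate_improves_locality(): returns 1 if moving the task
 * from src_nid to dst_nid should improve its NUMA placement. */
static int move_improves_locality(int has_group, unsigned long active_nodes,
                                  int src_nid, int dst_nid,
                                  unsigned long src_faults,
                                  unsigned long dst_faults,
                                  int preferred_nid)
{
        if (src_nid == dst_nid)
                return 0;

        if (has_group) {
                if (active_nodes & (1UL << src_nid))    /* already in the set */
                        return 0;
                if (active_nodes & (1UL << dst_nid))    /* moving into the set */
                        return 1;
                return dst_faults > src_faults;         /* otherwise follow faults */
        }

        if (dst_nid == preferred_nid)
                return 1;
        return dst_faults > src_faults;
}

int main(void)
{
        /* group spans nodes {0,1}; task sits on node 2, candidate dst is 1 */
        printf("%d\n", move_improves_locality(1, 0x3, 2, 1, 10, 5, 0));
        return 0;
}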
 
 #else
@@ -4867,6 +5277,9 @@ static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
        int tsk_cache_hot = 0;
+
+       lockdep_assert_held(&env->src_rq->lock);
+
        /*
         * We do not migrate tasks that are:
         * 1) throttled_lb_pair, or
@@ -4921,28 +5334,16 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         * 2) task is cache cold, or
         * 3) too many balance attempts have failed.
         */
-       tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
+       tsk_cache_hot = task_hot(p, env);
        if (!tsk_cache_hot)
                tsk_cache_hot = migrate_degrades_locality(p, env);
 
-       if (migrate_improves_locality(p, env)) {
-#ifdef CONFIG_SCHEDSTATS
+       if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
+           env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
                if (tsk_cache_hot) {
                        schedstat_inc(env->sd, lb_hot_gained[env->idle]);
                        schedstat_inc(p, se.statistics.nr_forced_migrations);
                }
-#endif
-               return 1;
-       }
-
-       if (!tsk_cache_hot ||
-               env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-
-               if (tsk_cache_hot) {
-                       schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-                       schedstat_inc(p, se.statistics.nr_forced_migrations);
-               }
-
                return 1;
        }
 
@@ -4951,47 +5352,63 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 }
 
 /*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * detach_task() -- detach the task for the migration specified in env
+ */
+static void detach_task(struct task_struct *p, struct lb_env *env)
+{
+       lockdep_assert_held(&env->src_rq->lock);
+
+       deactivate_task(env->src_rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, env->dst_cpu);
+}
+
+/*
+ * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
  * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
  *
- * Called with both runqueues locked.
+ * Returns a task if successful and NULL otherwise.
  */
-static int move_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env)
 {
        struct task_struct *p, *n;
 
+       lockdep_assert_held(&env->src_rq->lock);
+
        list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
                if (!can_migrate_task(p, env))
                        continue;
 
-               move_task(p, env);
+               detach_task(p, env);
+
                /*
-                * Right now, this is only the second place move_task()
-                * is called, so we can safely collect move_task()
-                * stats here rather than inside move_task().
+                * Right now, this is only the second place where
+                * lb_gained[env->idle] is updated (other is detach_tasks)
+                * so we can safely collect stats here rather than
+                * inside detach_tasks().
                 */
                schedstat_inc(env->sd, lb_gained[env->idle]);
-               return 1;
+               return p;
        }
-       return 0;
+       return NULL;
 }
 
 static const unsigned int sched_nr_migrate_break = 32;
 
 /*
- * move_tasks tries to move up to imbalance weighted load from busiest to
- * this_rq, as part of a balancing operation within domain "sd".
- * Returns 1 if successful and 0 otherwise.
+ * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * busiest_rq, as part of a balancing operation within domain "sd".
  *
- * Called with both runqueues locked.
+ * Returns number of detached tasks if successful and 0 otherwise.
  */
-static int move_tasks(struct lb_env *env)
+static int detach_tasks(struct lb_env *env)
 {
        struct list_head *tasks = &env->src_rq->cfs_tasks;
        struct task_struct *p;
        unsigned long load;
-       int pulled = 0;
+       int detached = 0;
+
+       lockdep_assert_held(&env->src_rq->lock);
 
        if (env->imbalance <= 0)
                return 0;
@@ -5022,14 +5439,16 @@ static int move_tasks(struct lb_env *env)
                if ((load / 2) > env->imbalance)
                        goto next;
 
-               move_task(p, env);
-               pulled++;
+               detach_task(p, env);
+               list_add(&p->se.group_node, &env->tasks);
+
+               detached++;
                env->imbalance -= load;
 
 #ifdef CONFIG_PREEMPT
                /*
                 * NEWIDLE balancing is a source of latency, so preemptible
-                * kernels will stop after the first task is pulled to minimize
+                * kernels will stop after the first task is detached to minimize
                 * the critical section.
                 */
                if (env->idle == CPU_NEWLY_IDLE)
@@ -5049,13 +5468,58 @@ next:
        }
 
        /*
-        * Right now, this is one of only two places move_task() is called,
-        * so we can safely collect move_task() stats here rather than
-        * inside move_task().
+        * Right now, this is one of only two places we collect this stat
+        * so we can safely collect detach_one_task() stats here rather
+        * than inside detach_one_task().
         */
-       schedstat_add(env->sd, lb_gained[env->idle], pulled);
+       schedstat_add(env->sd, lb_gained[env->idle], detached);
+
+       return detached;
+}
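+
The detach loop pulls tasks only while the remaining imbalance justifies it: a task whose weighted load is more than twice the remaining imbalance is skipped. A toy model of just that bookkeeping (the loads are arbitrary numbers; loop_max and the other break conditions are omitted):

#include <stdio.h>

/* Model of the detach_tasks() accounting: walk candidate loads, skip any
 * task that would overshoot the remaining imbalance by more than 2x, and
 * stop once the imbalance is covered. Returns how many were "detached". */
static int detach_by_load(const unsigned long *load, int nr, long imbalance)
{
        int detached = 0;
        int i;

        for (i = 0; i < nr && imbalance > 0; i++) {
                if ((long)(load[i] / 2) > imbalance)
                        continue;       /* too big a bite, try the next task */
                detached++;
                imbalance -= load[i];
        }
        return detached;
}

int main(void)
{
        unsigned long loads[] = { 900, 300, 300, 100 };
        printf("detached %d tasks\n", detach_by_load(loads, 4, 400));
        return 0;
}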
+
+/*
+ * attach_task() -- attach the task detached by detach_task() to its new rq.
+ */
+static void attach_task(struct rq *rq, struct task_struct *p)
+{
+       lockdep_assert_held(&rq->lock);
+
+       BUG_ON(task_rq(p) != rq);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       activate_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+}
+
+/*
+ * attach_one_task() -- attaches the task returned from detach_one_task() to
+ * its new rq.
+ */
+static void attach_one_task(struct rq *rq, struct task_struct *p)
+{
+       raw_spin_lock(&rq->lock);
+       attach_task(rq, p);
+       raw_spin_unlock(&rq->lock);
+}
+
+/*
+ * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
+ * new rq.
+ */
+static void attach_tasks(struct lb_env *env)
+{
+       struct list_head *tasks = &env->tasks;
+       struct task_struct *p;
+
+       raw_spin_lock(&env->dst_rq->lock);
 
-       return pulled;
+       while (!list_empty(tasks)) {
+               p = list_first_entry(tasks, struct task_struct, se.group_node);
+               list_del_init(&p->se.group_node);
+
+               attach_task(env->dst_rq, p);
+       }
+
+       raw_spin_unlock(&env->dst_rq->lock);
 }
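
Conceptually, the old move_task() (deactivate, set_task_cpu, activate, all under both runqueue locks) is now split into a detach phase under the source lock, a private list carried in the lb_env, and an attach phase under the destination lock only. A lock-free toy of that hand-off (arrays instead of the kernel's intrusive lists):

#include <stdio.h>

#define MAX_TASKS 8

struct toy_rq { int tasks[MAX_TASKS]; int nr; };

/* detach phase: runs with only the source runqueue "locked" */
static int detach_one(struct toy_rq *src)
{
        if (!src->nr)
                return -1;
        return src->tasks[--src->nr];   /* dequeued; owned by the balancer now */
}

/* attach phase: runs later, with only the destination runqueue "locked" */
static void attach_one(struct toy_rq *dst, int task)
{
        dst->tasks[dst->nr++] = task;   /* enqueue + (in the kernel) preempt check */
}

int main(void)
{
        struct toy_rq src = { { 1, 2, 3 }, 3 }, dst = { { 0 }, 0 };
        int stash[MAX_TASKS], n = 0, i;

        /* with src "locked": pull work into a private stash (env->tasks) */
        while (src.nr > 1)
                stash[n++] = detach_one(&src);

        /* with dst "locked": push the stash onto the destination */
        for (i = 0; i < n; i++)
                attach_one(&dst, stash[i]);

        printf("src now has %d tasks, dst has %d\n", src.nr, dst.nr);
        return 0;
}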
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -5174,6 +5638,13 @@ static unsigned long task_h_load(struct task_struct *p)
 #endif
 
 /********** Helpers for find_busiest_group ************************/
+
+enum group_type {
+       group_other = 0,
+       group_imbalanced,
+       group_overloaded,
+};
+
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
@@ -5182,13 +5653,13 @@ struct sg_lb_stats {
        unsigned long group_load; /* Total load over the CPUs of the group */
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
-       unsigned long group_power;
+       unsigned long group_capacity;
        unsigned int sum_nr_running; /* Nr tasks running in the group */
-       unsigned int group_capacity;
+       unsigned int group_capacity_factor;
        unsigned int idle_cpus;
        unsigned int group_weight;
-       int group_imb; /* Is there an imbalance in the group ? */
-       int group_has_capacity; /* Is there extra capacity in the group? */
+       enum group_type group_type;
+       int group_has_free_capacity;
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@ -5203,7 +5674,7 @@ struct sd_lb_stats {
        struct sched_group *busiest;    /* Busiest group in this sd */
        struct sched_group *local;      /* Local group in this sd */
        unsigned long total_load;       /* Total load of all groups in sd */
-       unsigned long total_pwr;        /* Total power of all groups in sd */
+       unsigned long total_capacity;   /* Total capacity of all groups in sd */
        unsigned long avg_load; /* Average load across all groups in sd */
 
        struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
@@ -5222,9 +5693,11 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
                .busiest = NULL,
                .local = NULL,
                .total_load = 0UL,
-               .total_pwr = 0UL,
+               .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
+                       .sum_nr_running = 0,
+                       .group_type = group_other,
                },
        };
 }
@@ -5257,35 +5730,34 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
 {
-       return SCHED_POWER_SCALE;
+       return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 {
-       return default_scale_freq_power(sd, cpu);
+       return default_scale_capacity(sd, cpu);
 }
 
-static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
-       unsigned long smt_gain = sd->smt_gain;
-
-       smt_gain /= weight;
+       if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+               return sd->smt_gain / sd->span_weight;
 
-       return smt_gain;
+       return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       return default_scale_smt_power(sd, cpu);
+       return default_scale_cpu_capacity(sd, cpu);
 }
 
-static unsigned long scale_rt_power(int cpu)
+static unsigned long scale_rt_capacity(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        u64 total, available, age_stamp, avg;
+       s64 delta;
 
        /*
         * Since we're reading these variables without serialization make sure
@@ -5294,74 +5766,75 @@ static unsigned long scale_rt_power(int cpu)
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
 
-       total = sched_avg_period() + (rq_clock(rq) - age_stamp);
+       delta = rq_clock(rq) - age_stamp;
+       if (unlikely(delta < 0))
+               delta = 0;
+
+       total = sched_avg_period() + delta;
 
        if (unlikely(total < avg)) {
-               /* Ensures that power won't end up being negative */
+               /* Ensures that capacity won't end up being negative */
                available = 0;
        } else {
                available = total - avg;
        }
 
-       if (unlikely((s64)total < SCHED_POWER_SCALE))
-               total = SCHED_POWER_SCALE;
+       if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
+               total = SCHED_CAPACITY_SCALE;
 
-       total >>= SCHED_POWER_SHIFT;
+       total >>= SCHED_CAPACITY_SHIFT;
 
        return div_u64(available, total);
 }
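
The returned fraction is essentially (window time not consumed by RT/irq work) / (averaging window), with clamping so that a racy clock or an oversized RT average cannot underflow. A plain C version of the arithmetic with made-up nanosecond inputs:

#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1ULL << SCHED_CAPACITY_SHIFT)

/* 'period' is the averaging window, 'clock' and 'age_stamp' give how long
 * the current window has run, 'rt_avg' is the time eaten by RT/irq work. */
static u64 rt_free_fraction(u64 period, u64 clock, u64 age_stamp, u64 rt_avg)
{
        s64 delta = (s64)(clock - age_stamp);
        u64 total, available;

        if (delta < 0)
                delta = 0;              /* clock and age_stamp may be racy */

        total = period + (u64)delta;
        available = total < rt_avg ? 0 : total - rt_avg;

        if ((s64)total < (s64)SCHED_CAPACITY_SCALE)
                total = SCHED_CAPACITY_SCALE;
        total >>= SCHED_CAPACITY_SHIFT;

        return available / total;       /* ~1024 means "all capacity left for CFS" */
}

int main(void)
{
        /* 1 ms window, ~0.27 ms spent in RT: roughly 75% of capacity remains */
        printf("%llu\n", rt_free_fraction(1000000, 500000, 400000, 275000));
        return 0;
}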
 
-static void update_cpu_power(struct sched_domain *sd, int cpu)
+static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
-       unsigned long power = SCHED_POWER_SCALE;
+       unsigned long capacity = SCHED_CAPACITY_SCALE;
        struct sched_group *sdg = sd->groups;
 
-       if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-               if (sched_feat(ARCH_POWER))
-                       power *= arch_scale_smt_power(sd, cpu);
-               else
-                       power *= default_scale_smt_power(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_cpu_capacity(sd, cpu);
+       else
+               capacity *= default_scale_cpu_capacity(sd, cpu);
 
-               power >>= SCHED_POWER_SHIFT;
-       }
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       sdg->sgp->power_orig = power;
+       sdg->sgc->capacity_orig = capacity;
 
-       if (sched_feat(ARCH_POWER))
-               power *= arch_scale_freq_power(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_freq_capacity(sd, cpu);
        else
-               power *= default_scale_freq_power(sd, cpu);
+               capacity *= default_scale_capacity(sd, cpu);
 
-       power >>= SCHED_POWER_SHIFT;
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       power *= scale_rt_power(cpu);
-       power >>= SCHED_POWER_SHIFT;
+       capacity *= scale_rt_capacity(cpu);
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       if (!power)
-               power = 1;
+       if (!capacity)
+               capacity = 1;
 
-       cpu_rq(cpu)->cpu_power = power;
-       sdg->sgp->power = power;
+       cpu_rq(cpu)->cpu_capacity = capacity;
+       sdg->sgc->capacity = capacity;
 }
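
Taken together, a CPU's capacity starts at SCHED_CAPACITY_SCALE and is successively multiplied by the cpu (SMT) factor, the frequency factor and the RT-free fraction, shifting back down after each step and never allowed to reach zero. A compact sketch of that pipeline, with the three factors supplied as plain parameters:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

/* Each factor is on the same 1024-based scale: 1024 means "no reduction". */
static unsigned long compute_capacity(unsigned long cpu_factor,
                                      unsigned long freq_factor,
                                      unsigned long rt_free)
{
        unsigned long capacity = SCHED_CAPACITY_SCALE;

        capacity = (capacity * cpu_factor) >> SCHED_CAPACITY_SHIFT;
        /* in the patch, this intermediate value is stored as capacity_orig */
        capacity = (capacity * freq_factor) >> SCHED_CAPACITY_SHIFT;
        capacity = (capacity * rt_free) >> SCHED_CAPACITY_SHIFT;

        return capacity ? capacity : 1; /* never report zero capacity */
}

int main(void)
{
        /* SMT sibling (~58%), full frequency, ~75% free of RT work */
        printf("%lu\n", compute_capacity(589, 1024, 768));
        return 0;
}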
 
-void update_group_power(struct sched_domain *sd, int cpu)
+void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long power, power_orig;
+       unsigned long capacity, capacity_orig;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
        interval = clamp(interval, 1UL, max_load_balance_interval);
-       sdg->sgp->next_update = jiffies + interval;
+       sdg->sgc->next_update = jiffies + interval;
 
        if (!child) {
-               update_cpu_power(sd, cpu);
+               update_cpu_capacity(sd, cpu);
                return;
        }
 
-       power_orig = power = 0;
+       capacity_orig = capacity = 0;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -5370,31 +5843,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
                 */
 
                for_each_cpu(cpu, sched_group_cpus(sdg)) {
-                       struct sched_group_power *sgp;
+                       struct sched_group_capacity *sgc;
                        struct rq *rq = cpu_rq(cpu);
 
                        /*
-                        * build_sched_domains() -> init_sched_groups_power()
+                        * build_sched_domains() -> init_sched_groups_capacity()
                         * gets here before we've attached the domains to the
                         * runqueues.
                         *
-                        * Use power_of(), which is set irrespective of domains
-                        * in update_cpu_power().
+                        * Use capacity_of(), which is set irrespective of domains
+                        * in update_cpu_capacity().
                         *
-                        * This avoids power/power_orig from being 0 and
+                        * This prevents capacity/capacity_orig from being 0 and
                         * causing divide-by-zero issues on boot.
                         *
-                        * Runtime updates will correct power_orig.
+                        * Runtime updates will correct capacity_orig.
                         */
                        if (unlikely(!rq->sd)) {
-                               power_orig += power_of(cpu);
-                               power += power_of(cpu);
+                               capacity_orig += capacity_of(cpu);
+                               capacity += capacity_of(cpu);
                                continue;
                        }
 
-                       sgp = rq->sd->groups->sgp;
-                       power_orig += sgp->power_orig;
-                       power += sgp->power;
+                       sgc = rq->sd->groups->sgc;
+                       capacity_orig += sgc->capacity_orig;
+                       capacity += sgc->capacity;
                }
        } else  {
                /*
@@ -5404,14 +5877,14 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
                group = child->groups;
                do {
-                       power_orig += group->sgp->power_orig;
-                       power += group->sgp->power;
+                       capacity_orig += group->sgc->capacity_orig;
+                       capacity += group->sgc->capacity;
                        group = group->next;
                } while (group != child->groups);
        }
 
-       sdg->sgp->power_orig = power_orig;
-       sdg->sgp->power = power;
+       sdg->sgc->capacity_orig = capacity_orig;
+       sdg->sgc->capacity = capacity;
 }
 
 /*
@@ -5425,15 +5898,15 @@ static inline int
 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 {
        /*
-        * Only siblings can have significantly less than SCHED_POWER_SCALE
+        * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
         */
-       if (!(sd->flags & SD_SHARE_CPUPOWER))
+       if (!(sd->flags & SD_SHARE_CPUCAPACITY))
                return 0;
 
        /*
-        * If ~90% of the cpu_power is still there, we're good.
+        * If ~90% of the cpu_capacity is still there, we're good.
         */
-       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
+       if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
                return 1;
 
        return 0;
@@ -5470,34 +5943,47 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 
 static inline int sg_imbalanced(struct sched_group *group)
 {
-       return group->sgp->imbalance;
+       return group->sgc->imbalance;
 }
 
 /*
- * Compute the group capacity.
+ * Compute the group capacity factor.
  *
- * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
+ * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
  * first dividing out the smt factor and computing the actual number of cores
- * and limit power unit capacity with that.
+ * and limiting unit capacity with that.
  */
-static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
+static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
 {
-       unsigned int capacity, smt, cpus;
-       unsigned int power, power_orig;
+       unsigned int capacity_factor, smt, cpus;
+       unsigned int capacity, capacity_orig;
 
-       power = group->sgp->power;
-       power_orig = group->sgp->power_orig;
+       capacity = group->sgc->capacity;
+       capacity_orig = group->sgc->capacity_orig;
        cpus = group->group_weight;
 
-       /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
-       smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
-       capacity = cpus / smt; /* cores */
+       /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
+       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
+       capacity_factor = cpus / smt; /* cores */
 
-       capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
-       if (!capacity)
-               capacity = fix_small_capacity(env->sd, group);
+       capacity_factor = min_t(unsigned,
+               capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
+       if (!capacity_factor)
+               capacity_factor = fix_small_capacity(env->sd, group);
+
+       return capacity_factor;
+}
+
+static enum group_type
+group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
+{
+       if (sgs->sum_nr_running > sgs->group_capacity_factor)
+               return group_overloaded;
+
+       if (sg_imbalanced(group))
+               return group_imbalanced;
 
-       return capacity;
+       return group_other;
 }
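
The new classification collapses the old imbalance/over-capacity flags into an ordered enum, with "overloaded" checked before "imbalanced", and the capacity factor counts whole cores so SMT siblings cannot create phantom capacity. A small model combining both helpers (fix_small_capacity() is reduced to a floor of 1):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE    1024UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

enum group_type { group_other = 0, group_imbalanced, group_overloaded };

/* How many tasks the group can run without being considered overloaded. */
static unsigned int capacity_factor(unsigned int cpus,
                                    unsigned long capacity,
                                    unsigned long capacity_orig)
{
        unsigned int smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
        unsigned int factor = cpus / smt;       /* whole cores */
        unsigned int by_cap = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);

        if (by_cap < factor)
                factor = by_cap;
        return factor ? factor : 1;             /* crude fix_small_capacity() stand-in */
}

static enum group_type classify(unsigned int nr_running, unsigned int factor,
                                int imbalanced)
{
        if (nr_running > factor)
                return group_overloaded;
        if (imbalanced)
                return group_imbalanced;
        return group_other;
}

int main(void)
{
        /* two SMT siblings, capacity_orig ~1178: counts as a single core */
        unsigned int f = capacity_factor(2, 1178, 1178);

        printf("factor=%u type=%d\n", f, classify(3, f, 0));
        return 0;
}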
 
 /**
@@ -5507,12 +5993,13 @@ static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
+ * @overload: Indicate more than one runnable task for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
-                       int local_group, struct sg_lb_stats *sgs)
+                       int local_group, struct sg_lb_stats *sgs,
+                       bool *overload)
 {
-       unsigned long nr_running;
        unsigned long load;
        int i;
 
@@ -5521,8 +6008,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
                struct rq *rq = cpu_rq(i);
 
-               nr_running = rq->nr_running;
-
                /* Bias balancing toward cpus of our domain */
                if (local_group)
                        load = target_load(i, load_idx);
@@ -5530,7 +6015,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        load = source_load(i, load_idx);
 
                sgs->group_load += load;
-               sgs->sum_nr_running += nr_running;
+               sgs->sum_nr_running += rq->cfs.h_nr_running;
+
+               if (rq->nr_running > 1)
+                       *overload = true;
+
 #ifdef CONFIG_NUMA_BALANCING
                sgs->nr_numa_running += rq->nr_numa_running;
                sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -5540,20 +6029,19 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        sgs->idle_cpus++;
        }
 
-       /* Adjust by relative CPU power of the group */
-       sgs->group_power = group->sgp->power;
-       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
+       /* Adjust by relative CPU capacity of the group */
+       sgs->group_capacity = group->sgc->capacity;
+       sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
        if (sgs->sum_nr_running)
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
        sgs->group_weight = group->group_weight;
+       sgs->group_capacity_factor = sg_capacity_factor(env, group);
+       sgs->group_type = group_classify(group, sgs);
 
-       sgs->group_imb = sg_imbalanced(group);
-       sgs->group_capacity = sg_capacity(env, group);
-
-       if (sgs->group_capacity > sgs->sum_nr_running)
-               sgs->group_has_capacity = 1;
+       if (sgs->group_capacity_factor > sgs->sum_nr_running)
+               sgs->group_has_free_capacity = 1;
 }
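
In the rewritten statistics, a group's average load is its summed load normalised by the group's capacity, and "free capacity" is a comparison of the capacity factor against the number of runnable CFS tasks. A tiny numeric sketch:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE    1024UL

struct sg_stats {
        unsigned long group_load;       /* sum of per-cpu loads */
        unsigned long group_capacity;   /* sum of per-cpu capacities */
        unsigned int sum_nr_running;    /* CFS tasks in the group */
        unsigned int capacity_factor;   /* whole cores worth of capacity */
};

static unsigned long group_avg_load(const struct sg_stats *s)
{
        return (s->group_load * SCHED_CAPACITY_SCALE) / s->group_capacity;
}

static int group_has_free_capacity(const struct sg_stats *s)
{
        return s->capacity_factor > s->sum_nr_running;
}

int main(void)
{
        /* two full-capacity CPUs, 1.5 CPUs worth of load, one runnable task */
        struct sg_stats s = { 1536, 2048, 1, 2 };

        printf("avg_load=%lu free=%d\n", group_avg_load(&s),
               group_has_free_capacity(&s));
        return 0;
}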
 
 /**
@@ -5574,13 +6062,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sched_group *sg,
                                   struct sg_lb_stats *sgs)
 {
-       if (sgs->avg_load <= sds->busiest_stat.avg_load)
-               return false;
+       struct sg_lb_stats *busiest = &sds->busiest_stat;
 
-       if (sgs->sum_nr_running > sgs->group_capacity)
+       if (sgs->group_type > busiest->group_type)
                return true;
 
-       if (sgs->group_imb)
+       if (sgs->group_type < busiest->group_type)
+               return false;
+
+       if (sgs->avg_load <= busiest->avg_load)
+               return false;
+
+       /* This is the busiest node in its class. */
+       if (!(env->sd->flags & SD_ASYM_PACKING))
                return true;
 
        /*
@@ -5588,8 +6082,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         * numbered CPUs in the group, therefore mark all groups
         * higher than ourself as busy.
         */
-       if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
-           env->dst_cpu < group_first_cpu(sg)) {
+       if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
                if (!sds->busiest)
                        return true;
 
@@ -5641,6 +6134,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats tmp_sgs;
        int load_idx, prefer_sibling = 0;
+       bool overload = false;
 
        if (child && child->flags & SD_PREFER_SIBLING)
                prefer_sibling = 1;
@@ -5657,28 +6151,29 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                        sgs = &sds->local_stat;
 
                        if (env->idle != CPU_NEWLY_IDLE ||
-                           time_after_eq(jiffies, sg->sgp->next_update))
-                               update_group_power(env->sd, env->dst_cpu);
+                           time_after_eq(jiffies, sg->sgc->next_update))
+                               update_group_capacity(env->sd, env->dst_cpu);
                }
 
-               update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
+               update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
+                                               &overload);
 
                if (local_group)
                        goto next_group;
 
                /*
                 * In case the child domain prefers tasks go to siblings
-                * first, lower the sg capacity to one so that we'll try
+                * first, lower the sg capacity factor to one so that we'll try
                 * and move all the excess tasks away. We lower the capacity
                 * of a group only if the local group has the capacity to fit
-                * these excess tasks, i.e. nr_running < group_capacity. The
+                * these excess tasks, i.e. nr_running < group_capacity_factor. The
                 * extra check prevents the case where you always pull from the
                 * heaviest group when it is already under-utilized (possible
                 * when a large weight task outweighs the tasks on the system).
                 */
                if (prefer_sibling && sds->local &&
-                   sds->local_stat.group_has_capacity)
-                       sgs->group_capacity = min(sgs->group_capacity, 1U);
+                   sds->local_stat.group_has_free_capacity)
+                       sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
 
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
@@ -5688,13 +6183,20 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 next_group:
                /* Now, start updating sd_lb_stats */
                sds->total_load += sgs->group_load;
-               sds->total_pwr += sgs->group_power;
+               sds->total_capacity += sgs->group_capacity;
 
                sg = sg->next;
        } while (sg != env->sd->groups);
 
        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
+       if (!env->sd->parent) {
+               /* update overload indicator if we are at root domain */
+               if (env->dst_rq->rd->overload != overload)
+                       env->dst_rq->rd->overload = overload;
+       }
+
 }
 
 /**
@@ -5735,8 +6237,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
                return 0;
 
        env->imbalance = DIV_ROUND_CLOSEST(
-               sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
-               SCHED_POWER_SCALE);
+               sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
+               SCHED_CAPACITY_SCALE);
 
        return 1;
 }
@@ -5751,7 +6253,7 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 static inline
 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 {
-       unsigned long tmp, pwr_now = 0, pwr_move = 0;
+       unsigned long tmp, capa_now = 0, capa_move = 0;
        unsigned int imbn = 2;
        unsigned long scaled_busy_load_per_task;
        struct sg_lb_stats *local, *busiest;
@@ -5765,8 +6267,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                imbn = 1;
 
        scaled_busy_load_per_task =
-               (busiest->load_per_task * SCHED_POWER_SCALE) /
-               busiest->group_power;
+               (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+               busiest->group_capacity;
 
        if (busiest->avg_load + scaled_busy_load_per_task >=
            local->avg_load + (scaled_busy_load_per_task * imbn)) {
@@ -5776,40 +6278,38 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 
        /*
         * OK, we don't have enough imbalance to justify moving tasks,
-        * however we may be able to increase total CPU power used by
+        * however we may be able to increase total CPU capacity used by
         * moving them.
         */
 
-       pwr_now += busiest->group_power *
+       capa_now += busiest->group_capacity *
                        min(busiest->load_per_task, busiest->avg_load);
-       pwr_now += local->group_power *
+       capa_now += local->group_capacity *
                        min(local->load_per_task, local->avg_load);
-       pwr_now /= SCHED_POWER_SCALE;
+       capa_now /= SCHED_CAPACITY_SCALE;
 
        /* Amount of load we'd subtract */
-       tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-               busiest->group_power;
-       if (busiest->avg_load > tmp) {
-               pwr_move += busiest->group_power *
+       if (busiest->avg_load > scaled_busy_load_per_task) {
+               capa_move += busiest->group_capacity *
                            min(busiest->load_per_task,
-                               busiest->avg_load - tmp);
+                               busiest->avg_load - scaled_busy_load_per_task);
        }
 
        /* Amount of load we'd add */
-       if (busiest->avg_load * busiest->group_power <
-           busiest->load_per_task * SCHED_POWER_SCALE) {
-               tmp = (busiest->avg_load * busiest->group_power) /
-                     local->group_power;
+       if (busiest->avg_load * busiest->group_capacity <
+           busiest->load_per_task * SCHED_CAPACITY_SCALE) {
+               tmp = (busiest->avg_load * busiest->group_capacity) /
+                     local->group_capacity;
        } else {
-               tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-                     local->group_power;
+               tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+                     local->group_capacity;
        }
-       pwr_move += local->group_power *
+       capa_move += local->group_capacity *
                    min(local->load_per_task, local->avg_load + tmp);
-       pwr_move /= SCHED_POWER_SCALE;
+       capa_move /= SCHED_CAPACITY_SCALE;
 
        /* Move if we gain throughput */
-       if (pwr_move > pwr_now)
+       if (capa_move > capa_now)
                env->imbalance = busiest->load_per_task;
 }
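
For intuition, the capa_now/capa_move comparison above asks whether moving a single task would raise the total load the two groups can actually execute. A standalone re-derivation of the check with made-up group statistics, illustrative only and not kernel code:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* hypothetical group stats */
	unsigned long busiest_capacity = 1024, local_capacity = 1024;
	unsigned long busiest_load_per_task = 512, local_load_per_task = 512;
	unsigned long busiest_avg_load = 700, local_avg_load = 100;

	unsigned long scaled = busiest_load_per_task * SCHED_CAPACITY_SCALE /
			       busiest_capacity;	/* 512 */
	unsigned long capa_now, capa_move = 0, tmp;

	capa_now = (busiest_capacity *
			min_ul(busiest_load_per_task, busiest_avg_load) +
		    local_capacity *
			min_ul(local_load_per_task, local_avg_load)) /
		   SCHED_CAPACITY_SCALE;		/* 612 */

	/* load we'd subtract from the busiest group */
	if (busiest_avg_load > scaled)
		capa_move += busiest_capacity *
			min_ul(busiest_load_per_task,
			       busiest_avg_load - scaled);

	/* load we'd add to the local group */
	if (busiest_avg_load * busiest_capacity <
	    busiest_load_per_task * SCHED_CAPACITY_SCALE)
		tmp = busiest_avg_load * busiest_capacity / local_capacity;
	else
		tmp = busiest_load_per_task * SCHED_CAPACITY_SCALE /
		      local_capacity;

	capa_move += local_capacity *
		     min_ul(local_load_per_task, local_avg_load + tmp);
	capa_move /= SCHED_CAPACITY_SCALE;		/* 700 */

	/* move one task only if estimated throughput goes up */
	printf("capa_now=%lu capa_move=%lu move=%d\n",
	       capa_now, capa_move, capa_move > capa_now);
	return 0;
}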
 
@@ -5827,7 +6327,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        local = &sds->local_stat;
        busiest = &sds->busiest_stat;
 
-       if (busiest->group_imb) {
+       if (busiest->group_type == group_imbalanced) {
                /*
                 * In the group_imb case we cannot rely on group-wide averages
                 * to ensure cpu-load equilibrium, look at wider averages. XXX
@@ -5839,7 +6339,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load(as we skip the groups at or below
-        * its cpu_power, while calculating max_load..)
+        * its cpu_capacity, while calculating max_load..)
         */
        if (busiest->avg_load <= sds->avg_load ||
            local->avg_load >= sds->avg_load) {
@@ -5847,17 +6347,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                return fix_small_imbalance(env, sds);
        }
 
-       if (!busiest->group_imb) {
-               /*
-                * Don't want to pull so many tasks that a group would go idle.
-                * Except of course for the group_imb case, since then we might
-                * have to drop below capacity to reach cpu-load equilibrium.
-                */
+       /*
+        * If there aren't any idle cpus, avoid creating some.
+        */
+       if (busiest->group_type == group_overloaded &&
+           local->group_type   == group_overloaded) {
                load_above_capacity =
-                       (busiest->sum_nr_running - busiest->group_capacity);
+                       (busiest->sum_nr_running - busiest->group_capacity_factor);
 
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-               load_above_capacity /= busiest->group_power;
+               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
+               load_above_capacity /= busiest->group_capacity;
        }
 
        /*
@@ -5872,9 +6371,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 
        /* How much load to actually move to equalise the imbalance */
        env->imbalance = min(
-               max_pull * busiest->group_power,
-               (sds->avg_load - local->avg_load) * local->group_power
-       ) / SCHED_POWER_SCALE;
+               max_pull * busiest->group_capacity,
+               (sds->avg_load - local->avg_load) * local->group_capacity
+       ) / SCHED_CAPACITY_SCALE;
 
        /*
         * if *imbalance is less than the average load per runnable task
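
For reference, a worked example of the final min() above with made-up numbers; max_pull is taken here to be simply the busiest group's excess over the domain-wide average, i.e. the load_above_capacity cap (whose application falls outside this hunk) is assumed not to bite:

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long sds_avg_load = 1000;	/* domain-wide average */
	unsigned long busiest_avg_load = 1600, busiest_capacity = 2048;
	unsigned long local_avg_load = 400, local_capacity = 1024;

	/* assumed: no load_above_capacity clamp in this example */
	unsigned long max_pull = busiest_avg_load - sds_avg_load;	/* 600 */

	unsigned long imbalance = min_ul(max_pull * busiest_capacity,
			(sds_avg_load - local_avg_load) * local_capacity) /
			SCHED_CAPACITY_SCALE;

	printf("imbalance = %lu\n", imbalance);	/* prints 600 */
	return 0;
}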
@@ -5928,23 +6427,24 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
        if (!sds.busiest || busiest->sum_nr_running == 0)
                goto out_balanced;
 
-       sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
+       sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
+                                               / sds.total_capacity;
 
        /*
         * If the busiest group is imbalanced the below checks don't
         * work because they assume all things are equal, which typically
         * isn't true due to cpus_allowed constraints and the like.
         */
-       if (busiest->group_imb)
+       if (busiest->group_type == group_imbalanced)
                goto force_balance;
 
        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-       if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
-           !busiest->group_has_capacity)
+       if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
+           !busiest->group_has_free_capacity)
                goto force_balance;
 
        /*
-        * If the local group is more busy than the selected busiest group
+        * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
         */
        if (local->avg_load >= busiest->avg_load)
@@ -5959,13 +6459,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
        if (env->idle == CPU_IDLE) {
                /*
-                * This cpu is idle. If the busiest group load doesn't
-                * have more tasks than the number of available cpu's and
-                * there is no imbalance between this and busiest group
-                * wrt to idle cpu's, it is balanced.
+                * This cpu is idle. If the busiest group is not overloaded
+                * and there is no imbalance between this and the busiest group
+                * wrt idle cpus, it is balanced. The imbalance becomes
+                * significant if the diff is greater than 1; otherwise we
+                * might end up just moving the imbalance to another group.
                 */
-               if ((local->idle_cpus < busiest->idle_cpus) &&
-                   busiest->sum_nr_running <= busiest->group_weight)
+               if ((busiest->group_type != group_overloaded) &&
+                               (local->idle_cpus <= (busiest->idle_cpus + 1)))
                        goto out_balanced;
        } else {
                /*
@@ -5994,11 +6495,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                                     struct sched_group *group)
 {
        struct rq *busiest = NULL, *rq;
-       unsigned long busiest_load = 0, busiest_power = 1;
+       unsigned long busiest_load = 0, busiest_capacity = 1;
        int i;
 
        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-               unsigned long power, capacity, wl;
+               unsigned long capacity, capacity_factor, wl;
                enum fbq_type rt;
 
                rq = cpu_rq(i);
@@ -6026,34 +6527,34 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                if (rt > env->fbq_type)
                        continue;
 
-               power = power_of(i);
-               capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-               if (!capacity)
-                       capacity = fix_small_capacity(env->sd, group);
+               capacity = capacity_of(i);
+               capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
+               if (!capacity_factor)
+                       capacity_factor = fix_small_capacity(env->sd, group);
 
                wl = weighted_cpuload(i);
 
                /*
                 * When comparing with imbalance, use weighted_cpuload()
-                * which is not scaled with the cpu power.
+                * which is not scaled with the cpu capacity.
                 */
-               if (capacity && rq->nr_running == 1 && wl > env->imbalance)
+               if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
                        continue;
 
                /*
                 * For the load comparisons with the other cpu's, consider
-                * the weighted_cpuload() scaled with the cpu power, so that
-                * the load can be moved away from the cpu that is potentially
-                * running at a lower capacity.
+                * the weighted_cpuload() scaled with the cpu capacity, so
+                * that the load can be moved away from the cpu that is
+                * potentially running at a lower capacity.
                 *
-                * Thus we're looking for max(wl_i / power_i), crosswise
+                * Thus we're looking for max(wl_i / capacity_i); crosswise
                 * multiplication to rid ourselves of the division works out
-                * to: wl_i * power_j > wl_j * power_i;  where j is our
-                * previous maximum.
+                * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
+                * our previous maximum.
                 */
-               if (wl * busiest_power > busiest_load * power) {
+               if (wl * busiest_capacity > busiest_load * capacity) {
                        busiest_load = wl;
-                       busiest_power = power;
+                       busiest_capacity = capacity;
                        busiest = rq;
                }
        }
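
The crosswise multiplication mentioned in the comment above is the usual trick for comparing two ratios without a division; a minimal standalone check with made-up values:

#include <assert.h>

int main(void)
{
	unsigned long wl_i = 3000, capacity_i = 1024;	/* candidate cpu i    */
	unsigned long wl_j = 2000, capacity_j = 2048;	/* previous maximum j */

	/* wl_i/capacity_i > wl_j/capacity_j  <=>  wl_i*capacity_j > wl_j*capacity_i */
	int by_division = (double)wl_i / capacity_i > (double)wl_j / capacity_j;
	int by_cross    = wl_i * capacity_j > wl_j * capacity_i;

	assert(by_division == by_cross);	/* cpu i becomes the new busiest */
	return 0;
}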
@@ -6137,7 +6638,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        struct sched_group *group;
        struct rq *busiest;
        unsigned long flags;
-       struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
        struct lb_env env = {
                .sd             = sd,
@@ -6148,6 +6649,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                .loop_break     = sched_nr_migrate_break,
                .cpus           = cpus,
                .fbq_type       = all,
+               .tasks          = LIST_HEAD_INIT(env.tasks),
        };
 
        /*
@@ -6197,23 +6699,30 @@ redo:
                env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
-               local_irq_save(flags);
-               double_rq_lock(env.dst_rq, busiest);
+               raw_spin_lock_irqsave(&busiest->lock, flags);
 
                /*
                 * cur_ld_moved - load moved in current iteration
                 * ld_moved     - cumulative load moved across iterations
                 */
-               cur_ld_moved = move_tasks(&env);
-               ld_moved += cur_ld_moved;
-               double_rq_unlock(env.dst_rq, busiest);
-               local_irq_restore(flags);
+               cur_ld_moved = detach_tasks(&env);
 
                /*
-                * some other cpu did the load balance for us.
+                * We've detached some tasks from busiest_rq. Every
+                * detached task is marked TASK_ON_RQ_MIGRATING, so we can
+                * safely unlock busiest->lock and be sure that nobody can
+                * manipulate those tasks in parallel.
+                * See the task_rq_lock() family for the details.
                 */
-               if (cur_ld_moved && env.dst_cpu != smp_processor_id())
-                       resched_cpu(env.dst_cpu);
+
+               raw_spin_unlock(&busiest->lock);
+
+               if (cur_ld_moved) {
+                       attach_tasks(&env);
+                       ld_moved += cur_ld_moved;
+               }
+
+               local_irq_restore(flags);
 
                if (env.flags & LBF_NEED_BREAK) {
                        env.flags &= ~LBF_NEED_BREAK;
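
A standalone model of the locking change in this hunk: tasks are detached while holding only the busiest runqueue's lock, that lock is dropped, and the detached tasks are then attached to the destination under its own lock, so the two locks are never held at the same time. Only the detach/attach split itself comes from the patch; the struct, counts and helpers below are made up for illustration:

#include <pthread.h>
#include <stdio.h>

struct rq { pthread_mutex_t lock; int nr_running; };

static int detach_tasks(struct rq *src, int want)
{
	/* caller holds src->lock */
	int moved = src->nr_running < want ? src->nr_running : want;

	src->nr_running -= moved;
	return moved;
}

static void attach_tasks(struct rq *dst, int moved)
{
	pthread_mutex_lock(&dst->lock);
	dst->nr_running += moved;
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct rq busiest = { PTHREAD_MUTEX_INITIALIZER, 4 };
	struct rq local   = { PTHREAD_MUTEX_INITIALIZER, 0 };
	int moved;

	pthread_mutex_lock(&busiest.lock);
	moved = detach_tasks(&busiest, 2);
	pthread_mutex_unlock(&busiest.lock);	/* never hold both locks */

	if (moved)
		attach_tasks(&local, moved);

	printf("moved=%d busiest=%d local=%d\n",
	       moved, busiest.nr_running, local.nr_running);
	return 0;
}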
@@ -6261,12 +6770,10 @@ more_balance:
                 * We failed to reach balance because of affinity.
                 */
                if (sd_parent) {
-                       int *group_imbalance = &sd_parent->groups->sgp->imbalance;
+                       int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
-                       if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+                       if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
                                *group_imbalance = 1;
-                       } else if (*group_imbalance)
-                               *group_imbalance = 0;
                }
 
                /* All tasks on this runqueue were pinned by CPU affinity */
@@ -6277,7 +6784,7 @@ more_balance:
                                env.loop_break = sched_nr_migrate_break;
                                goto redo;
                        }
-                       goto out_balanced;
+                       goto out_all_pinned;
                }
        }
 
@@ -6342,7 +6849,7 @@ more_balance:
                 * If we've begun active balancing, start to back off. This
                 * case may not be covered by the all_pinned logic if there
                 * is only 1 task on the busy runqueue (because we don't call
-                * move_tasks).
+                * detach_tasks).
                 */
                if (sd->balance_interval < sd->max_interval)
                        sd->balance_interval *= 2;
@@ -6351,6 +6858,23 @@ more_balance:
        goto out;
 
 out_balanced:
+       /*
+        * We reached balance although we may have faced some affinity
+        * constraints. Clear the imbalance flag if it was set.
+        */
+       if (sd_parent) {
+               int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+               if (*group_imbalance)
+                       *group_imbalance = 0;
+       }
+
+out_all_pinned:
+       /*
+        * We reached balance because all tasks are pinned at this level, so
+        * we can't migrate them. Leave the imbalance flag set so the parent
+        * level can try to migrate them.
+        */
        schedstat_inc(sd, lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
@@ -6367,21 +6891,63 @@ out:
        return ld_moved;
 }
 
+static inline unsigned long
+get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
+{
+       unsigned long interval = sd->balance_interval;
+
+       if (cpu_busy)
+               interval *= sd->busy_factor;
+
+       /* scale ms to jiffies */
+       interval = msecs_to_jiffies(interval);
+       interval = clamp(interval, 1UL, max_load_balance_interval);
+
+       return interval;
+}
+
+static inline void
+update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
+{
+       unsigned long interval, next;
+
+       interval = get_sd_balance_interval(sd, cpu_busy);
+       next = sd->last_balance + interval;
+
+       if (time_after(*next_balance, next))
+               *next_balance = next;
+}
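
A standalone sketch of the interval arithmetic that get_sd_balance_interval() now centralizes, with made-up values; msecs_to_jiffies() is approximated for HZ=250 and the clamp ceiling is only an example:

#include <stdio.h>

#define HZ	250UL

static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return ms * HZ / 1000;	/* rough approximation for the example */
}

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned long balance_interval = 8;	/* ms, per-domain value */
	unsigned long busy_factor = 32;
	unsigned long max_load_balance_interval = HZ / 10;	/* example cap */
	int cpu_busy = 1;

	unsigned long interval = balance_interval;

	if (cpu_busy)
		interval *= busy_factor;	/* 256 ms when busy */

	interval = msecs_to_jiffies(interval);	/* 64 jiffies at HZ=250 */
	interval = clamp_ul(interval, 1UL, max_load_balance_interval);

	printf("interval = %lu jiffies\n", interval);	/* clamped to 25 */
	return 0;
}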
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(int this_cpu, struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
 {
+       unsigned long next_balance = jiffies + HZ;
+       int this_cpu = this_rq->cpu;
        struct sched_domain *sd;
        int pulled_task = 0;
-       unsigned long next_balance = jiffies + HZ;
        u64 curr_cost = 0;
 
+       idle_enter_fair(this_rq);
+
+       /*
+        * We must set idle_stamp _before_ doing the balancing work below,
+        * such that we measure its duration as idle time.
+        */
        this_rq->idle_stamp = rq_clock(this_rq);
 
-       if (this_rq->avg_idle < sysctl_sched_migration_cost)
-               return;
+       if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+           !this_rq->rd->overload) {
+               rcu_read_lock();
+               sd = rcu_dereference_check_sched_domain(this_rq->sd);
+               if (sd)
+                       update_next_balance(sd, 0, &next_balance);
+               rcu_read_unlock();
+
+               goto out;
+       }
 
        /*
         * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6391,20 +6957,20 @@ void idle_balance(int this_cpu, struct rq *this_rq)
        update_blocked_averages(this_cpu);
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
-               unsigned long interval;
                int continue_balancing = 1;
                u64 t0, domain_cost;
 
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
 
-               if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
+               if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+                       update_next_balance(sd, 0, &next_balance);
                        break;
+               }
 
                if (sd->flags & SD_BALANCE_NEWIDLE) {
                        t0 = sched_clock_cpu(this_cpu);
 
-                       /* If we've pulled tasks over stop searching: */
                        pulled_task = load_balance(this_cpu, this_rq,
                                                   sd, CPU_NEWLY_IDLE,
                                                   &continue_balancing);
@@ -6416,28 +6982,45 @@ void idle_balance(int this_cpu, struct rq *this_rq)
                        curr_cost += domain_cost;
                }
 
-               interval = msecs_to_jiffies(sd->balance_interval);
-               if (time_after(next_balance, sd->last_balance + interval))
-                       next_balance = sd->last_balance + interval;
-               if (pulled_task) {
-                       this_rq->idle_stamp = 0;
+               update_next_balance(sd, 0, &next_balance);
+
+               /*
+                * Stop searching for tasks to pull if there are
+                * now runnable tasks on this rq.
+                */
+               if (pulled_task || this_rq->nr_running > 0)
                        break;
-               }
        }
        rcu_read_unlock();
 
        raw_spin_lock(&this_rq->lock);
 
-       if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
-               /*
-                * We are going idle. next_balance may be set based on
-                * a busy processor. So reset next_balance.
-                */
+       if (curr_cost > this_rq->max_idle_balance_cost)
+               this_rq->max_idle_balance_cost = curr_cost;
+
+       /*
+        * While browsing the domains, we released the rq lock; a task could
+        * have been enqueued in the meantime. Since we're not going idle,
+        * pretend we pulled a task.
+        */
+       if (this_rq->cfs.h_nr_running && !pulled_task)
+               pulled_task = 1;
+
+out:
+       /* Move the next balance forward */
+       if (time_after(this_rq->next_balance, next_balance))
                this_rq->next_balance = next_balance;
+
+       /* Is there a task of a higher priority class queued? */
+       if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+               pulled_task = -1;
+
+       if (pulled_task) {
+               idle_exit_fair(this_rq);
+               this_rq->idle_stamp = 0;
        }
 
-       if (curr_cost > this_rq->max_idle_balance_cost)
-               this_rq->max_idle_balance_cost = curr_cost;
+       return pulled_task;
 }
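
With this change idle_balance() reports three outcomes instead of returning void: a positive value when CFS work was pulled or appeared while balancing, -1 when a task of a higher-priority class became runnable, and 0 when the CPU really stays idle. A trivial standalone illustration of that contract; the caller-side wording is an interpretation, not code from this patch:

#include <stdio.h>

static const char *interpret_idle_balance(int ret)
{
	if (ret < 0)
		return "higher-priority class became runnable: redo class selection";
	if (ret > 0)
		return "CFS tasks pulled or appeared: pick again";
	return "still idle: nothing to run";
}

int main(void)
{
	printf("%s\n", interpret_idle_balance(-1));
	printf("%s\n", interpret_idle_balance(1));
	printf("%s\n", interpret_idle_balance(0));
	return 0;
}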
 
 /*
@@ -6453,6 +7036,7 @@ static int active_load_balance_cpu_stop(void *data)
        int target_cpu = busiest_rq->push_cpu;
        struct rq *target_rq = cpu_rq(target_cpu);
        struct sched_domain *sd;
+       struct task_struct *p = NULL;
 
        raw_spin_lock_irq(&busiest_rq->lock);
 
@@ -6472,9 +7056,6 @@ static int active_load_balance_cpu_stop(void *data)
         */
        BUG_ON(busiest_rq == target_rq);
 
-       /* move a task from busiest_rq to target_rq */
-       double_lock_balance(busiest_rq, target_rq);
-
        /* Search for an sd spanning us and the target CPU. */
        rcu_read_lock();
        for_each_domain(target_cpu, sd) {
@@ -6495,19 +7076,30 @@ static int active_load_balance_cpu_stop(void *data)
 
                schedstat_inc(sd, alb_count);
 
-               if (move_one_task(&env))
+               p = detach_one_task(&env);
+               if (p)
                        schedstat_inc(sd, alb_pushed);
                else
                        schedstat_inc(sd, alb_failed);
        }
        rcu_read_unlock();
-       double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
        busiest_rq->active_balance = 0;
-       raw_spin_unlock_irq(&busiest_rq->lock);
+       raw_spin_unlock(&busiest_rq->lock);
+
+       if (p)
+               attach_one_task(target_rq, p);
+
+       local_irq_enable();
+
        return 0;
 }
 
+static inline int on_null_domain(struct rq *rq)
+{
+       return unlikely(!rcu_dereference_sched(rq->sd));
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
@@ -6521,7 +7113,7 @@ static struct {
        unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
 
-static inline int find_new_ilb(int call_cpu)
+static inline int find_new_ilb(void)
 {
        int ilb = cpumask_first(nohz.idle_cpus_mask);
 
@@ -6536,13 +7128,13 @@ static inline int find_new_ilb(int call_cpu)
  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
  * CPU (if there is one).
  */
-static void nohz_balancer_kick(int cpu)
+static void nohz_balancer_kick(void)
 {
        int ilb_cpu;
 
        nohz.next_balance++;
 
-       ilb_cpu = find_new_ilb(cpu);
+       ilb_cpu = find_new_ilb();
 
        if (ilb_cpu >= nr_cpu_ids)
                return;
@@ -6562,8 +7154,13 @@ static void nohz_balancer_kick(int cpu)
 static inline void nohz_balance_exit_idle(int cpu)
 {
        if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-               atomic_dec(&nohz.nr_cpus);
+               /*
+                * Completely isolated CPUs never set themselves in
+                * nohz.idle_cpus_mask, so we must test before clearing.
+                */
+               if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+                       cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+                       atomic_dec(&nohz.nr_cpus);
+               }
                clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        }
 }
@@ -6580,7 +7177,7 @@ static inline void set_cpu_sd_state_busy(void)
                goto unlock;
        sd->nohz_idle = 0;
 
-       atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+       atomic_inc(&sd->groups->sgc->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -6597,7 +7194,7 @@ void set_cpu_sd_state_idle(void)
                goto unlock;
        sd->nohz_idle = 1;
 
-       atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+       atomic_dec(&sd->groups->sgc->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -6617,6 +7214,12 @@ void nohz_balance_enter_idle(int cpu)
        if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
                return;
 
+       /*
+        * If we're a completely isolated CPU, we don't take part in
+        * nohz idle balancing.
+        */
+       if (on_null_domain(cpu_rq(cpu)))
+               return;
+
        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
        atomic_inc(&nohz.nr_cpus);
        set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
@@ -6652,10 +7255,10 @@ void update_max_interval(void)
  *
  * Balancing parameters are set up in init_sched_domains.
  */
-static void rebalance_domains(int cpu, enum cpu_idle_type idle)
+static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 {
        int continue_balancing = 1;
-       struct rq *rq = cpu_rq(cpu);
+       int cpu = rq->cpu;
        unsigned long interval;
        struct sched_domain *sd;
        /* Earliest time when we have to do rebalance again */
@@ -6694,16 +7297,9 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
                        break;
                }
 
-               interval = sd->balance_interval;
-               if (idle != CPU_IDLE)
-                       interval *= sd->busy_factor;
-
-               /* scale ms to jiffies */
-               interval = msecs_to_jiffies(interval);
-               interval = clamp(interval, 1UL, max_load_balance_interval);
+               interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
 
                need_serialize = sd->flags & SD_SERIALIZE;
-
                if (need_serialize) {
                        if (!spin_trylock(&balancing))
                                goto out;
@@ -6719,6 +7315,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
                                idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
                        }
                        sd->last_balance = jiffies;
+                       interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
                }
                if (need_serialize)
                        spin_unlock(&balancing);
@@ -6752,9 +7349,9 @@ out:
  * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
  * rebalancing for all the cpus for whom scheduler ticks are stopped.
  */
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 {
-       struct rq *this_rq = cpu_rq(this_cpu);
+       int this_cpu = this_rq->cpu;
        struct rq *rq;
        int balance_cpu;
 
@@ -6776,12 +7373,17 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
 
                rq = cpu_rq(balance_cpu);
 
-               raw_spin_lock_irq(&rq->lock);
-               update_rq_clock(rq);
-               update_idle_cpu_load(rq);
-               raw_spin_unlock_irq(&rq->lock);
-
-               rebalance_domains(balance_cpu, CPU_IDLE);
+               /*
+                * If the next balance for this rq is due,
+                * do the balance.
+                */
+               if (time_after_eq(jiffies, rq->next_balance)) {
+                       raw_spin_lock_irq(&rq->lock);
+                       update_rq_clock(rq);
+                       update_idle_cpu_load(rq);
+                       raw_spin_unlock_irq(&rq->lock);
+                       rebalance_domains(rq, CPU_IDLE);
+               }
 
                if (time_after(this_rq->next_balance, rq->next_balance))
                        this_rq->next_balance = rq->next_balance;
@@ -6796,18 +7398,18 @@ end:
  * of an idle cpu is the system.
  *   - This rq has more than one task.
  *   - At any scheduler domain level, this cpu's scheduler group has multiple
- *     busy cpu's exceeding the group's power.
+ *     busy cpu's exceeding the group's capacity.
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
-static inline int nohz_kick_needed(struct rq *rq, int cpu)
+static inline int nohz_kick_needed(struct rq *rq)
 {
        unsigned long now = jiffies;
        struct sched_domain *sd;
-       struct sched_group_power *sgp;
-       int nr_busy;
+       struct sched_group_capacity *sgc;
+       int nr_busy, cpu = rq->cpu;
 
-       if (unlikely(idle_cpu(cpu)))
+       if (unlikely(rq->idle_balance))
                return 0;
 
        /*
@@ -6834,8 +7436,8 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
        if (sd) {
-               sgp = sd->groups->sgp;
-               nr_busy = atomic_read(&sgp->nr_busy_cpus);
+               sgc = sd->groups->sgc;
+               nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
                if (nr_busy > 1)
                        goto need_kick_unlock;
@@ -6856,7 +7458,7 @@ need_kick:
        return 1;
 }
 #else
-static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
+static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
 #endif
 
 /*
@@ -6865,44 +7467,42 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
  */
 static void run_rebalance_domains(struct softirq_action *h)
 {
-       int this_cpu = smp_processor_id();
-       struct rq *this_rq = cpu_rq(this_cpu);
+       struct rq *this_rq = this_rq();
        enum cpu_idle_type idle = this_rq->idle_balance ?
                                                CPU_IDLE : CPU_NOT_IDLE;
 
-       rebalance_domains(this_cpu, idle);
+       rebalance_domains(this_rq, idle);
 
        /*
         * If this cpu has a pending nohz_balance_kick, then do the
         * balancing on behalf of the other idle cpus whose ticks are
         * stopped.
         */
-       nohz_idle_balance(this_cpu, idle);
-}
-
-static inline int on_null_domain(int cpu)
-{
-       return !rcu_dereference_sched(cpu_rq(cpu)->sd);
+       nohz_idle_balance(this_rq, idle);
 }
 
 /*
  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  */
-void trigger_load_balance(struct rq *rq, int cpu)
+void trigger_load_balance(struct rq *rq)
 {
        /* Don't need to rebalance while attached to NULL domain */
-       if (time_after_eq(jiffies, rq->next_balance) &&
-           likely(!on_null_domain(cpu)))
+       if (unlikely(on_null_domain(rq)))
+               return;
+
+       if (time_after_eq(jiffies, rq->next_balance))
                raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ_COMMON
-       if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
-               nohz_balancer_kick(cpu);
+       if (nohz_kick_needed(rq))
+               nohz_balancer_kick();
 #endif
 }
 
 static void rq_online_fair(struct rq *rq)
 {
        update_sysctl();
+
+       update_runtime_enabled(rq);
 }
 
 static void rq_offline_fair(struct rq *rq)
@@ -6976,7 +7576,7 @@ static void task_fork_fair(struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
-               resched_task(rq->curr);
+               resched_curr(rq);
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
@@ -6991,7 +7591,7 @@ static void task_fork_fair(struct task_struct *p)
 static void
 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       if (!p->se.on_rq)
+       if (!task_on_rq_queued(p))
                return;
 
        /*
@@ -7001,7 +7601,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         */
        if (rq->curr == p) {
                if (p->prio > oldprio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        } else
                check_preempt_curr(rq, p, 0);
 }
@@ -7012,15 +7612,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /*
-        * Ensure the task's vruntime is normalized, so that when its
+        * Ensure the task's vruntime is normalized, so that when it's
         * switched back to the fair class the enqueue_entity(.flags=0) will
         * do the right thing.
         *
-        * If it was on_rq, then the dequeue_entity(.flags=0) will already
-        * have normalized the vruntime, if it was !on_rq, then only when
+        * If it's queued, then the dequeue_entity(.flags=0) will already
+        * have normalized the vruntime, if it's !queued, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
-       if (!se->on_rq && p->state != TASK_RUNNING) {
+       if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
@@ -7047,7 +7647,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
-       if (!p->se.on_rq)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       struct sched_entity *se = &p->se;
+       /*
+        * Since the real depth could have changed (only the FAIR
+        * class maintains the depth value), reset it properly.
+        */
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+#endif
+       if (!task_on_rq_queued(p))
                return;
 
        /*
@@ -7056,7 +7664,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
         * if we can still preempt the current task.
         */
        if (rq->curr == p)
-               resched_task(rq->curr);
+               resched_curr(rq);
        else
                check_preempt_curr(rq, p, 0);
 }
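
Several hunks in this patch (switched_to_fair(), task_move_group_fair(), init_tg_cfs_entry()) maintain the same invariant: se->depth caches the number of ancestors, i.e. the parent's depth plus one, with zero at the root. A minimal standalone model of that invariant; the types and helpers are simplified and not kernel code:

#include <assert.h>
#include <stddef.h>

struct sched_entity { struct sched_entity *parent; int depth; };

static void set_depth(struct sched_entity *se)
{
	se->depth = se->parent ? se->parent->depth + 1 : 0;
}

static int walk_depth(struct sched_entity *se)
{
	int d = 0;	/* count ancestors explicitly */

	for (; se->parent; se = se->parent)
		d++;
	return d;
}

int main(void)
{
	struct sched_entity root = { NULL, 0 }, mid, leaf;

	mid.parent = &root;	set_depth(&mid);	/* depth 1 */
	leaf.parent = &mid;	set_depth(&leaf);	/* depth 2 */

	assert(mid.depth == walk_depth(&mid));
	assert(leaf.depth == walk_depth(&leaf));
	return 0;
}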
@@ -7093,9 +7701,11 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int queued)
 {
+       struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq;
+
        /*
         * If the task was not on the rq at the time of this cgroup movement
         * it must have been asleep, sleeping tasks keep their ->vruntime
@@ -7110,7 +7720,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
         * fair sleeper stuff for the first placement, but who cares.
         */
        /*
-        * When !on_rq, vruntime of the task has usually NOT been normalized.
+        * When !queued, vruntime of the task has usually NOT been normalized.
         * But there are some cases where it has already been normalized:
         *
         * - Moving a forked child which is waiting for being woken up by
@@ -7121,23 +7731,24 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
         * To prevent boost or penalty in the new cfs_rq caused by delta
         * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
         */
-       if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
-               on_rq = 1;
+       if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
+               queued = 1;
 
-       if (!on_rq)
-               p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+       if (!queued)
+               se->vruntime -= cfs_rq_of(se)->min_vruntime;
        set_task_rq(p, task_cpu(p));
-       if (!on_rq) {
-               cfs_rq = cfs_rq_of(&p->se);
-               p->se.vruntime += cfs_rq->min_vruntime;
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+       if (!queued) {
+               cfs_rq = cfs_rq_of(se);
+               se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
                /*
                 * migrate_task_rq_fair() will have removed our previous
                 * contribution, but we must synchronize for ongoing future
                 * decay.
                 */
-               p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-               cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+               se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+               cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
 #endif
        }
 }
@@ -7233,10 +7844,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
        if (!se)
                return;
 
-       if (!parent)
+       if (!parent) {
                se->cfs_rq = &rq->cfs;
-       else
+               se->depth = 0;
+       } else {
                se->cfs_rq = parent->my_q;
+               se->depth = parent->depth + 1;
+       }
 
        se->my_q = cfs_rq;
        /* guarantee group entities always have weight */
@@ -7347,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
        .get_rr_interval        = get_rr_interval_fair,
 
+       .update_curr            = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        .task_move_group        = task_move_group_fair,
 #endif