Merge tag 'v4.1' into p/abusse/merge_upgrade
diff --git a/kernel/sched/cfs/fair.c b/kernel/sched/cfs/fair.c
index ef2b104..c2980e8 100644
--- a/kernel/sched/cfs/fair.c
+++ b/kernel/sched/cfs/fair.c
@@ -670,17 +670,18 @@ static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 static inline void __update_task_entity_contrib(struct sched_entity *se);
+static inline void __update_task_entity_utilization(struct sched_entity *se);
 
 /* Give a new task initial runnable averages that weight its load heavily early on */
 void init_task_runnable_average(struct task_struct *p)
 {
        u32 slice;
 
-       p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
-       p->se.avg.runnable_avg_sum = slice;
-       p->se.avg.runnable_avg_period = slice;
+       p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
+       p->se.avg.avg_period = slice;
        __update_task_entity_contrib(&p->se);
+       __update_task_entity_utilization(&p->se);
 }
 #else
 void init_task_runnable_average(struct task_struct *p)
@@ -873,7 +874,6 @@ struct numa_group {
        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
        pid_t gid;
-       struct list_head task_list;
 
        struct rcu_head rcu;
        nodemask_t active_nodes;
@@ -901,18 +901,24 @@ pid_t task_numa_group_id(struct task_struct *p)
        return p->numa_group ? p->numa_group->gid : 0;
 }
 
-static inline int task_faults_idx(int nid, int priv)
+/*
+ * The averaged statistics, shared & private, memory & cpu,
+ * occupy the first half of the array. The second half of the
+ * array is for current counters, which are averaged into the
+ * first set by task_numa_placement.
+ */
+static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
 {
-       return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
+       return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
 }
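
As a quick illustration of the consolidated indexing above, here is a small standalone C sketch (not kernel code). It assumes nr_node_ids = 2, NR_NUMA_HINT_FAULT_TYPES = 2 (private/shared), and the enum order NUMA_MEM, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF suggested by the "first half / second half" comment; the real definitions live elsewhere in the scheduler headers.

#include <stdio.h>

/* Assumed values for this sketch only. */
enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF };
#define NR_NUMA_HINT_FAULT_TYPES 2	/* private, shared */
static int nr_node_ids = 2;

/* Same formula as the new task_faults_idx() above. */
static int faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

int main(void)
{
	printf("NUMA_MEM    node 1, shared -> %d\n", faults_idx(NUMA_MEM, 1, 1));    /* 3 */
	printf("NUMA_MEMBUF node 1, shared -> %d\n", faults_idx(NUMA_MEMBUF, 1, 1)); /* 11 */
	return 0;
}

Under these assumptions the averaged NUMA_MEM/NUMA_CPU slots occupy indices 0..7 and the per-scan NUMA_MEMBUF/NUMA_CPUBUF buffers occupy indices 8..15, matching the layout the comment describes.
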
 
 static inline unsigned long task_faults(struct task_struct *p, int nid)
 {
-       if (!p->numa_faults_memory)
+       if (!p->numa_faults)
                return 0;
 
-       return p->numa_faults_memory[task_faults_idx(nid, 0)] +
-               p->numa_faults_memory[task_faults_idx(nid, 1)];
+       return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+               p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
 }
 
 static inline unsigned long group_faults(struct task_struct *p, int nid)
@@ -920,14 +926,79 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
        if (!p->numa_group)
                return 0;
 
-       return p->numa_group->faults[task_faults_idx(nid, 0)] +
-               p->numa_group->faults[task_faults_idx(nid, 1)];
+       return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+               p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
 }
 
 static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
 {
-       return group->faults_cpu[task_faults_idx(nid, 0)] +
-               group->faults_cpu[task_faults_idx(nid, 1)];
+       return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
+               group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
+}
+
+/* Handle placement on systems where not all nodes are directly connected. */
+static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
+                                       int maxdist, bool task)
+{
+       unsigned long score = 0;
+       int node;
+
+       /*
+        * All nodes are directly connected, and the same distance
+        * from each other. No need for fancy placement algorithms.
+        */
+       if (sched_numa_topology_type == NUMA_DIRECT)
+               return 0;
+
+       /*
+        * This code is called for each node, introducing N^2 complexity,
+        * which should be ok given the number of nodes rarely exceeds 8.
+        */
+       for_each_online_node(node) {
+               unsigned long faults;
+               int dist = node_distance(nid, node);
+
+               /*
+                * The furthest away nodes in the system are not interesting
+                * for placement; nid was already counted.
+                */
+               if (dist == sched_max_numa_distance || node == nid)
+                       continue;
+
+               /*
+                * On systems with a backplane NUMA topology, compare groups
+                * of nodes, and move tasks towards the group with the most
+                * memory accesses. When comparing two nodes at distance
+                * "hoplimit", only nodes closer than "hoplimit" are part
+                * of each group. Skip other nodes.
+                */
+               if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                       dist > maxdist)
+                       continue;
+
+               /* Add up the faults from nearby nodes. */
+               if (task)
+                       faults = task_faults(p, node);
+               else
+                       faults = group_faults(p, node);
+
+               /*
+                * On systems with a glueless mesh NUMA topology, there are
+                * no fixed "groups of nodes". Instead, nodes that are not
+                * directly connected bounce traffic through intermediate
+                * nodes; a numa_group can occupy any set of nodes.
+                * The further away a node is, the less the faults count.
+                * This seems to result in good task placement.
+                */
+               if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
+                       faults *= (sched_max_numa_distance - dist);
+                       faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
+               }
+
+               score += faults;
+       }
+
+       return score;
 }
 
 /*
@@ -936,11 +1007,12 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
  * larger multiplier, in order to group tasks together that are almost
  * evenly spread out between numa nodes.
  */
-static inline unsigned long task_weight(struct task_struct *p, int nid)
+static inline unsigned long task_weight(struct task_struct *p, int nid,
+                                       int dist)
 {
-       unsigned long total_faults;
+       unsigned long faults, total_faults;
 
-       if (!p->numa_faults_memory)
+       if (!p->numa_faults)
                return 0;
 
        total_faults = p->total_numa_faults;
@@ -948,15 +1020,29 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
        if (!total_faults)
                return 0;
 
-       return 1000 * task_faults(p, nid) / total_faults;
+       faults = task_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, true);
+
+       return 1000 * faults / total_faults;
 }
 
-static inline unsigned long group_weight(struct task_struct *p, int nid)
+static inline unsigned long group_weight(struct task_struct *p, int nid,
+                                        int dist)
 {
-       if (!p->numa_group || !p->numa_group->total_faults)
+       unsigned long faults, total_faults;
+
+       if (!p->numa_group)
+               return 0;
+
+       total_faults = p->numa_group->total_faults;
+
+       if (!total_faults)
                return 0;
 
-       return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
+       faults = group_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, false);
+
+       return 1000 * faults / total_faults;
 }
 
 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
@@ -1089,6 +1175,7 @@ struct task_numa_env {
        struct numa_stats src_stats, dst_stats;
 
        int imbalance_pct;
+       int dist;
 
        struct task_struct *best_task;
        long best_imp;
@@ -1111,9 +1198,11 @@ static void task_numa_assign(struct task_numa_env *env,
 static bool load_too_imbalanced(long src_load, long dst_load,
                                struct task_numa_env *env)
 {
-       long imb, old_imb;
-       long orig_src_load, orig_dst_load;
        long src_capacity, dst_capacity;
+       long orig_src_load;
+       long load_a, load_b;
+       long moved_load;
+       long imb;
 
        /*
         * The load is corrected for the CPU capacity available on each node.
@@ -1126,30 +1215,39 @@ static bool load_too_imbalanced(long src_load, long dst_load,
        dst_capacity = env->dst_stats.compute_capacity;
 
        /* We care about the slope of the imbalance, not the direction. */
-       if (dst_load < src_load)
-               swap(dst_load, src_load);
+       load_a = dst_load;
+       load_b = src_load;
+       if (load_a < load_b)
+               swap(load_a, load_b);
 
        /* Is the difference below the threshold? */
-       imb = dst_load * src_capacity * 100 -
-             src_load * dst_capacity * env->imbalance_pct;
+       imb = load_a * src_capacity * 100 -
+               load_b * dst_capacity * env->imbalance_pct;
        if (imb <= 0)
                return false;
 
        /*
         * The imbalance is above the allowed threshold.
-        * Compare it with the old imbalance.
+        * Allow a move that brings us closer to a balanced situation,
+        * without moving things past the point of balance.
         */
        orig_src_load = env->src_stats.load;
-       orig_dst_load = env->dst_stats.load;
-
-       if (orig_dst_load < orig_src_load)
-               swap(orig_dst_load, orig_src_load);
 
-       old_imb = orig_dst_load * src_capacity * 100 -
-                 orig_src_load * dst_capacity * env->imbalance_pct;
+       /*
+        * In a task swap, there will be one load moving from src to dst,
+        * and another moving back. This is the net sum of both moves.
+        * A simple task move will always have a positive value.
+        * Allow the move if it brings the system closer to a balanced
+        * situation, without crossing over the balance point.
+        */
+       moved_load = orig_src_load - src_load;
 
-       /* Would this change make things worse? */
-       return (imb > old_imb);
+       if (moved_load > 0)
+               /* Moving src -> dst. Did we overshoot balance? */
+               return src_load * dst_capacity < dst_load * src_capacity;
+       else
+               /* Moving dst -> src. Did we overshoot balance? */
+               return dst_load * src_capacity < src_load * dst_capacity;
 }
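
A standalone sketch of the final overshoot test (it is only reached once the imbalance threshold above has tripped); the loads and capacities are invented numbers in arbitrary units:

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified version of the decision above: after the proposed move, has the
 * capacity-corrected load crossed over the balance point?
 */
static bool overshoots(long src_load, long dst_load,
		       long src_capacity, long dst_capacity,
		       long orig_src_load)
{
	long moved_load = orig_src_load - src_load;

	if (moved_load > 0)	/* moving src -> dst */
		return src_load * dst_capacity < dst_load * src_capacity;
	else			/* moving dst -> src */
		return dst_load * src_capacity < src_load * dst_capacity;
}

int main(void)
{
	/* Equal capacities, original 6/4 split: moving one unit leaves 5/5, no overshoot. */
	printf("%d\n", overshoots(5, 5, 1024, 1024, 6));	/* 0 */
	/* Moving three units from the 6/4 split leaves 3/7: past balance, reject. */
	printf("%d\n", overshoots(3, 7, 1024, 1024, 6));	/* 1 */
	return 0;
}
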
 
 /*
@@ -1168,6 +1266,7 @@ static void task_numa_compare(struct task_numa_env *env,
        long load;
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
+       int dist = env->dist;
 
        rcu_read_lock();
 
@@ -1208,8 +1307,8 @@ static void task_numa_compare(struct task_numa_env *env,
                 * in any group then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
-                       imp = taskimp + task_weight(cur, env->src_nid) -
-                             task_weight(cur, env->dst_nid);
+                       imp = taskimp + task_weight(cur, env->src_nid, dist) -
+                             task_weight(cur, env->dst_nid, dist);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
@@ -1223,11 +1322,11 @@ static void task_numa_compare(struct task_numa_env *env,
                         * instead.
                         */
                        if (cur->numa_group)
-                               imp += group_weight(cur, env->src_nid) -
-                                      group_weight(cur, env->dst_nid);
+                               imp += group_weight(cur, env->src_nid, dist) -
+                                      group_weight(cur, env->dst_nid, dist);
                        else
-                               imp += task_weight(cur, env->src_nid) -
-                                      task_weight(cur, env->dst_nid);
+                               imp += task_weight(cur, env->src_nid, dist) -
+                                      task_weight(cur, env->dst_nid, dist);
                }
        }
 
@@ -1326,7 +1425,7 @@ static int task_numa_migrate(struct task_struct *p)
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
-       int nid, ret;
+       int nid, ret, dist;
        long taskimp, groupimp;
 
        /*
@@ -1354,29 +1453,45 @@ static int task_numa_migrate(struct task_struct *p)
                return -EINVAL;
        }
 
-       taskweight = task_weight(p, env.src_nid);
-       groupweight = group_weight(p, env.src_nid);
-       update_numa_stats(&env.src_stats, env.src_nid);
        env.dst_nid = p->numa_preferred_nid;
-       taskimp = task_weight(p, env.dst_nid) - taskweight;
-       groupimp = group_weight(p, env.dst_nid) - groupweight;
+       dist = env.dist = node_distance(env.src_nid, env.dst_nid);
+       taskweight = task_weight(p, env.src_nid, dist);
+       groupweight = group_weight(p, env.src_nid, dist);
+       update_numa_stats(&env.src_stats, env.src_nid);
+       taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
+       groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
        /* Try to find a spot on the preferred nid. */
        task_numa_find_cpu(&env, taskimp, groupimp);
 
-       /* No space available on the preferred nid. Look elsewhere. */
-       if (env.best_cpu == -1) {
+       /*
+        * Look at other nodes in these cases:
+        * - there is no space available on the preferred_nid
+        * - the task is part of a numa_group that is interleaved across
+        *   multiple NUMA nodes; in order to better consolidate the group,
+        *   we need to check other locations.
+        */
+       if (env.best_cpu == -1 || (p->numa_group &&
+                       nodes_weight(p->numa_group->active_nodes) > 1)) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;
 
+                       dist = node_distance(env.src_nid, env.dst_nid);
+                       if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                               dist != env.dist) {
+                               taskweight = task_weight(p, env.src_nid, dist);
+                               groupweight = group_weight(p, env.src_nid, dist);
+                       }
+
                        /* Only consider nodes where both task and groups benefit */
-                       taskimp = task_weight(p, nid) - taskweight;
-                       groupimp = group_weight(p, nid) - groupweight;
+                       taskimp = task_weight(p, nid, dist) - taskweight;
+                       groupimp = group_weight(p, nid, dist) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;
 
+                       env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        task_numa_find_cpu(&env, taskimp, groupimp);
@@ -1431,7 +1546,7 @@ static void numa_migrate_preferred(struct task_struct *p)
        unsigned long interval = HZ;
 
        /* This task has no NUMA fault statistics yet */
-       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
+       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
                return;
 
        /* Periodically retry migrating the task to the preferred node */
@@ -1507,9 +1622,11 @@ static void update_task_scan_period(struct task_struct *p,
        /*
         * If there were no recorded hinting faults then either the task is
         * completely idle or all activity is in areas that are not of interest
-        * to automatic numa balancing. Scan slower
+        * to automatic numa balancing. Related to that, if there were failed
+        * migrations then it implies we are migrating too quickly or the local
+        * node is overloaded. In either case, scan slower.
         */
-       if (local + shared == 0) {
+       if (local + shared == 0 || p->numa_faults_locality[2]) {
                p->numa_scan_period = min(p->numa_scan_period_max,
                        p->numa_scan_period << 1);
 
@@ -1571,7 +1688,7 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
                *period = now - p->last_task_numa_placement;
        } else {
                delta = p->se.avg.runnable_avg_sum;
-               *period = p->se.avg.runnable_avg_period;
+               *period = p->se.avg.avg_period;
        }
 
        p->last_sum_exec_runtime = runtime;
@@ -1580,6 +1697,94 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
        return delta;
 }
 
+/*
+ * Determine the preferred nid for a task in a numa_group. This needs to
+ * be done in a way that produces consistent results with group_weight,
+ * otherwise workloads might not converge.
+ */
+static int preferred_group_nid(struct task_struct *p, int nid)
+{
+       nodemask_t nodes;
+       int dist;
+
+       /* Direct connections between all NUMA nodes. */
+       if (sched_numa_topology_type == NUMA_DIRECT)
+               return nid;
+
+       /*
+        * On a system with glueless mesh NUMA topology, group_weight
+        * scores nodes according to the number of NUMA hinting faults on
+        * both the node itself, and on nearby nodes.
+        */
+       if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
+               unsigned long score, max_score = 0;
+               int node, max_node = nid;
+
+               dist = sched_max_numa_distance;
+
+               for_each_online_node(node) {
+                       score = group_weight(p, node, dist);
+                       if (score > max_score) {
+                               max_score = score;
+                               max_node = node;
+                       }
+               }
+               return max_node;
+       }
+
+       /*
+        * Finding the preferred nid in a system with NUMA backplane
+        * interconnect topology is more involved. The goal is to locate
+        * tasks from numa_groups near each other in the system, and
+        * untangle workloads from different sides of the system. This requires
+        * searching down the hierarchy of node groups, recursively searching
+        * inside the highest scoring group of nodes. The nodemask tricks
+        * keep the complexity of the search down.
+        */
+       nodes = node_online_map;
+       for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
+               unsigned long max_faults = 0;
+               nodemask_t max_group = NODE_MASK_NONE;
+               int a, b;
+
+               /* Are there nodes at this distance from each other? */
+               if (!find_numa_distance(dist))
+                       continue;
+
+               for_each_node_mask(a, nodes) {
+                       unsigned long faults = 0;
+                       nodemask_t this_group;
+                       nodes_clear(this_group);
+
+                       /* Sum group's NUMA faults; includes a==b case. */
+                       for_each_node_mask(b, nodes) {
+                               if (node_distance(a, b) < dist) {
+                                       faults += group_faults(p, b);
+                                       node_set(b, this_group);
+                                       node_clear(b, nodes);
+                               }
+                       }
+
+                       /* Remember the top group. */
+                       if (faults > max_faults) {
+                               max_faults = faults;
+                               max_group = this_group;
+                               /*
+                                * subtle: at the smallest distance there is
+                                * just one node left in each "group", the
+                                * winner is the preferred nid.
+                                */
+                               nid = a;
+                       }
+               }
+               /* Next round, evaluate the nodes within max_group. */
+               if (!max_faults)
+                       break;
+               nodes = max_group;
+       }
+       return nid;
+}
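
A rough standalone model of the backplane narrowing loop (plain arrays instead of nodemask_t, distances stepped by 10 instead of probing find_numa_distance()); the topology and fault counts are invented: two boards of two nodes each, with most faults on the first board:

#include <stdio.h>
#include <string.h>

#define NR_NODES 4

/* Invented topology: local 10, same board 20, across the backplane 30. */
static const int distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 30 },
	{ 20, 10, 30, 30 },
	{ 30, 30, 10, 20 },
	{ 30, 30, 20, 10 },
};

/* Invented per-node group fault counts. */
static const unsigned long faults[NR_NODES] = { 10, 20, 5, 5 };

int main(void)
{
	int candidate[NR_NODES] = { 1, 1, 1, 1 };	/* nodes still in play */
	int nid = 0;
	int d;

	for (d = 30; d > 10; d -= 10) {
		unsigned long max_faults = 0;
		int max_group[NR_NODES] = { 0 };
		int grouped[NR_NODES] = { 0 };
		int a, b;

		for (a = 0; a < NR_NODES; a++) {
			unsigned long sum = 0;
			int this_group[NR_NODES] = { 0 };

			if (!candidate[a] || grouped[a])
				continue;

			/* The group of a: candidate nodes closer than d to it. */
			for (b = 0; b < NR_NODES; b++) {
				if (candidate[b] && !grouped[b] &&
				    distance[a][b] < d) {
					sum += faults[b];
					this_group[b] = 1;
					grouped[b] = 1;
				}
			}
			if (sum > max_faults) {
				max_faults = sum;
				memcpy(max_group, this_group, sizeof(max_group));
				nid = a;
			}
		}
		/* Next round only looks inside the winning group. */
		memcpy(candidate, max_group, sizeof(candidate));
		printf("dist %d: group leader %d with %lu faults\n",
		       d, nid, max_faults);
	}
	printf("preferred nid: %d\n", nid);	/* node 1 */
	return 0;
}

At the backplane distance the {0,1} board wins with 30 faults; at the on-board distance node 1 wins within that board, so node 1 becomes the preferred nid.
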
+
 static void task_numa_placement(struct task_struct *p)
 {
        int seq, nid, max_nid = -1, max_group_nid = -1;
@@ -1607,18 +1812,23 @@ static void task_numa_placement(struct task_struct *p)
 
        /* Find the node with the highest number of faults */
        for_each_online_node(nid) {
+               /* Keep track of the offsets in numa_faults array */
+               int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
                unsigned long faults = 0, group_faults = 0;
-               int priv, i;
+               int priv;
 
                for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
                        long diff, f_diff, f_weight;
 
-                       i = task_faults_idx(nid, priv);
+                       mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
+                       membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
+                       cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
+                       cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
 
                        /* Decay existing window, copy faults since last scan */
-                       diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
-                       fault_types[priv] += p->numa_faults_buffer_memory[i];
-                       p->numa_faults_buffer_memory[i] = 0;
+                       diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
+                       fault_types[priv] += p->numa_faults[membuf_idx];
+                       p->numa_faults[membuf_idx] = 0;
 
                        /*
                         * Normalize the faults_from, so all tasks in a group
@@ -1628,21 +1838,27 @@ static void task_numa_placement(struct task_struct *p)
                         * faults are less important.
                         */
                        f_weight = div64_u64(runtime << 16, period + 1);
-                       f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
+                       f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
                                   (total_faults + 1);
-                       f_diff = f_weight - p->numa_faults_cpu[i] / 2;
-                       p->numa_faults_buffer_cpu[i] = 0;
+                       f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
+                       p->numa_faults[cpubuf_idx] = 0;
 
-                       p->numa_faults_memory[i] += diff;
-                       p->numa_faults_cpu[i] += f_diff;
-                       faults += p->numa_faults_memory[i];
+                       p->numa_faults[mem_idx] += diff;
+                       p->numa_faults[cpu_idx] += f_diff;
+                       faults += p->numa_faults[mem_idx];
                        p->total_numa_faults += diff;
                        if (p->numa_group) {
-                               /* safe because we can only change our own group */
-                               p->numa_group->faults[i] += diff;
-                               p->numa_group->faults_cpu[i] += f_diff;
+                               /*
+                                * safe because we can only change our own group
+                                *
+                                * mem_idx represents the offset for a given
+                                * nid and priv in a specific region because it
+                                * is at the beginning of the numa_faults array.
+                                */
+                               p->numa_group->faults[mem_idx] += diff;
+                               p->numa_group->faults_cpu[mem_idx] += f_diff;
                                p->numa_group->total_faults += diff;
-                               group_faults += p->numa_group->faults[i];
+                               group_faults += p->numa_group->faults[mem_idx];
                        }
                }
 
@@ -1662,7 +1878,7 @@ static void task_numa_placement(struct task_struct *p)
        if (p->numa_group) {
                update_numa_active_node_mask(p->numa_group);
                spin_unlock_irq(group_lock);
-               max_nid = max_group_nid;
+               max_nid = preferred_group_nid(p, max_group_nid);
        }
 
        if (max_faults) {
@@ -1705,7 +1921,6 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 
                atomic_set(&grp->refcount, 1);
                spin_lock_init(&grp->lock);
-               INIT_LIST_HEAD(&grp->task_list);
                grp->gid = p->pid;
                /* Second half of the array tracks nids where faults happen */
                grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
@@ -1714,11 +1929,10 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                node_set(task_node(current), grp->active_nodes);
 
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
-                       grp->faults[i] = p->numa_faults_memory[i];
+                       grp->faults[i] = p->numa_faults[i];
 
                grp->total_faults = p->total_numa_faults;
 
-               list_add(&p->numa_entry, &grp->task_list);
                grp->nr_tasks++;
                rcu_assign_pointer(p->numa_group, grp);
        }
@@ -1773,13 +1987,12 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        double_lock_irq(&my_grp->lock, &grp->lock);
 
        for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
-               my_grp->faults[i] -= p->numa_faults_memory[i];
-               grp->faults[i] += p->numa_faults_memory[i];
+               my_grp->faults[i] -= p->numa_faults[i];
+               grp->faults[i] += p->numa_faults[i];
        }
        my_grp->total_faults -= p->total_numa_faults;
        grp->total_faults += p->total_numa_faults;
 
-       list_move(&p->numa_entry, &grp->task_list);
        my_grp->nr_tasks--;
        grp->nr_tasks++;
 
@@ -1799,27 +2012,23 @@ no_join:
 void task_numa_free(struct task_struct *p)
 {
        struct numa_group *grp = p->numa_group;
-       void *numa_faults = p->numa_faults_memory;
+       void *numa_faults = p->numa_faults;
        unsigned long flags;
        int i;
 
        if (grp) {
                spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
-                       grp->faults[i] -= p->numa_faults_memory[i];
+                       grp->faults[i] -= p->numa_faults[i];
                grp->total_faults -= p->total_numa_faults;
 
-               list_del(&p->numa_entry);
                grp->nr_tasks--;
                spin_unlock_irqrestore(&grp->lock, flags);
                RCU_INIT_POINTER(p->numa_group, NULL);
                put_numa_group(grp);
        }
 
-       p->numa_faults_memory = NULL;
-       p->numa_faults_buffer_memory = NULL;
-       p->numa_faults_cpu= NULL;
-       p->numa_faults_buffer_cpu = NULL;
+       p->numa_faults = NULL;
        kfree(numa_faults);
 }
 
@@ -1842,24 +2051,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
                return;
 
        /* Allocate buffer to track faults on a per-node basis */
-       if (unlikely(!p->numa_faults_memory)) {
-               int size = sizeof(*p->numa_faults_memory) *
+       if (unlikely(!p->numa_faults)) {
+               int size = sizeof(*p->numa_faults) *
                           NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
 
-               p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
-               if (!p->numa_faults_memory)
+               p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
+               if (!p->numa_faults)
                        return;
 
-               BUG_ON(p->numa_faults_buffer_memory);
-               /*
-                * The averaged statistics, shared & private, memory & cpu,
-                * occupy the first half of the array. The second half of the
-                * array is for current counters, which are averaged into the
-                * first set by task_numa_placement.
-                */
-               p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
-               p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
-               p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
                p->total_numa_faults = 0;
                memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
        }
@@ -1898,9 +2097,11 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
        if (migrated)
                p->numa_pages_migrated += pages;
+       if (flags & TNF_MIGRATE_FAIL)
+               p->numa_faults_locality[2] += pages;
 
-       p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
-       p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
+       p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
+       p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
        p->numa_faults_locality[local] += pages;
 }
 
@@ -1979,8 +2180,10 @@ void task_numa_work(struct callback_head *work)
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
-               if (!vma_migratable(vma) || !vma_policy_mof(vma))
+               if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+                       is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
                        continue;
+               }
 
                /*
                 * Shared library pages mapped by multiple processes are not
@@ -2315,13 +2518,15 @@ static u32 __compute_runnable_contrib(u64 n)
  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int __update_entity_runnable_avg(u64 now,
+static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
                                                        struct sched_avg *sa,
-                                                       int runnable)
+                                                       int runnable,
+                                                       int running)
 {
        u64 delta, periods;
        u32 runnable_contrib;
        int delta_w, decayed = 0;
+       unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
        delta = now - sa->last_runnable_update;
        /*
@@ -2343,7 +2548,7 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
        sa->last_runnable_update = now;
 
        /* delta_w is the amount already accumulated against our next period */
-       delta_w = sa->runnable_avg_period % 1024;
+       delta_w = sa->avg_period % 1024;
        if (delta + delta_w >= 1024) {
                /* period roll-over */
                decayed = 1;
@@ -2356,7 +2561,10 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
                delta_w = 1024 - delta_w;
                if (runnable)
                        sa->runnable_avg_sum += delta_w;
-               sa->runnable_avg_period += delta_w;
+               if (running)
+                       sa->running_avg_sum += delta_w * scale_freq
+                               >> SCHED_CAPACITY_SHIFT;
+               sa->avg_period += delta_w;
 
                delta -= delta_w;
 
@@ -2366,20 +2574,28 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 
                sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
                                                  periods + 1);
-               sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
+               sa->running_avg_sum = decay_load(sa->running_avg_sum,
+                                                 periods + 1);
+               sa->avg_period = decay_load(sa->avg_period,
                                                     periods + 1);
 
                /* Efficiently calculate \sum (1..n_period) 1024*y^i */
                runnable_contrib = __compute_runnable_contrib(periods);
                if (runnable)
                        sa->runnable_avg_sum += runnable_contrib;
-               sa->runnable_avg_period += runnable_contrib;
+               if (running)
+                       sa->running_avg_sum += runnable_contrib * scale_freq
+                               >> SCHED_CAPACITY_SHIFT;
+               sa->avg_period += runnable_contrib;
        }
 
        /* Remainder of delta accrued against u_0` */
        if (runnable)
                sa->runnable_avg_sum += delta;
-       sa->runnable_avg_period += delta;
+       if (running)
+               sa->running_avg_sum += delta * scale_freq
+                       >> SCHED_CAPACITY_SHIFT;
+       sa->avg_period += delta;
 
        return decayed;
 }
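
A tiny arithmetic sketch of the new running (utilization) accumulation, assuming SCHED_CAPACITY_SHIFT = 10 and an arch_scale_freq_capacity() of 512, i.e. the CPU currently runs at half of its maximum frequency:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10		/* assumed: 1024 == full capacity */

int main(void)
{
	unsigned long scale_freq = 512;	/* pretend the CPU runs at half speed */
	unsigned int runnable_avg_sum = 0, running_avg_sum = 0, avg_period = 0;
	unsigned int delta = 1024;	/* one full ~1ms accumulation period, in us */

	/* Same accumulation step as above, for a task both runnable and running. */
	runnable_avg_sum += delta;
	running_avg_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT;
	avg_period += delta;

	/* Runnable time counts in full, running time only at half weight. */
	printf("runnable=%u running=%u period=%u\n",
	       runnable_avg_sum, running_avg_sum, avg_period);	/* 1024 512 1024 */
	return 0;
}

Runnable time is accounted in full, while running time is scaled by the current frequency capacity; the scaled sum is what later feeds utilization_avg_contrib.
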
@@ -2391,11 +2607,13 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
        u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
        decays -= se->avg.decay_count;
+       se->avg.decay_count = 0;
        if (!decays)
                return 0;
 
        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-       se->avg.decay_count = 0;
+       se->avg.utilization_avg_contrib =
+               decay_load(se->avg.utilization_avg_contrib, decays);
 
        return decays;
 }
@@ -2431,7 +2649,7 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 
        /* The fraction of a cpu used by this cfs_rq */
        contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
-                         sa->runnable_avg_period + 1);
+                         sa->avg_period + 1);
        contrib -= cfs_rq->tg_runnable_contrib;
 
        if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
@@ -2484,7 +2702,8 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
-       __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+       __update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
+                       runnable, runnable);
        __update_tg_runnable_avg(&rq->avg, &rq->cfs);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -2502,7 +2721,7 @@ static inline void __update_task_entity_contrib(struct sched_entity *se)
 
        /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
        contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
-       contrib /= (se->avg.runnable_avg_period + 1);
+       contrib /= (se->avg.avg_period + 1);
        se->avg.load_avg_contrib = scale_load(contrib);
 }
 
@@ -2521,6 +2740,30 @@ static long __update_entity_load_avg_contrib(struct sched_entity *se)
        return se->avg.load_avg_contrib - old_contrib;
 }
 
+
+static inline void __update_task_entity_utilization(struct sched_entity *se)
+{
+       u32 contrib;
+
+       /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+       contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
+       contrib /= (se->avg.avg_period + 1);
+       se->avg.utilization_avg_contrib = scale_load(contrib);
+}
+
+static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
+{
+       long old_contrib = se->avg.utilization_avg_contrib;
+
+       if (entity_is_task(se))
+               __update_task_entity_utilization(se);
+       else
+               se->avg.utilization_avg_contrib =
+                                       group_cfs_rq(se)->utilization_load_avg;
+
+       return se->avg.utilization_avg_contrib - old_contrib;
+}
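
A minimal sketch of the task case above, assuming SCHED_LOAD_SCALE = 1024 and treating scale_load()/scale_load_down() as identity (the 32-bit configuration):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL		/* assumed; scale_load() treated as identity */

int main(void)
{
	/* Task ran (frequency-scaled) 256 out of a 512-unit averaging period. */
	unsigned int running_avg_sum = 256, avg_period = 512;
	unsigned int contrib;

	contrib = running_avg_sum * SCHED_LOAD_SCALE / (avg_period + 1);
	printf("utilization_avg_contrib = %u\n", contrib);	/* 511, i.e. ~50% */
	return 0;
}
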
+
 static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
                                                 long load_contrib)
 {
@@ -2537,7 +2780,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
                                          int update_cfs_rq)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
-       long contrib_delta;
+       long contrib_delta, utilization_delta;
+       int cpu = cpu_of(rq_of(cfs_rq));
        u64 now;
 
        /*
@@ -2549,18 +2793,22 @@ static inline void update_entity_load_avg(struct sched_entity *se,
        else
                now = cfs_rq_clock_task(group_cfs_rq(se));
 
-       if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
+       if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
+                                       cfs_rq->curr == se))
                return;
 
        contrib_delta = __update_entity_load_avg_contrib(se);
+       utilization_delta = __update_entity_utilization_avg_contrib(se);
 
        if (!update_cfs_rq)
                return;
 
-       if (se->on_rq)
+       if (se->on_rq) {
                cfs_rq->runnable_load_avg += contrib_delta;
-       else
+               cfs_rq->utilization_load_avg += utilization_delta;
+       } else {
                subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+       }
 }
 
 /*
@@ -2635,6 +2883,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
        }
 
        cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
+       cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
        /* we force update consideration on load-balancer moves */
        update_cfs_rq_blocked_load(cfs_rq, !wakeup);
 }
@@ -2653,6 +2902,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
        update_cfs_rq_blocked_load(cfs_rq, !sleep);
 
        cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
+       cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
        if (sleep) {
                cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
                se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
@@ -2990,6 +3240,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
+               update_entity_load_avg(se, 1);
        }
 
        update_stats_curr_start(cfs_rq, se);
@@ -3822,6 +4073,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       /* init_cfs_bandwidth() was not called */
+       if (!cfs_b->throttled_cfs_rq.next)
+               return;
+
        hrtimer_cancel(&cfs_b->period_timer);
        hrtimer_cancel(&cfs_b->slack_timer);
 }
@@ -4112,6 +4367,11 @@ static unsigned long capacity_of(int cpu)
        return cpu_rq(cpu)->cpu_capacity;
 }
 
+static unsigned long capacity_orig_of(int cpu)
+{
+       return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -4241,7 +4501,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                 * wl = S * s'_i; see (2)
                 */
                if (W > 0 && w < W)
-                       wl = (w * tg->shares) / W;
+                       wl = (w * (long)tg->shares) / W;
                else
                        wl = tg->shares;
 
@@ -4469,7 +4729,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
                                latest_idle_timestamp = rq->idle_stamp;
                                shallowest_idle_cpu = i;
                        }
-               } else {
+               } else if (shallowest_idle_cpu == -1) {
                        load = weighted_cpuload(i);
                        if (load < min_load || (load == min_load && i == this_cpu)) {
                                min_load = load;
@@ -4525,6 +4785,33 @@ next:
 done:
        return target;
 }
+/*
+ * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
+ * tasks. The unit of the return value must be the one of capacity so we can
+ * compare the usage with the capacity of the CPU that is available for CFS
+ * tasks (ie cpu_capacity).
+ * cfs.utilization_load_avg is the sum of running time of runnable tasks on a
+ * CPU. It represents the amount of utilization of a CPU in the range
+ * [0..SCHED_LOAD_SCALE].  The usage of a CPU can't be higher than the full
+ * capacity of the CPU because it's about the running time on this CPU.
+ * Nevertheless, cfs.utilization_load_avg can be higher than SCHED_LOAD_SCALE
+ * because of unfortunate rounding in avg_period and running_load_avg or just
+ * after migrating tasks until the average stabilizes with the new running
+ * time. So we need to check that the usage stays within the range
+ * [0..cpu_capacity_orig] and cap it if necessary.
+ * Without capping the usage, a group could be seen as overloaded (CPU0 usage
+ * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity
+ */
+static int get_cpu_usage(int cpu)
+{
+       unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
+       unsigned long capacity = capacity_orig_of(cpu);
+
+       if (usage >= SCHED_LOAD_SCALE)
+               return capacity;
+
+       return (usage * capacity) >> SCHED_LOAD_SHIFT;
+}
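
A standalone sketch of the clamping and rescaling above, assuming SCHED_LOAD_SCALE = 1024, SCHED_LOAD_SHIFT = 10, and an invented cpu_capacity_orig of 430 (e.g. a LITTLE core):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL	/* assumed */
#define SCHED_LOAD_SHIFT 10	/* assumed */

/* Mirrors get_cpu_usage(): clamp, then rescale into capacity units. */
static unsigned long cpu_usage(unsigned long utilization_load_avg,
			       unsigned long capacity_orig)
{
	if (utilization_load_avg >= SCHED_LOAD_SCALE)
		return capacity_orig;

	return (utilization_load_avg * capacity_orig) >> SCHED_LOAD_SHIFT;
}

int main(void)
{
	/* A CPU with original capacity 430, half utilized. */
	printf("%lu\n", cpu_usage(512, 430));	/* 215 */
	/* Transiently inflated utilization is capped at the full capacity. */
	printf("%lu\n", cpu_usage(1300, 430));	/* 430 */
	return 0;
}
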
 
 /*
  * select_task_rq_fair: Select target runqueue for the waking task in domains
@@ -4547,9 +4834,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        int want_affine = 0;
        int sync = wake_flags & WF_SYNC;
 
-       if (p->nr_cpus_allowed == 1)
-               return prev_cpu;
-
        if (sd_flag & SD_BALANCE_WAKE)
                want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
 
@@ -4973,7 +5257,7 @@ static void yield_task_fair(struct rq *rq)
                 * so we don't do microscopic update in schedule()
                 * and double the fastpath cost.
                 */
-                rq->skip_clock_update = 1;
+               rq_clock_skip_update(rq, true);
        }
 
        set_skip_buddy(se);
@@ -5189,7 +5473,7 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
        int src_nid, dst_nid;
 
-       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
+       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
            !(env->sd->flags & SD_NUMA)) {
                return false;
        }
@@ -5228,7 +5512,7 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
                return false;
 
-       if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
+       if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
                return false;
 
        src_nid = cpu_to_node(env->src_cpu);
@@ -5654,12 +5938,12 @@ struct sg_lb_stats {
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
        unsigned long group_capacity;
+       unsigned long group_usage; /* Total usage of the group */
        unsigned int sum_nr_running; /* Nr tasks running in the group */
-       unsigned int group_capacity_factor;
        unsigned int idle_cpus;
        unsigned int group_weight;
        enum group_type group_type;
-       int group_has_free_capacity;
+       int group_no_capacity;
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@ -5730,16 +6014,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
-{
-       return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-       return default_scale_capacity(sd, cpu);
-}
-
 static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
        if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
@@ -5756,7 +6030,7 @@ unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 static unsigned long scale_rt_capacity(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       u64 total, available, age_stamp, avg;
+       u64 total, used, age_stamp, avg;
        s64 delta;
 
        /*
@@ -5765,26 +6039,19 @@ static unsigned long scale_rt_capacity(int cpu)
         */
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
+       delta = __rq_clock_broken(rq) - age_stamp;
 
-       delta = rq_clock(rq) - age_stamp;
        if (unlikely(delta < 0))
                delta = 0;
 
        total = sched_avg_period() + delta;
 
-       if (unlikely(total < avg)) {
-               /* Ensures that capacity won't end up being negative */
-               available = 0;
-       } else {
-               available = total - avg;
-       }
-
-       if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
-               total = SCHED_CAPACITY_SCALE;
+       used = div_u64(avg, total);
 
-       total >>= SCHED_CAPACITY_SHIFT;
+       if (likely(used < SCHED_CAPACITY_SCALE))
+               return SCHED_CAPACITY_SCALE - used;
 
-       return div_u64(available, total);
+       return 1;
 }
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -5799,14 +6066,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
        capacity >>= SCHED_CAPACITY_SHIFT;
 
-       sdg->sgc->capacity_orig = capacity;
-
-       if (sched_feat(ARCH_CAPACITY))
-               capacity *= arch_scale_freq_capacity(sd, cpu);
-       else
-               capacity *= default_scale_capacity(sd, cpu);
-
-       capacity >>= SCHED_CAPACITY_SHIFT;
+       cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
        capacity *= scale_rt_capacity(cpu);
        capacity >>= SCHED_CAPACITY_SHIFT;
@@ -5822,7 +6082,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long capacity, capacity_orig;
+       unsigned long capacity;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
@@ -5834,7 +6094,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                return;
        }
 
-       capacity_orig = capacity = 0;
+       capacity = 0;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -5854,19 +6114,15 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
                         * Use capacity_of(), which is set irrespective of domains
                         * in update_cpu_capacity().
                         *
-                        * This avoids capacity/capacity_orig from being 0 and
+                        * This avoids capacity from being 0 and
                         * causing divide-by-zero issues on boot.
-                        *
-                        * Runtime updates will correct capacity_orig.
                         */
                        if (unlikely(!rq->sd)) {
-                               capacity_orig += capacity_of(cpu);
                                capacity += capacity_of(cpu);
                                continue;
                        }
 
                        sgc = rq->sd->groups->sgc;
-                       capacity_orig += sgc->capacity_orig;
                        capacity += sgc->capacity;
                }
        } else  {
@@ -5877,39 +6133,24 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
                group = child->groups;
                do {
-                       capacity_orig += group->sgc->capacity_orig;
                        capacity += group->sgc->capacity;
                        group = group->next;
                } while (group != child->groups);
        }
 
-       sdg->sgc->capacity_orig = capacity_orig;
        sdg->sgc->capacity = capacity;
 }
 
 /*
- * Try and fix up capacity for tiny siblings, this is needed when
- * things like SD_ASYM_PACKING need f_b_g to select another sibling
- * which on its own isn't powerful enough.
- *
- * See update_sd_pick_busiest() and check_asym_packing().
+ * Check whether the capacity of the rq has been noticeably reduced by side
+ * activity. The imbalance_pct is used for the threshold.
+ * Return true if the capacity is reduced.
  */
 static inline int
-fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
+check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 {
-       /*
-        * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
-        */
-       if (!(sd->flags & SD_SHARE_CPUCAPACITY))
-               return 0;
-
-       /*
-        * If ~90% of the cpu_capacity is still there, we're good.
-        */
-       if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
-               return 1;
-
-       return 0;
+       return ((rq->cpu_capacity * sd->imbalance_pct) <
+                               (rq->cpu_capacity_orig * 100));
 }
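
A quick numeric sketch of this threshold, using imbalance_pct = 117 (a common sched_domain default) and invented capacities:

#include <stdbool.h>
#include <stdio.h>

/* Same comparison as check_cpu_capacity() above. */
static bool capacity_reduced(unsigned long cpu_capacity,
			     unsigned long cpu_capacity_orig,
			     unsigned int imbalance_pct)
{
	return (cpu_capacity * imbalance_pct) < (cpu_capacity_orig * 100);
}

int main(void)
{
	/* RT/IRQ pressure left 800 of 1024: 800*117 = 93600 < 102400 -> reduced. */
	printf("%d\n", capacity_reduced(800, 1024, 117));	/* 1 */
	/* 900 of 1024: 900*117 = 105300 >= 102400 -> still considered fine. */
	printf("%d\n", capacity_reduced(900, 1024, 117));	/* 0 */
	return 0;
}
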
 
 /*
@@ -5947,37 +6188,56 @@ static inline int sg_imbalanced(struct sched_group *group)
 }
 
 /*
- * Compute the group capacity factor.
- *
- * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
- * first dividing out the smt factor and computing the actual number of cores
- * and limit unit capacity with that.
+ * group_has_capacity returns true if the group has spare capacity that could
+ * be used by some tasks.
+ * We consider that a group has spare capacity if the number of tasks is
+ * smaller than the number of CPUs or if the usage is lower than the available
+ * capacity for CFS tasks.
+ * For the latter, we use a threshold to stabilize the state, to take into
+ * account the variance of the tasks' load and to return true if the available
+ * capacity is meaningful for the load balancer.
+ * As an example, an available capacity of 1% can appear but it doesn't bring
+ * any benefit for load balancing.
  */
-static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
+static inline bool
+group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
 {
-       unsigned int capacity_factor, smt, cpus;
-       unsigned int capacity, capacity_orig;
+       if (sgs->sum_nr_running < sgs->group_weight)
+               return true;
+
+       if ((sgs->group_capacity * 100) >
+                       (sgs->group_usage * env->sd->imbalance_pct))
+               return true;
 
-       capacity = group->sgc->capacity;
-       capacity_orig = group->sgc->capacity_orig;
-       cpus = group->group_weight;
+       return false;
+}
 
-       /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
-       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
-       capacity_factor = cpus / smt; /* cores */
+/*
+ *  group_is_overloaded returns true if the group has more tasks than it can
+ *  handle.
+ *  group_is_overloaded is not equal to !group_has_capacity because a group
+ *  with exactly the right number of tasks has no spare capacity left but is
+ *  not overloaded, so both group_has_capacity and group_is_overloaded return
+ *  false.
+ */
+static inline bool
+group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
+{
+       if (sgs->sum_nr_running <= sgs->group_weight)
+               return false;
 
-       capacity_factor = min_t(unsigned,
-               capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
-       if (!capacity_factor)
-               capacity_factor = fix_small_capacity(env->sd, group);
+       if ((sgs->group_capacity * 100) <
+                       (sgs->group_usage * env->sd->imbalance_pct))
+               return true;
 
-       return capacity_factor;
+       return false;
 }
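
The two predicates above are deliberately not complements of each other; a small standalone sketch with an invented 4-CPU group and imbalance_pct = 117 shows the middle state where both return false:

#include <stdbool.h>
#include <stdio.h>

struct sg_stats {			/* trimmed-down sg_lb_stats */
	unsigned long group_capacity;
	unsigned long group_usage;
	unsigned int sum_nr_running;
	unsigned int group_weight;
};

static bool has_capacity(const struct sg_stats *s, unsigned int pct)
{
	if (s->sum_nr_running < s->group_weight)
		return true;
	return (s->group_capacity * 100) > (s->group_usage * pct);
}

static bool is_overloaded(const struct sg_stats *s, unsigned int pct)
{
	if (s->sum_nr_running <= s->group_weight)
		return false;
	return (s->group_capacity * 100) < (s->group_usage * pct);
}

int main(void)
{
	/* Exactly 4 busy tasks on 4 CPUs, usage close to the group capacity. */
	struct sg_stats s = { 4096, 3600, 4, 4 };

	/* Neither "has spare capacity" nor "overloaded": prints 0 0. */
	printf("%d %d\n", has_capacity(&s, 117), is_overloaded(&s, 117));
	return 0;
}
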
 
-static enum group_type
-group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
+static enum group_type group_classify(struct lb_env *env,
+               struct sched_group *group,
+               struct sg_lb_stats *sgs)
 {
-       if (sgs->sum_nr_running > sgs->group_capacity_factor)
+       if (sgs->group_no_capacity)
                return group_overloaded;
 
        if (sg_imbalanced(group))
@@ -6015,6 +6275,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        load = source_load(i, load_idx);
 
                sgs->group_load += load;
+               sgs->group_usage += get_cpu_usage(i);
                sgs->sum_nr_running += rq->cfs.h_nr_running;
 
                if (rq->nr_running > 1)
@@ -6037,11 +6298,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
        sgs->group_weight = group->group_weight;
-       sgs->group_capacity_factor = sg_capacity_factor(env, group);
-       sgs->group_type = group_classify(group, sgs);
 
-       if (sgs->group_capacity_factor > sgs->sum_nr_running)
-               sgs->group_has_free_capacity = 1;
+       sgs->group_no_capacity = group_is_overloaded(env, sgs);
+       sgs->group_type = group_classify(env, group, sgs);
 }
 
 /**
@@ -6163,17 +6422,20 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
                /*
                 * In case the child domain prefers tasks go to siblings
-                * first, lower the sg capacity factor to one so that we'll try
+                * first, lower the sg capacity so that we'll try
                 * and move all the excess tasks away. We lower the capacity
                 * of a group only if the local group has the capacity to fit
-                * these excess tasks, i.e. nr_running < group_capacity_factor. The
-                * extra check prevents the case where you always pull from the
-                * heaviest group when it is already under-utilized (possible
-                * with a large weight task outweighs the tasks on the system).
+                * these excess tasks. The extra check prevents the case where
+                * you always pull from the heaviest group when it is already
+                * under-utilized (possible when a large-weight task outweighs
+                * the tasks on the system).
                 */
                if (prefer_sibling && sds->local &&
-                   sds->local_stat.group_has_free_capacity)
-                       sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
+                   group_has_capacity(env, &sds->local_stat) &&
+                   (sgs->sum_nr_running > 1)) {
+                       sgs->group_no_capacity = 1;
+                       sgs->group_type = group_overloaded;
+               }
 
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
@@ -6352,11 +6614,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         */
        if (busiest->group_type == group_overloaded &&
            local->group_type   == group_overloaded) {
-               load_above_capacity =
-                       (busiest->sum_nr_running - busiest->group_capacity_factor);
-
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
-               load_above_capacity /= busiest->group_capacity;
+               load_above_capacity = busiest->sum_nr_running *
+                                       SCHED_LOAD_SCALE;
+               if (load_above_capacity > busiest->group_capacity)
+                       load_above_capacity -= busiest->group_capacity;
+               else
+                       load_above_capacity = ~0UL;
        }
 
        /*
@@ -6419,6 +6682,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
        local = &sds.local_stat;
        busiest = &sds.busiest_stat;
 
+       /* ASYM feature bypasses nice load balance check */
        if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
            check_asym_packing(env, &sds))
                return sds.busiest;
@@ -6439,8 +6703,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
                goto force_balance;
 
        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-       if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
-           !busiest->group_has_free_capacity)
+       if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
+           busiest->group_no_capacity)
                goto force_balance;
 
        /*
@@ -6499,7 +6763,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
        int i;
 
        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-               unsigned long capacity, capacity_factor, wl;
+               unsigned long capacity, wl;
                enum fbq_type rt;
 
                rq = cpu_rq(i);
@@ -6528,9 +6792,6 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                        continue;
 
                capacity = capacity_of(i);
-               capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
-               if (!capacity_factor)
-                       capacity_factor = fix_small_capacity(env->sd, group);
 
                wl = weighted_cpuload(i);
 
@@ -6538,7 +6799,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                 * When comparing with imbalance, use weighted_cpuload()
                 * which is not scaled with the cpu capacity.
                 */
-               if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
+
+               if (rq->nr_running == 1 && wl > env->imbalance &&
+                   !check_cpu_capacity(rq, env->sd))
                        continue;
 
                /*
@@ -6586,6 +6849,19 @@ static int need_active_balance(struct lb_env *env)
                        return 1;
        }
 
+       /*
+        * The dst_cpu is idle and the src_cpu has only 1 CFS task.
+        * It's worth migrating the task if the src_cpu's capacity is reduced
+        * because of other sched_classes or IRQs, provided more capacity
+        * stays available on dst_cpu.
+        */
+       if ((env->idle != CPU_NOT_IDLE) &&
+           (env->src_rq->cfs.h_nr_running == 1)) {
+               if ((check_cpu_capacity(env->src_rq, sd)) &&
+                   (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
+                       return 1;
+       }
+
        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
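
The new condition in isolation, as a standalone sketch with invented capacities and imbalance_pct = 117: the source CPU must be significantly reduced and the destination must offer noticeably more capacity before the single task is worth an active balance:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the extra active-balance test above, outside the kernel. */
static bool worth_active_balance(unsigned long src_capacity,
				 unsigned long src_capacity_orig,
				 unsigned long dst_capacity,
				 unsigned int imbalance_pct)
{
	bool src_reduced = (src_capacity * imbalance_pct) <
			   (src_capacity_orig * 100);

	return src_reduced &&
	       (src_capacity * imbalance_pct < dst_capacity * 100);
}

int main(void)
{
	/* src squeezed to 600 of 1024 by RT/IRQ time, dst idle at full 1024. */
	printf("%d\n", worth_active_balance(600, 1024, 1024, 117));	/* 1 */
	/* dst barely better than src: not worth an active migration. */
	printf("%d\n", worth_active_balance(600, 1024, 650, 117));	/* 0 */
	return 0;
}
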
 
@@ -6685,6 +6961,9 @@ redo:
 
        schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
+       env.src_cpu = busiest->cpu;
+       env.src_rq = busiest;
+
        ld_moved = 0;
        if (busiest->nr_running > 1) {
                /*
@@ -6694,8 +6973,6 @@ redo:
                 * correctly treated as an imbalance.
                 */
                env.flags |= LBF_ALL_PINNED;
-               env.src_cpu   = busiest->cpu;
-               env.src_rq    = busiest;
                env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
@@ -7395,22 +7672,25 @@ end:
 
 /*
  * Current heuristic for kicking the idle load balancer in the presence
- * of an idle cpu is the system.
+ * of an idle cpu in the system.
  *   - This rq has more than one task.
- *   - At any scheduler domain level, this cpu's scheduler group has multiple
- *     busy cpu's exceeding the group's capacity.
+ *   - This rq has at least one CFS task and the capacity of the CPU is
+ *     significantly reduced because of RT tasks or IRQs.
+ *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
+ *     group has multiple busy cpus.
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
-static inline int nohz_kick_needed(struct rq *rq)
+static inline bool nohz_kick_needed(struct rq *rq)
 {
        unsigned long now = jiffies;
        struct sched_domain *sd;
        struct sched_group_capacity *sgc;
        int nr_busy, cpu = rq->cpu;
+       bool kick = false;
 
        if (unlikely(rq->idle_balance))
-               return 0;
+               return false;
 
        /*
        * We may be recently in ticked or tickless idle mode. At the first
@@ -7424,38 +7704,46 @@ static inline int nohz_kick_needed(struct rq *rq)
         * balancing.
         */
        if (likely(!atomic_read(&nohz.nr_cpus)))
-               return 0;
+               return false;
 
        if (time_before(now, nohz.next_balance))
-               return 0;
+               return false;
 
        if (rq->nr_running >= 2)
-               goto need_kick;
+               return true;
 
        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_busy, cpu));
-
        if (sd) {
                sgc = sd->groups->sgc;
                nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
-               if (nr_busy > 1)
-                       goto need_kick_unlock;
+               if (nr_busy > 1) {
+                       kick = true;
+                       goto unlock;
+               }
+
        }
 
-       sd = rcu_dereference(per_cpu(sd_asym, cpu));
+       sd = rcu_dereference(rq->sd);
+       if (sd) {
+               if ((rq->cfs.h_nr_running >= 1) &&
+                               check_cpu_capacity(rq, sd)) {
+                       kick = true;
+                       goto unlock;
+               }
+       }
 
+       sd = rcu_dereference(per_cpu(sd_asym, cpu));
        if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
-                                 sched_domain_span(sd)) < cpu))
-               goto need_kick_unlock;
-
-       rcu_read_unlock();
-       return 0;
+                                 sched_domain_span(sd)) < cpu)) {
+               kick = true;
+               goto unlock;
+       }
 
-need_kick_unlock:
+unlock:
        rcu_read_unlock();
-need_kick:
-       return 1;
+       return kick;
 }
 #else
 static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
@@ -7471,14 +7759,16 @@ static void run_rebalance_domains(struct softirq_action *h)
        enum cpu_idle_type idle = this_rq->idle_balance ?
                                                CPU_IDLE : CPU_NOT_IDLE;
 
-       rebalance_domains(this_rq, idle);
-
        /*
         * If this cpu has a pending nohz_balance_kick, then do the
         * balancing on behalf of the other idle cpus whose ticks are
-        * stopped.
+        * stopped. Do nohz_idle_balance *before* rebalance_domains to
+        * give the idle cpus a chance to load balance. Else we may
+        * load balance only within the local sched_domain hierarchy
+        * and abort nohz_idle_balance altogether if we pull some load.
         */
        nohz_idle_balance(this_rq, idle);
+       rebalance_domains(this_rq, idle);
 }
 
 /*