Merge tag 'v3.18' into p/abusse/merge_upgrade
[projects/modsched/linux.git]
diff --git a/kernel/sched/cfs/fair.c b/kernel/sched/cfs/fair.c
index 8cbe2d2..ef2b104 100644
@@ -23,6 +23,7 @@
 #include <linux/latencytop.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpuidle.h>
 #include <linux/slab.h>
 #include <linux/profile.h>
 #include <linux/interrupt.h>
@@ -665,6 +666,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #ifdef CONFIG_SMP
+static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 static inline void __update_task_entity_contrib(struct sched_entity *se);
@@ -724,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
        account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+       update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -826,11 +833,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
-       if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-               windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+       if (scan_size < MAX_SCAN_WINDOW)
+               windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;
 
        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
@@ -1017,7 +1025,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
-static unsigned long power_of(int cpu);
+static unsigned long capacity_of(int cpu);
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
 
 /* Cached statistics for all CPUs within a node */
@@ -1026,11 +1034,11 @@ struct numa_stats {
        unsigned long load;
 
        /* Total compute capacity of CPUs on a node */
-       unsigned long power;
+       unsigned long compute_capacity;
 
        /* Approximate capacity in terms of runnable tasks on a node */
-       unsigned long capacity;
-       int has_capacity;
+       unsigned long task_capacity;
+       int has_free_capacity;
 };
 
 /*
@@ -1038,7 +1046,8 @@ struct numa_stats {
  */
 static void update_numa_stats(struct numa_stats *ns, int nid)
 {
-       int cpu, cpus = 0;
+       int smt, cpu, cpus = 0;
+       unsigned long capacity;
 
        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
@@ -1046,7 +1055,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
 
                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(cpu);
-               ns->power += power_of(cpu);
+               ns->compute_capacity += capacity_of(cpu);
 
                cpus++;
        }
@@ -1056,15 +1065,19 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
-        * We'll either bail at !has_capacity, or we'll detect a huge imbalance
-        * and bail there.
+        * We'll either bail at !has_free_capacity, or we'll detect a huge
+        * imbalance and bail there.
         */
        if (!cpus)
                return;
 
-       ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
-       ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
-       ns->has_capacity = (ns->nr_running < ns->capacity);
+       /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
+       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
+       capacity = cpus / smt; /* cores */
+
+       ns->task_capacity = min_t(unsigned, capacity,
+               DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
+       ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
 }
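
Illustration only (not part of the patch): a minimal, self-contained sketch of the task-capacity estimate above, assuming SCHED_CAPACITY_SCALE is 1024 and with the DIV_ROUND_* kernel macros expanded inline; the numbers in the comments are made up.

static unsigned long example_task_capacity(unsigned long cpus,
					   unsigned long compute_capacity)
{
	/* smt := ceil(SCHED_CAPACITY_SCALE * cpus / compute_capacity) */
	unsigned long smt = (1024 * cpus + compute_capacity - 1) / compute_capacity;
	unsigned long cores = cpus / smt;
	/* DIV_ROUND_CLOSEST(compute_capacity, SCHED_CAPACITY_SCALE) */
	unsigned long scaled = (compute_capacity + 512) / 1024;

	/* e.g. 4 SMT2 CPUs of capacity 589 each: smt = 2, cores = 2, scaled = 2 */
	return cores < scaled ? cores : scaled;
}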
 
 struct task_numa_env {
@@ -1095,6 +1108,50 @@ static void task_numa_assign(struct task_numa_env *env,
        env->best_cpu = env->dst_cpu;
 }
 
+static bool load_too_imbalanced(long src_load, long dst_load,
+                               struct task_numa_env *env)
+{
+       long imb, old_imb;
+       long orig_src_load, orig_dst_load;
+       long src_capacity, dst_capacity;
+
+       /*
+        * The load is corrected for the CPU capacity available on each node.
+        *
+        * src_load        dst_load
+        * ------------ vs ---------
+        * src_capacity    dst_capacity
+        */
+       src_capacity = env->src_stats.compute_capacity;
+       dst_capacity = env->dst_stats.compute_capacity;
+
+       /* We care about the slope of the imbalance, not the direction. */
+       if (dst_load < src_load)
+               swap(dst_load, src_load);
+
+       /* Is the difference below the threshold? */
+       imb = dst_load * src_capacity * 100 -
+             src_load * dst_capacity * env->imbalance_pct;
+       if (imb <= 0)
+               return false;
+
+       /*
+        * The imbalance is above the allowed threshold.
+        * Compare it with the old imbalance.
+        */
+       orig_src_load = env->src_stats.load;
+       orig_dst_load = env->dst_stats.load;
+
+       if (orig_dst_load < orig_src_load)
+               swap(orig_dst_load, orig_src_load);
+
+       old_imb = orig_dst_load * src_capacity * 100 -
+                 orig_src_load * dst_capacity * env->imbalance_pct;
+
+       /* Would this change make things worse? */
+       return (imb > old_imb);
+}
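
A hedged worked example (illustration only, made-up numbers): with equal capacities on both nodes, the threshold test above reduces to comparing dst_load * 100 against src_load * imbalance_pct.

static bool example_too_imbalanced(long src_load, long dst_load, long pct)
{
	long capacity = 1024;	/* assume both nodes have identical capacity */
	long imb = dst_load * capacity * 100 - src_load * capacity * pct;

	/* src_load=800, dst_load=1100, pct=112: 112,640,000 - 91,750,400 > 0 */
	return imb > 0;		/* above the threshold (before the old-imbalance check) */
}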
+
 /*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
@@ -1107,14 +1164,32 @@ static void task_numa_compare(struct task_numa_env *env,
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
-       long dst_load, src_load;
+       long src_load, dst_load;
        long load;
-       long imp = (groupimp > 0) ? groupimp : taskimp;
+       long imp = env->p->numa_group ? groupimp : taskimp;
+       long moveimp = imp;
 
        rcu_read_lock();
-       cur = ACCESS_ONCE(dst_rq->curr);
-       if (cur->pid == 0) /* idle */
+
+       raw_spin_lock_irq(&dst_rq->lock);
+       cur = dst_rq->curr;
+       /*
+        * No need to move the exiting task, and this ensures that ->curr
+        * wasn't reaped and thus get_task_struct() in task_numa_assign()
+        * is safe under RCU read lock.
+        * Note that rcu_read_lock() itself can't protect from the final
+        * put_task_struct() after the last schedule().
+        */
+       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       raw_spin_unlock_irq(&dst_rq->lock);
+
+       /*
+        * Because we have preemption enabled we can get migrated around and
+        * end up trying to select ourselves (current == env->p) as a swap candidate.
+        */
+       if (cur == env->p)
+               goto unlock;
 
        /*
         * "imp" is the fault differential for the source task between the
@@ -1147,11 +1222,6 @@ static void task_numa_compare(struct task_numa_env *env,
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
-                       if (env->p->numa_group)
-                               imp = groupimp;
-                       else
-                               imp = taskimp;
-
                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid) -
                                       group_weight(cur, env->dst_nid);
@@ -1161,33 +1231,47 @@ static void task_numa_compare(struct task_numa_env *env,
                }
        }
 
-       if (imp < env->best_imp)
+       if (imp <= env->best_imp && moveimp <= env->best_imp)
                goto unlock;
 
        if (!cur) {
                /* Is there capacity at our destination? */
-               if (env->src_stats.has_capacity &&
-                   !env->dst_stats.has_capacity)
+               if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
+                   !env->dst_stats.has_free_capacity)
                        goto unlock;
 
                goto balance;
        }
 
        /* Balance doesn't matter much if we're running a task per cpu */
-       if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+       if (imp > env->best_imp && src_rq->nr_running == 1 &&
+                       dst_rq->nr_running == 1)
                goto assign;
 
        /*
         * In the overloaded case, try and keep the load balanced.
         */
 balance:
-       dst_load = env->dst_stats.load;
-       src_load = env->src_stats.load;
-
-       /* XXX missing power terms */
        load = task_h_load(env->p);
-       dst_load += load;
-       src_load -= load;
+       dst_load = env->dst_stats.load + load;
+       src_load = env->src_stats.load - load;
+
+       if (moveimp > imp && moveimp > env->best_imp) {
+               /*
+                * If the improvement from just moving env->p (no swap) is
+                * better than swapping tasks around, check if a move is
+                * possible. Store a slightly smaller score than moveimp,
+                * so an actually idle CPU will win.
+                */
+               if (!load_too_imbalanced(src_load, dst_load, env)) {
+                       imp = moveimp - 1;
+                       cur = NULL;
+                       goto assign;
+               }
+       }
+
+       if (imp <= env->best_imp)
+               goto unlock;
 
        if (cur) {
                load = task_h_load(cur);
@@ -1195,13 +1279,16 @@ balance:
                src_load += load;
        }
 
-       /* make src_load the smaller */
-       if (dst_load < src_load)
-               swap(dst_load, src_load);
-
-       if (src_load * env->imbalance_pct < dst_load * 100)
+       if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;
 
+       /*
+        * One idle CPU per node is evaluated for a task numa move.
+        * Call select_idle_sibling to maybe find a better one.
+        */
+       if (!cur)
+               env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+
 assign:
        task_numa_assign(env, cur, imp);
 unlock:
@@ -1275,9 +1362,8 @@ static int task_numa_migrate(struct task_struct *p)
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
-       /* If the preferred nid has capacity, try to use it. */
-       if (env.dst_stats.has_capacity)
-               task_numa_find_cpu(&env, taskimp, groupimp);
+       /* Try to find a spot on the preferred nid. */
+       task_numa_find_cpu(&env, taskimp, groupimp);
 
        /* No space available on the preferred nid. Look elsewhere. */
        if (env.best_cpu == -1) {
@@ -1297,12 +1383,28 @@ static int task_numa_migrate(struct task_struct *p)
                }
        }
 
+       /*
+        * If the task is part of a workload that spans multiple NUMA nodes,
+        * and is migrating into one of the workload's active nodes, remember
+        * this node as the task's preferred numa node, so the workload can
+        * settle down.
+        * A task that migrated to a second choice node will be better off
+        * trying for a better one later. Do not set the preferred node here.
+        */
+       if (p->numa_group) {
+               if (env.best_cpu == -1)
+                       nid = env.src_nid;
+               else
+                       nid = env.dst_nid;
+
+               if (node_isset(nid, p->numa_group->active_nodes))
+                       sched_setnuma(p, env.dst_nid);
+       }
+
        /* No better CPU than the current one was found. */
        if (env.best_cpu == -1)
                return -EAGAIN;
 
-       sched_setnuma(p, env.dst_nid);
-
        /*
         * Reset the scan period if the task is being rescheduled on an
         * alternative node to recheck if the tasks is now properly placed.
@@ -1326,12 +1428,15 @@ static int task_numa_migrate(struct task_struct *p)
 /* Attempt to migrate a task to a CPU on the preferred node. */
 static void numa_migrate_preferred(struct task_struct *p)
 {
+       unsigned long interval = HZ;
+
        /* This task has no NUMA fault statistics yet */
        if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
                return;
 
        /* Periodically retry migrating the task to the preferred node */
-       p->numa_migrate_retry = jiffies + HZ;
+       interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
+       p->numa_migrate_retry = jiffies + interval;
 
        /* Success if task is already running on preferred CPU */
        if (task_node(p) == p->numa_preferred_nid)
@@ -1376,12 +1481,12 @@ static void update_numa_active_node_mask(struct numa_group *numa_group)
 /*
  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  * increments. The more local the fault statistics are, the higher the scan
- * period will be for the next scan window. If local/remote ratio is below
- * NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) the
- * scan period will decrease
+ * period will be for the next scan window. If local/(local+remote) ratio is
+ * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
+ * the scan period will decrease. Aim for 70% local accesses.
  */
 #define NUMA_PERIOD_SLOTS 10
-#define NUMA_PERIOD_THRESHOLD 3
+#define NUMA_PERIOD_THRESHOLD 7
 
 /*
  * Increase the scan period (slow down scanning) if the majority of
@@ -1438,7 +1543,7 @@ static void update_task_scan_period(struct task_struct *p,
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly
                 */
-               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }
 
@@ -1556,30 +1661,17 @@ static void task_numa_placement(struct task_struct *p)
 
        if (p->numa_group) {
                update_numa_active_node_mask(p->numa_group);
-               /*
-                * If the preferred task and group nids are different,
-                * iterate over the nodes again to find the best place.
-                */
-               if (max_nid != max_group_nid) {
-                       unsigned long weight, max_weight = 0;
-
-                       for_each_online_node(nid) {
-                               weight = task_weight(p, nid) + group_weight(p, nid);
-                               if (weight > max_weight) {
-                                       max_weight = weight;
-                                       max_nid = nid;
-                               }
-                       }
-               }
-
                spin_unlock_irq(group_lock);
+               max_nid = max_group_nid;
        }
 
-       /* Preferred node as the node with the most faults */
-       if (max_faults && max_nid != p->numa_preferred_nid) {
-               /* Update the preferred nid and migrate task if possible */
-               sched_setnuma(p, max_nid);
-               numa_migrate_preferred(p);
+       if (max_faults) {
+               /* Set the new preferred node */
+               if (max_nid != p->numa_preferred_nid)
+                       sched_setnuma(p, max_nid);
+
+               if (task_node(p) != p->numa_preferred_nid)
+                       numa_migrate_preferred(p);
        }
 }
 
@@ -1720,7 +1812,7 @@ void task_numa_free(struct task_struct *p)
                list_del(&p->numa_entry);
                grp->nr_tasks--;
                spin_unlock_irqrestore(&grp->lock, flags);
-               rcu_assign_pointer(p->numa_group, NULL);
+               RCU_INIT_POINTER(p->numa_group, NULL);
                put_numa_group(grp);
        }
 
@@ -1739,6 +1831,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        struct task_struct *p = current;
        bool migrated = flags & TNF_MIGRATED;
        int cpu_node = task_node(current);
+       int local = !!(flags & TNF_FAULT_LOCAL);
        int priv;
 
        if (!numabalancing_enabled)
@@ -1748,10 +1841,6 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        if (!p->mm)
                return;
 
-       /* Do not worry about placement if exiting */
-       if (p->state == TASK_DEAD)
-               return;
-
        /* Allocate buffer to track faults on a per-node basis */
        if (unlikely(!p->numa_faults_memory)) {
                int size = sizeof(*p->numa_faults_memory) *
@@ -1787,6 +1876,17 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
                        task_numa_group(p, last_cpupid, flags, &priv);
        }
 
+       /*
+        * If a workload spans multiple NUMA nodes, a shared fault that
+        * occurs wholly within the set of nodes that the workload is
+        * actively using should be counted as local. This allows the
+        * scan rate to slow down when a workload has settled down.
+        */
+       if (!priv && !local && p->numa_group &&
+                       node_isset(cpu_node, p->numa_group->active_nodes) &&
+                       node_isset(mem_node, p->numa_group->active_nodes))
+               local = 1;
+
        task_numa_placement(p);
 
        /*
@@ -1801,7 +1901,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
        p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
        p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
-       p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
+       p->numa_faults_locality[local] += pages;
 }
 
 static void reset_ptenuma_scan(struct task_struct *p)
@@ -1879,7 +1979,7 @@ void task_numa_work(struct callback_head *work)
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
-               if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
+               if (!vma_migratable(vma) || !vma_policy_mof(vma))
                        continue;
 
                /*
@@ -2144,8 +2244,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 
        /*
         * As y^PERIOD = 1/2, we can combine
-        *    y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
-        * With a look-up table which covers k^n (n<PERIOD)
+        *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+        * With a look-up table which covers y^n (n<PERIOD)
         *
         * To achieve constant time decay_load.
         */
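
Illustration only (user-space sketch, not kernel code): the identity the comment above relies on, checked with doubles and assuming a LOAD_AVG_PERIOD of 32; the kernel of course uses the fixed-point look-up table instead.

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);		/* so y^32 == 1/2 */
	double val = 1000.0;
	int n = 70;					/* 70 = 2*32 + 6 */

	double direct = val * pow(y, n);		/* decay in one go */
	double split  = (val / 4.0) * pow(y, n % 32);	/* >> 2, then * y^6 */

	printf("%.1f %.1f\n", direct, split);		/* both ~219.5 */
	return 0;
}
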
@@ -2310,6 +2410,9 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
        tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
        tg_contrib -= cfs_rq->tg_load_contrib;
 
+       if (!tg_contrib)
+               return;
+
        if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
                atomic_long_add(tg_contrib, &tg->load_avg);
                cfs_rq->tg_load_contrib += tg_contrib;
@@ -2848,7 +2951,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -2872,7 +2975,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static void
@@ -3012,7 +3115,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                return;
        }
        /*
@@ -3175,10 +3278,12 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * has not truly expired.
         *
         * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced.
+        * whether the global deadline has advanced. It is valid to compare
+        * cfs_b->runtime_expires without any locks since we only care about
+        * exact equality, so a partial write will still work.
         */
 
-       if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
+       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@ -3201,7 +3306,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -3302,12 +3407,16 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        }
 
        if (!se)
-               rq->nr_running -= task_delta;
+               sub_nr_running(rq, task_delta);
 
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
-       list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+       /*
+        * Add to the _head_ of the list, so that an already-started
+        * distribute_cfs_runtime will not see us
+        */
+       list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
        if (!cfs_b->timer_active)
                __start_cfs_bandwidth(cfs_b, false);
        raw_spin_unlock(&cfs_b->lock);
@@ -3353,18 +3462,19 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
        }
 
        if (!se)
-               rq->nr_running += task_delta;
+               add_nr_running(rq, task_delta);
 
        /* determine whether we need to wake up potentially idle cpu */
        if (rq->curr == rq->idle && rq->cfs.nr_running)
-               resched_task(rq->curr);
+               resched_curr(rq);
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                u64 remaining, u64 expires)
 {
        struct cfs_rq *cfs_rq;
-       u64 runtime = remaining;
+       u64 runtime;
+       u64 starting_runtime = remaining;
 
        rcu_read_lock();
        list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -3395,7 +3505,7 @@ next:
        }
        rcu_read_unlock();
 
-       return remaining;
+       return starting_runtime - remaining;
 }
 
 /*
@@ -3407,21 +3517,21 @@ next:
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 {
        u64 runtime, runtime_expires;
-       int idle = 1, throttled;
+       int throttled;
 
-       raw_spin_lock(&cfs_b->lock);
        /* no need to continue the timer with no bandwidth constraint */
        if (cfs_b->quota == RUNTIME_INF)
-               goto out_unlock;
+               goto out_deactivate;
 
        throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-       /* idle depends on !throttled (for the case of a large deficit) */
-       idle = cfs_b->idle && !throttled;
        cfs_b->nr_periods += overrun;
 
-       /* if we're going inactive then everything else can be deferred */
-       if (idle)
-               goto out_unlock;
+       /*
+        * idle depends on !throttled (for the case of a large deficit), and if
+        * we're going inactive then everything else can be deferred
+        */
+       if (cfs_b->idle && !throttled)
+               goto out_deactivate;
 
        /*
         * if we have relooped after returning idle once, we need to update our
@@ -3435,28 +3545,23 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        if (!throttled) {
                /* mark as potentially idle for the upcoming period */
                cfs_b->idle = 1;
-               goto out_unlock;
+               return 0;
        }
 
        /* account preceding periods in which throttling occurred */
        cfs_b->nr_throttled += overrun;
 
-       /*
-        * There are throttled entities so we must first use the new bandwidth
-        * to unthrottle them before making it generally available.  This
-        * ensures that all existing debts will be paid before a new cfs_rq is
-        * allowed to run.
-        */
-       runtime = cfs_b->runtime;
        runtime_expires = cfs_b->runtime_expires;
-       cfs_b->runtime = 0;
 
        /*
-        * This check is repeated as we are holding onto the new bandwidth
-        * while we unthrottle.  This can potentially race with an unthrottled
-        * group trying to acquire new bandwidth from the global pool.
+        * This check is repeated as we are holding onto the new bandwidth while
+        * we unthrottle. This can potentially race with an unthrottled group
+        * trying to acquire new bandwidth from the global pool. This can result
+        * in us over-using our runtime if it is all used during this loop, but
+        * only by limited amounts in that extreme case.
         */
-       while (throttled && runtime > 0) {
+       while (throttled && cfs_b->runtime > 0) {
+               runtime = cfs_b->runtime;
                raw_spin_unlock(&cfs_b->lock);
                /* we can't nest cfs_b->lock while distributing bandwidth */
                runtime = distribute_cfs_runtime(cfs_b, runtime,
@@ -3464,10 +3569,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
                raw_spin_lock(&cfs_b->lock);
 
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        }
 
-       /* return (any) remaining runtime */
-       cfs_b->runtime = runtime;
        /*
         * While we are ensured activity in the period following an
         * unthrottle, this also covers the case in which the new bandwidth is
@@ -3475,12 +3580,12 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
         * timer to remain active while there are any throttled entities.)
         */
        cfs_b->idle = 0;
-out_unlock:
-       if (idle)
-               cfs_b->timer_active = 0;
-       raw_spin_unlock(&cfs_b->lock);
 
-       return idle;
+       return 0;
+
+out_deactivate:
+       cfs_b->timer_active = 0;
+       return 1;
 }
 
 /* a cfs_rq won't donate quota below this amount */
@@ -3578,10 +3683,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
                return;
        }
 
-       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
                runtime = cfs_b->runtime;
-               cfs_b->runtime = 0;
-       }
+
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -3592,7 +3696,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
        raw_spin_lock(&cfs_b->lock);
        if (expires == cfs_b->runtime_expires)
-               cfs_b->runtime = runtime;
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3657,6 +3761,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
        int overrun;
        int idle = 0;
 
+       raw_spin_lock(&cfs_b->lock);
        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, cfs_b->period);
@@ -3666,6 +3771,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 
                idle = do_sched_cfs_period_timer(cfs_b, overrun);
        }
+       raw_spin_unlock(&cfs_b->lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -3720,13 +3826,24 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused update_runtime_enabled(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
        for_each_leaf_cfs_rq(rq, cfs_rq) {
-               struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
+               struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
 
+               raw_spin_lock(&cfs_b->lock);
+               cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
+               raw_spin_unlock(&cfs_b->lock);
+       }
+}
+
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
+{
+       struct cfs_rq *cfs_rq;
+
+       for_each_leaf_cfs_rq(rq, cfs_rq) {
                if (!cfs_rq->runtime_enabled)
                        continue;
 
@@ -3734,7 +3851,13 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
                 * clock_task is not advancing so we just need to make sure
                 * there's some valid quota amount
                 */
-               cfs_rq->runtime_remaining = cfs_b->quota;
+               cfs_rq->runtime_remaining = 1;
+               /*
+                * Offline rq is schedulable till cpu is completely disabled
+                * in take_cpu_down(), so we prevent new cfs throttling here.
+                */
+               cfs_rq->runtime_enabled = 0;
+
                if (cfs_rq_throttled(cfs_rq))
                        unthrottle_cfs_rq(cfs_rq);
        }
@@ -3778,6 +3901,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
        return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+static inline void update_runtime_enabled(struct rq *rq) {}
 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -3801,17 +3925,9 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
                if (delta < 0) {
                        if (rq->curr == p)
-                               resched_task(p);
+                               resched_curr(rq);
                        return;
                }
-
-               /*
-                * Don't schedule slices shorter than 10000ns, that just
-                * doesn't make sense. Rely on vruntime for fairness.
-                */
-               if (rq->curr != p)
-                       delta = max_t(s64, 10000LL, delta);
-
                hrtick_start(rq, delta);
        }
 }
@@ -3885,7 +4001,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
        if (!se) {
                update_rq_runnable_avg(rq, rq->nr_running);
-               inc_nr_running(rq);
+               add_nr_running(rq, 1);
        }
        hrtick_update(rq);
 }
@@ -3945,7 +4061,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
        }
 
        if (!se) {
-               dec_nr_running(rq);
+               sub_nr_running(rq, 1);
                update_rq_runnable_avg(rq, 1);
        }
        hrtick_update(rq);
@@ -3991,15 +4107,15 @@ static unsigned long target_load(int cpu, int type)
        return max(rq->cpu_load[type-1], total);
 }
 
-static unsigned long power_of(int cpu)
+static unsigned long capacity_of(int cpu)
 {
-       return cpu_rq(cpu)->cpu_power;
+       return cpu_rq(cpu)->cpu_capacity;
 }
 
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+       unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
        unsigned long load_avg = rq->cfs.runnable_load_avg;
 
        if (nr_running)
@@ -4015,8 +4131,8 @@ static void record_wakee(struct task_struct *p)
         * about the boundary, really active task won't care
         * about the loss.
         */
-       if (jiffies > current->wakee_flip_decay_ts + HZ) {
-               current->wakee_flips = 0;
+       if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
+               current->wakee_flips >>= 1;
                current->wakee_flip_decay_ts = jiffies;
        }
 
@@ -4188,8 +4304,8 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
        s64 this_load, load;
+       s64 this_eff_load, prev_eff_load;
        int idx, this_cpu, prev_cpu;
-       unsigned long tl_per_task;
        struct task_group *tg;
        unsigned long weight;
        int balanced;
@@ -4232,47 +4348,30 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
         * Otherwise check if either cpus are near enough in load to allow this
         * task to be woken on this_cpu.
         */
-       if (this_load > 0) {
-               s64 this_eff_load, prev_eff_load;
+       this_eff_load = 100;
+       this_eff_load *= capacity_of(prev_cpu);
 
-               this_eff_load = 100;
-               this_eff_load *= power_of(prev_cpu);
+       prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+       prev_eff_load *= capacity_of(this_cpu);
+
+       if (this_load > 0) {
                this_eff_load *= this_load +
                        effective_load(tg, this_cpu, weight, weight);
 
-               prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-               prev_eff_load *= power_of(this_cpu);
                prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+       }
 
-               balanced = this_eff_load <= prev_eff_load;
-       } else
-               balanced = true;
-
-       /*
-        * If the currently running task will sleep within
-        * a reasonable amount of time then attract this newly
-        * woken task:
-        */
-       if (sync && balanced)
-               return 1;
+       balanced = this_eff_load <= prev_eff_load;
 
        schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
-       tl_per_task = cpu_avg_load_per_task(this_cpu);
 
-       if (balanced ||
-           (this_load <= load &&
-            this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
-               /*
-                * This domain has SD_WAKE_AFFINE and
-                * p is cache cold in this domain, and
-                * there is no bad imbalance.
-                */
-               schedstat_inc(sd, ttwu_move_affine);
-               schedstat_inc(p, se.statistics.nr_wakeups_affine);
+       if (!balanced)
+               return 0;
 
-               return 1;
-       }
-       return 0;
+       schedstat_inc(sd, ttwu_move_affine);
+       schedstat_inc(p, se.statistics.nr_wakeups_affine);
+
+       return 1;
 }
 
 /*
@@ -4317,8 +4416,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                        avg_load += load;
                }
 
-               /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
+               /* Adjust by relative CPU capacity of the group */
+               avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
 
                if (local_group) {
                        this_load = avg_load;
@@ -4340,20 +4439,46 @@ static int
 find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
        unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
+       unsigned int min_exit_latency = UINT_MAX;
+       u64 latest_idle_timestamp = 0;
+       int least_loaded_cpu = this_cpu;
+       int shallowest_idle_cpu = -1;
        int i;
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
+               if (idle_cpu(i)) {
+                       struct rq *rq = cpu_rq(i);
+                       struct cpuidle_state *idle = idle_get_state(rq);
+                       if (idle && idle->exit_latency < min_exit_latency) {
+                               /*
+                                * We give priority to a CPU whose idle state
+                                * has the smallest exit latency irrespective
+                                * of any idle timestamp.
+                                */
+                               min_exit_latency = idle->exit_latency;
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       } else if ((!idle || idle->exit_latency == min_exit_latency) &&
+                                  rq->idle_stamp > latest_idle_timestamp) {
+                               /*
+                                * If equal or no active idle state, then
+                                * the most recently idled CPU might have
+                                * a warmer cache.
+                                */
+                               latest_idle_timestamp = rq->idle_stamp;
+                               shallowest_idle_cpu = i;
+                       }
+               } else {
+                       load = weighted_cpuload(i);
+                       if (load < min_load || (load == min_load && i == this_cpu)) {
+                               min_load = load;
+                               least_loaded_cpu = i;
+                       }
                }
        }
 
-       return idlest;
+       return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
 /*
@@ -4425,11 +4550,8 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
        if (p->nr_cpus_allowed == 1)
                return prev_cpu;
 
-       if (sd_flag & SD_BALANCE_WAKE) {
-               if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
-                       want_affine = 1;
-               new_cpu = prev_cpu;
-       }
+       if (sd_flag & SD_BALANCE_WAKE)
+               want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
 
        rcu_read_lock();
        for_each_domain(cpu, tmp) {
@@ -4450,10 +4572,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
                        sd = tmp;
        }
 
-       if (affine_sd) {
-               if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
-                       prev_cpu = cpu;
+       if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+               prev_cpu = cpu;
 
+       if (sd_flag & SD_BALANCE_WAKE) {
                new_cpu = select_idle_sibling(p, prev_cpu);
                goto unlock;
        }
@@ -4521,6 +4643,9 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
                atomic_long_add(se->avg.load_avg_contrib,
                                                &cfs_rq->removed_load);
        }
+
+       /* We have migrated, no longer consider this task hot */
+       se->exec_start = 0;
 }
 #endif /* CONFIG_SMP */
 
@@ -4613,7 +4738,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
                return;
 
        /*
-        * This is possible from callers such as move_task(), in which we
+        * This is possible from callers such as attach_tasks(), in which we
         * unconditionally check_preempt_curr() after an enqueue (which may have
         * led to a throttle).  This both saves work and prevents false
         * next-buddy nomination below.
@@ -4667,7 +4792,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        return;
 
 preempt:
-       resched_task(curr);
+       resched_curr(rq);
        /*
         * Only set the backward buddy when the current task is still
         * on the rq. This can happen when a wakeup gets interleaved
@@ -4895,14 +5020,14 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
  *
  *   W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0               (3)
  *
- * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
+ * C_i is the compute capacity of cpu i, typically it is the
  * fraction of 'recent' time available for SCHED_OTHER task execution. But it
  * can also include other factors [XXX].
  *
  * To achieve this balance we define a measure of imbalance which follows
  * directly from (1):
  *
- *   imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j }    (4)
+ *   imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j }    (4)
  *
 * We then move tasks around to minimize the imbalance. In the continuous
  * function space it is obvious this converges, in the discrete case we get
@@ -5021,28 +5146,18 @@ struct lb_env {
        unsigned int            loop_max;
 
        enum fbq_type           fbq_type;
+       struct list_head        tasks;
 };
 
-/*
- * move_task - move a task from one runqueue to another runqueue.
- * Both runqueues must be locked.
- */
-static void move_task(struct task_struct *p, struct lb_env *env)
-{
-       deactivate_task(env->src_rq, p, 0);
-       set_task_cpu(p, env->dst_cpu);
-       activate_task(env->dst_rq, p, 0);
-       check_preempt_curr(env->dst_rq, p, 0);
-}
-
 /*
  * Is this task likely cache-hot:
  */
-static int
-task_hot(struct task_struct *p, u64 now)
+static int task_hot(struct task_struct *p, struct lb_env *env)
 {
        s64 delta;
 
+       lockdep_assert_held(&env->src_rq->lock);
+
        if (p->sched_class != &fair_sched_class)
                return 0;
 
@@ -5052,7 +5167,7 @@ task_hot(struct task_struct *p, u64 now)
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+       if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
@@ -5062,7 +5177,7 @@ task_hot(struct task_struct *p, u64 now)
        if (sysctl_sched_migration_cost == 0)
                return 0;
 
-       delta = now - p->se.exec_start;
+       delta = rq_clock_task(env->src_rq) - p->se.exec_start;
 
        return delta < (s64)sysctl_sched_migration_cost;
 }
@@ -5071,6 +5186,7 @@ task_hot(struct task_struct *p, u64 now)
 /* Returns true if the destination node has incurred more faults */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
+       struct numa_group *numa_group = rcu_dereference(p->numa_group);
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
@@ -5084,21 +5200,29 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       /* Always encourage migration to the preferred node. */
-       if (dst_nid == p->numa_preferred_nid)
-               return true;
+       if (numa_group) {
+               /* Task is already in the group's interleave set. */
+               if (node_isset(src_nid, numa_group->active_nodes))
+                       return false;
+
+               /* Task is moving into the group's interleave set. */
+               if (node_isset(dst_nid, numa_group->active_nodes))
+                       return true;
 
-       /* If both task and group weight improve, this move is a winner. */
-       if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
-           group_weight(p, dst_nid) > group_weight(p, src_nid))
+               return group_faults(p, dst_nid) > group_faults(p, src_nid);
+       }
+
+       /* Encourage migration to the preferred node. */
+       if (dst_nid == p->numa_preferred_nid)
                return true;
 
-       return false;
+       return task_faults(p, dst_nid) > task_faults(p, src_nid);
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
+       struct numa_group *numa_group = rcu_dereference(p->numa_group);
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
@@ -5113,16 +5237,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
+       if (numa_group) {
+               /* Task is moving within/into the group's interleave set. */
+               if (node_isset(dst_nid, numa_group->active_nodes))
+                       return false;
+
+               /* Task is moving out of the group's interleave set. */
+               if (node_isset(src_nid, numa_group->active_nodes))
+                       return true;
+
+               return group_faults(p, dst_nid) < group_faults(p, src_nid);
+       }
+
        /* Migrating away from the preferred node is always bad. */
        if (src_nid == p->numa_preferred_nid)
                return true;
 
-       /* If either task or group weight get worse, don't do it. */
-       if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
-           group_weight(p, dst_nid) < group_weight(p, src_nid))
-               return true;
-
-       return false;
+       return task_faults(p, dst_nid) < task_faults(p, src_nid);
 }
 
 #else
@@ -5146,6 +5277,9 @@ static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
        int tsk_cache_hot = 0;
+
+       lockdep_assert_held(&env->src_rq->lock);
+
        /*
         * We do not migrate tasks that are:
         * 1) throttled_lb_pair, or
@@ -5200,28 +5334,16 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         * 2) task is cache cold, or
         * 3) too many balance attempts have failed.
         */
-       tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
+       tsk_cache_hot = task_hot(p, env);
        if (!tsk_cache_hot)
                tsk_cache_hot = migrate_degrades_locality(p, env);
 
-       if (migrate_improves_locality(p, env)) {
-#ifdef CONFIG_SCHEDSTATS
+       if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
+           env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
                if (tsk_cache_hot) {
                        schedstat_inc(env->sd, lb_hot_gained[env->idle]);
                        schedstat_inc(p, se.statistics.nr_forced_migrations);
                }
-#endif
-               return 1;
-       }
-
-       if (!tsk_cache_hot ||
-               env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-
-               if (tsk_cache_hot) {
-                       schedstat_inc(env->sd, lb_hot_gained[env->idle]);
-                       schedstat_inc(p, se.statistics.nr_forced_migrations);
-               }
-
                return 1;
        }
 
@@ -5230,47 +5352,63 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 }
 
 /*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * detach_task() -- detach the task for the migration specified in env
+ */
+static void detach_task(struct task_struct *p, struct lb_env *env)
+{
+       lockdep_assert_held(&env->src_rq->lock);
+
+       deactivate_task(env->src_rq, p, 0);
+       p->on_rq = TASK_ON_RQ_MIGRATING;
+       set_task_cpu(p, env->dst_cpu);
+}
+
+/*
+ * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
  * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
  *
- * Called with both runqueues locked.
+ * Returns a task if successful and NULL otherwise.
  */
-static int move_one_task(struct lb_env *env)
+static struct task_struct *detach_one_task(struct lb_env *env)
 {
        struct task_struct *p, *n;
 
+       lockdep_assert_held(&env->src_rq->lock);
+
        list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
                if (!can_migrate_task(p, env))
                        continue;
 
-               move_task(p, env);
+               detach_task(p, env);
+
                /*
-                * Right now, this is only the second place move_task()
-                * is called, so we can safely collect move_task()
-                * stats here rather than inside move_task().
+                * Right now, this is only the second place where
+                * lb_gained[env->idle] is updated (other is detach_tasks)
+                * so we can safely collect stats here rather than
+                * inside detach_tasks().
                 */
                schedstat_inc(env->sd, lb_gained[env->idle]);
-               return 1;
+               return p;
        }
-       return 0;
+       return NULL;
 }
 
 static const unsigned int sched_nr_migrate_break = 32;
 
 /*
- * move_tasks tries to move up to imbalance weighted load from busiest to
- * this_rq, as part of a balancing operation within domain "sd".
- * Returns 1 if successful and 0 otherwise.
+ * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * busiest_rq, as part of a balancing operation within domain "sd".
  *
- * Called with both runqueues locked.
+ * Returns number of detached tasks if successful and 0 otherwise.
  */
-static int move_tasks(struct lb_env *env)
+static int detach_tasks(struct lb_env *env)
 {
        struct list_head *tasks = &env->src_rq->cfs_tasks;
        struct task_struct *p;
        unsigned long load;
-       int pulled = 0;
+       int detached = 0;
+
+       lockdep_assert_held(&env->src_rq->lock);
 
        if (env->imbalance <= 0)
                return 0;
@@ -5301,14 +5439,16 @@ static int move_tasks(struct lb_env *env)
                if ((load / 2) > env->imbalance)
                        goto next;
 
-               move_task(p, env);
-               pulled++;
+               detach_task(p, env);
+               list_add(&p->se.group_node, &env->tasks);
+
+               detached++;
                env->imbalance -= load;
 
 #ifdef CONFIG_PREEMPT
                /*
                 * NEWIDLE balancing is a source of latency, so preemptible
-                * kernels will stop after the first task is pulled to minimize
+                * kernels will stop after the first task is detached to minimize
                 * the critical section.
                 */
                if (env->idle == CPU_NEWLY_IDLE)
@@ -5328,13 +5468,58 @@ next:
        }
 
        /*
-        * Right now, this is one of only two places move_task() is called,
-        * so we can safely collect move_task() stats here rather than
-        * inside move_task().
+        * Right now, this is one of only two places we collect this stat
+        * so we can safely collect detach_one_task() stats here rather
+        * than inside detach_one_task().
         */
-       schedstat_add(env->sd, lb_gained[env->idle], pulled);
+       schedstat_add(env->sd, lb_gained[env->idle], detached);
+
+       return detached;
+}
+
+/*
+ * attach_task() -- attach the task detached by detach_task() to its new rq.
+ */
+static void attach_task(struct rq *rq, struct task_struct *p)
+{
+       lockdep_assert_held(&rq->lock);
 
-       return pulled;
+       BUG_ON(task_rq(p) != rq);
+       p->on_rq = TASK_ON_RQ_QUEUED;
+       activate_task(rq, p, 0);
+       check_preempt_curr(rq, p, 0);
+}
+
+/*
+ * attach_one_task() -- attaches the task returned from detach_one_task() to
+ * its new rq.
+ */
+static void attach_one_task(struct rq *rq, struct task_struct *p)
+{
+       raw_spin_lock(&rq->lock);
+       attach_task(rq, p);
+       raw_spin_unlock(&rq->lock);
+}
+
+/*
+ * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
+ * new rq.
+ */
+static void attach_tasks(struct lb_env *env)
+{
+       struct list_head *tasks = &env->tasks;
+       struct task_struct *p;
+
+       raw_spin_lock(&env->dst_rq->lock);
+
+       while (!list_empty(tasks)) {
+               p = list_first_entry(tasks, struct task_struct, se.group_node);
+               list_del_init(&p->se.group_node);
+
+               attach_task(env->dst_rq, p);
+       }
+
+       raw_spin_unlock(&env->dst_rq->lock);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -5453,6 +5638,13 @@ static unsigned long task_h_load(struct task_struct *p)
 #endif
 
 /********** Helpers for find_busiest_group ************************/
+
+enum group_type {
+       group_other = 0,
+       group_imbalanced,
+       group_overloaded,
+};
+
 /*
  * sg_lb_stats - stats of a sched_group required for load_balancing
  */
@@ -5461,13 +5653,13 @@ struct sg_lb_stats {
        unsigned long group_load; /* Total load over the CPUs of the group */
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
-       unsigned long group_power;
+       unsigned long group_capacity;
        unsigned int sum_nr_running; /* Nr tasks running in the group */
-       unsigned int group_capacity;
+       unsigned int group_capacity_factor;
        unsigned int idle_cpus;
        unsigned int group_weight;
-       int group_imb; /* Is there an imbalance in the group ? */
-       int group_has_capacity; /* Is there extra capacity in the group? */
+       enum group_type group_type;
+       int group_has_free_capacity;
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
@@ -5482,7 +5674,7 @@ struct sd_lb_stats {
        struct sched_group *busiest;    /* Busiest group in this sd */
        struct sched_group *local;      /* Local group in this sd */
        unsigned long total_load;       /* Total load of all groups in sd */
-       unsigned long total_pwr;        /* Total power of all groups in sd */
+       unsigned long total_capacity;   /* Total capacity of all groups in sd */
        unsigned long avg_load; /* Average load across all groups in sd */
 
        struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
@@ -5501,9 +5693,11 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
                .busiest = NULL,
                .local = NULL,
                .total_load = 0UL,
-               .total_pwr = 0UL,
+               .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
+                       .sum_nr_running = 0,
+                       .group_type = group_other,
                },
        };
 }
@@ -5536,35 +5730,34 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
 {
-       return SCHED_POWER_SCALE;
+       return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 {
-       return default_scale_freq_power(sd, cpu);
+       return default_scale_capacity(sd, cpu);
 }
 
-static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
+static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
-       unsigned long smt_gain = sd->smt_gain;
+       if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+               return sd->smt_gain / sd->span_weight;
 
-       smt_gain /= weight;
-
-       return smt_gain;
+       return SCHED_CAPACITY_SCALE;
 }
 
-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       return default_scale_smt_power(sd, cpu);
+       return default_scale_cpu_capacity(sd, cpu);
 }
 
-static unsigned long scale_rt_power(int cpu)
+static unsigned long scale_rt_capacity(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        u64 total, available, age_stamp, avg;
+       s64 delta;
 
        /*
         * Since we're reading these variables without serialization make sure
@@ -5573,74 +5766,75 @@ static unsigned long scale_rt_power(int cpu)
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
 
-       total = sched_avg_period() + (rq_clock(rq) - age_stamp);
+       delta = rq_clock(rq) - age_stamp;
+       if (unlikely(delta < 0))
+               delta = 0;
+
+       total = sched_avg_period() + delta;
 
        if (unlikely(total < avg)) {
-               /* Ensures that power won't end up being negative */
+               /* Ensures that capacity won't end up being negative */
                available = 0;
        } else {
                available = total - avg;
        }
 
-       if (unlikely((s64)total < SCHED_POWER_SCALE))
-               total = SCHED_POWER_SCALE;
+       if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
+               total = SCHED_CAPACITY_SCALE;
 
-       total >>= SCHED_POWER_SHIFT;
+       total >>= SCHED_CAPACITY_SHIFT;
 
        return div_u64(available, total);
 }
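
Illustrative sketch, not part of the patch: a worked example of what scale_rt_capacity() computes, i.e. the share of the averaging window left for CFS once RT/IRQ time is subtracted, expressed against SCHED_CAPACITY_SCALE. All numbers below are hypothetical.

	u64 total  = 1000000000ULL;	/* hypothetical: sched_avg_period() + delta, in ns */
	u64 rt_avg =  250000000ULL;	/* hypothetical: rq->rt_avg, time eaten by RT/IRQ  */
	u64 avail  = total - rt_avg;	/* 750000000 ns still usable by CFS                */

	total >>= SCHED_CAPACITY_SHIFT;	/* 976562 */
	/* div_u64(avail, total) == 768, i.e. roughly 75% of SCHED_CAPACITY_SCALE (1024) */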
 
-static void update_cpu_power(struct sched_domain *sd, int cpu)
+static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long weight = sd->span_weight;
-       unsigned long power = SCHED_POWER_SCALE;
+       unsigned long capacity = SCHED_CAPACITY_SCALE;
        struct sched_group *sdg = sd->groups;
 
-       if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-               if (sched_feat(ARCH_POWER))
-                       power *= arch_scale_smt_power(sd, cpu);
-               else
-                       power *= default_scale_smt_power(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_cpu_capacity(sd, cpu);
+       else
+               capacity *= default_scale_cpu_capacity(sd, cpu);
 
-               power >>= SCHED_POWER_SHIFT;
-       }
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       sdg->sgp->power_orig = power;
+       sdg->sgc->capacity_orig = capacity;
 
-       if (sched_feat(ARCH_POWER))
-               power *= arch_scale_freq_power(sd, cpu);
+       if (sched_feat(ARCH_CAPACITY))
+               capacity *= arch_scale_freq_capacity(sd, cpu);
        else
-               power *= default_scale_freq_power(sd, cpu);
+               capacity *= default_scale_capacity(sd, cpu);
 
-       power >>= SCHED_POWER_SHIFT;
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       power *= scale_rt_power(cpu);
-       power >>= SCHED_POWER_SHIFT;
+       capacity *= scale_rt_capacity(cpu);
+       capacity >>= SCHED_CAPACITY_SHIFT;
 
-       if (!power)
-               power = 1;
+       if (!capacity)
+               capacity = 1;
 
-       cpu_rq(cpu)->cpu_power = power;
-       sdg->sgp->power = power;
+       cpu_rq(cpu)->cpu_capacity = capacity;
+       sdg->sgc->capacity = capacity;
 }
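
Illustrative sketch, not part of the patch: walking a hypothetical SMT-2 sibling through the scaling chain above, assuming the usual smt_gain of 1178, full frequency, and the ~75% RT-free fraction from the previous example.

	unsigned long capacity = SCHED_CAPACITY_SCALE;				/* 1024 */

	capacity = (capacity * (1178 / 2)) >> SCHED_CAPACITY_SHIFT;		/* 589, stored as capacity_orig */
	capacity = (capacity * SCHED_CAPACITY_SCALE) >> SCHED_CAPACITY_SHIFT;	/* 589, running at full frequency */
	capacity = (capacity * 768) >> SCHED_CAPACITY_SHIFT;			/* 441 left for CFS work */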
 
-void update_group_power(struct sched_domain *sd, int cpu)
+void update_group_capacity(struct sched_domain *sd, int cpu)
 {
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
-       unsigned long power, power_orig;
+       unsigned long capacity, capacity_orig;
        unsigned long interval;
 
        interval = msecs_to_jiffies(sd->balance_interval);
        interval = clamp(interval, 1UL, max_load_balance_interval);
-       sdg->sgp->next_update = jiffies + interval;
+       sdg->sgc->next_update = jiffies + interval;
 
        if (!child) {
-               update_cpu_power(sd, cpu);
+               update_cpu_capacity(sd, cpu);
                return;
        }
 
-       power_orig = power = 0;
+       capacity_orig = capacity = 0;
 
        if (child->flags & SD_OVERLAP) {
                /*
@@ -5649,31 +5843,31 @@ void update_group_power(struct sched_domain *sd, int cpu)
                 */
 
                for_each_cpu(cpu, sched_group_cpus(sdg)) {
-                       struct sched_group_power *sgp;
+                       struct sched_group_capacity *sgc;
                        struct rq *rq = cpu_rq(cpu);
 
                        /*
-                        * build_sched_domains() -> init_sched_groups_power()
+                        * build_sched_domains() -> init_sched_groups_capacity()
                         * gets here before we've attached the domains to the
                         * runqueues.
                         *
-                        * Use power_of(), which is set irrespective of domains
-                        * in update_cpu_power().
+                        * Use capacity_of(), which is set irrespective of domains
+                        * in update_cpu_capacity().
                         *
-                        * This avoids power/power_orig from being 0 and
+                        * This avoids capacity/capacity_orig from being 0 and
                         * causing divide-by-zero issues on boot.
                         *
-                        * Runtime updates will correct power_orig.
+                        * Runtime updates will correct capacity_orig.
                         */
                        if (unlikely(!rq->sd)) {
-                               power_orig += power_of(cpu);
-                               power += power_of(cpu);
+                               capacity_orig += capacity_of(cpu);
+                               capacity += capacity_of(cpu);
                                continue;
                        }
 
-                       sgp = rq->sd->groups->sgp;
-                       power_orig += sgp->power_orig;
-                       power += sgp->power;
+                       sgc = rq->sd->groups->sgc;
+                       capacity_orig += sgc->capacity_orig;
+                       capacity += sgc->capacity;
                }
        } else  {
                /*
@@ -5683,14 +5877,14 @@ void update_group_power(struct sched_domain *sd, int cpu)
 
                group = child->groups;
                do {
-                       power_orig += group->sgp->power_orig;
-                       power += group->sgp->power;
+                       capacity_orig += group->sgc->capacity_orig;
+                       capacity += group->sgc->capacity;
                        group = group->next;
                } while (group != child->groups);
        }
 
-       sdg->sgp->power_orig = power_orig;
-       sdg->sgp->power = power;
+       sdg->sgc->capacity_orig = capacity_orig;
+       sdg->sgc->capacity = capacity;
 }
 
 /*
@@ -5704,15 +5898,15 @@ static inline int
 fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 {
        /*
-        * Only siblings can have significantly less than SCHED_POWER_SCALE
+        * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
         */
-       if (!(sd->flags & SD_SHARE_CPUPOWER))
+       if (!(sd->flags & SD_SHARE_CPUCAPACITY))
                return 0;
 
        /*
-        * If ~90% of the cpu_power is still there, we're good.
+        * If ~90% of the cpu_capacity is still there, we're good.
         */
-       if (group->sgp->power * 32 > group->sgp->power_orig * 29)
+       if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
                return 1;
 
        return 0;
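
Illustrative arithmetic, not part of the patch: the 32/29 comparison above is an integer-only way of asking whether more than roughly 90% of the original capacity is still available.

	unsigned int capacity_orig = 589;	/* hypothetical SMT-2 sibling */
	unsigned int capacity = 534;		/* hypothetical remaining capacity */

	/* 534 * 32 = 17088 > 589 * 29 = 17081, i.e. just over ~90% is left, so return 1 */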
@@ -5749,34 +5943,47 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
 
 static inline int sg_imbalanced(struct sched_group *group)
 {
-       return group->sgp->imbalance;
+       return group->sgc->imbalance;
 }
 
 /*
- * Compute the group capacity.
+ * Compute the group capacity factor.
  *
- * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
+ * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
  * first dividing out the smt factor and computing the actual number of cores
- * and limit power unit capacity with that.
+ * and limiting the capacity factor with that.
  */
-static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
+static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
 {
-       unsigned int capacity, smt, cpus;
-       unsigned int power, power_orig;
+       unsigned int capacity_factor, smt, cpus;
+       unsigned int capacity, capacity_orig;
 
-       power = group->sgp->power;
-       power_orig = group->sgp->power_orig;
+       capacity = group->sgc->capacity;
+       capacity_orig = group->sgc->capacity_orig;
        cpus = group->group_weight;
 
-       /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
-       smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
-       capacity = cpus / smt; /* cores */
+       /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
+       smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
+       capacity_factor = cpus / smt; /* cores */
 
-       capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
-       if (!capacity)
-               capacity = fix_small_capacity(env->sd, group);
+       capacity_factor = min_t(unsigned,
+               capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
+       if (!capacity_factor)
+               capacity_factor = fix_small_capacity(env->sd, group);
+
+       return capacity_factor;
+}
 
-       return capacity;
+static enum group_type
+group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
+{
+       if (sgs->sum_nr_running > sgs->group_capacity_factor)
+               return group_overloaded;
+
+       if (sg_imbalanced(group))
+               return group_imbalanced;
+
+       return group_other;
 }
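
Illustrative sketch, not part of the patch: hypothetical numbers showing the 'phantom core' case that sg_capacity_factor() guards against, using a group of 8 SMT-2 siblings that each contribute capacity_orig = 589 (smt_gain = 1178).

	unsigned int cpus = 8;
	unsigned int capacity_orig = 8 * 589;					/* 4712 */

	/* Naive rounding would report DIV_ROUND_CLOSEST(4712, 1024) == 5 "cores", a phantom 5th. */
	unsigned int smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);	/* 2 */
	unsigned int capacity_factor = cpus / smt;				/* 4 real cores */

group_classify() then reports group_overloaded once a group's sum_nr_running exceeds this factor.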
 
 /**
@@ -5786,10 +5993,12 @@ static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
+ * @overload: Indicate more than one runnable task for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
-                       int local_group, struct sg_lb_stats *sgs)
+                       int local_group, struct sg_lb_stats *sgs,
+                       bool *overload)
 {
        unsigned long load;
        int i;
@@ -5806,7 +6015,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        load = source_load(i, load_idx);
 
                sgs->group_load += load;
-               sgs->sum_nr_running += rq->nr_running;
+               sgs->sum_nr_running += rq->cfs.h_nr_running;
+
+               if (rq->nr_running > 1)
+                       *overload = true;
+
 #ifdef CONFIG_NUMA_BALANCING
                sgs->nr_numa_running += rq->nr_numa_running;
                sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -5816,20 +6029,19 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                        sgs->idle_cpus++;
        }
 
-       /* Adjust by relative CPU power of the group */
-       sgs->group_power = group->sgp->power;
-       sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
+       /* Adjust by relative CPU capacity of the group */
+       sgs->group_capacity = group->sgc->capacity;
+       sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
 
        if (sgs->sum_nr_running)
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
        sgs->group_weight = group->group_weight;
+       sgs->group_capacity_factor = sg_capacity_factor(env, group);
+       sgs->group_type = group_classify(group, sgs);
 
-       sgs->group_imb = sg_imbalanced(group);
-       sgs->group_capacity = sg_capacity(env, group);
-
-       if (sgs->group_capacity > sgs->sum_nr_running)
-               sgs->group_has_capacity = 1;
+       if (sgs->group_capacity_factor > sgs->sum_nr_running)
+               sgs->group_has_free_capacity = 1;
 }
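
Illustrative sketch, not part of the patch: why avg_load is scaled by group capacity above. Two hypothetical groups carry the same raw group_load = 2048, one built from two full cores, one from a single SMT-2 pair.

	unsigned long full_cores = (2048UL * SCHED_CAPACITY_SCALE) / 2048;	/* 1024 */
	unsigned long smt_pair   = (2048UL * SCHED_CAPACITY_SCALE) / 1178;	/* 1780, reported busier */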
 
 /**
@@ -5850,13 +6062,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sched_group *sg,
                                   struct sg_lb_stats *sgs)
 {
-       if (sgs->avg_load <= sds->busiest_stat.avg_load)
-               return false;
+       struct sg_lb_stats *busiest = &sds->busiest_stat;
 
-       if (sgs->sum_nr_running > sgs->group_capacity)
+       if (sgs->group_type > busiest->group_type)
                return true;
 
-       if (sgs->group_imb)
+       if (sgs->group_type < busiest->group_type)
+               return false;
+
+       if (sgs->avg_load <= busiest->avg_load)
+               return false;
+
+       /* This is the busiest node in its class. */
+       if (!(env->sd->flags & SD_ASYM_PACKING))
                return true;
 
        /*
@@ -5864,8 +6082,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
         * numbered CPUs in the group, therefore mark all groups
         * higher than ourself as busy.
         */
-       if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
-           env->dst_cpu < group_first_cpu(sg)) {
+       if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
                if (!sds->busiest)
                        return true;
 
@@ -5917,6 +6134,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats tmp_sgs;
        int load_idx, prefer_sibling = 0;
+       bool overload = false;
 
        if (child && child->flags & SD_PREFER_SIBLING)
                prefer_sibling = 1;
@@ -5933,28 +6151,29 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                        sgs = &sds->local_stat;
 
                        if (env->idle != CPU_NEWLY_IDLE ||
-                           time_after_eq(jiffies, sg->sgp->next_update))
-                               update_group_power(env->sd, env->dst_cpu);
+                           time_after_eq(jiffies, sg->sgc->next_update))
+                               update_group_capacity(env->sd, env->dst_cpu);
                }
 
-               update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
+               update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
+                                               &overload);
 
                if (local_group)
                        goto next_group;
 
                /*
                 * In case the child domain prefers tasks go to siblings
-                * first, lower the sg capacity to one so that we'll try
+                * first, lower the sg capacity factor to one so that we'll try
                 * and move all the excess tasks away. We lower the capacity
                 * of a group only if the local group has the capacity to fit
-                * these excess tasks, i.e. nr_running < group_capacity. The
+                * these excess tasks, i.e. nr_running < group_capacity_factor. The
                 * extra check prevents the case where you always pull from the
                 * heaviest group when it is already under-utilized (possible when
                 * a single large-weight task outweighs the other tasks on the system).
                 */
                if (prefer_sibling && sds->local &&
-                   sds->local_stat.group_has_capacity)
-                       sgs->group_capacity = min(sgs->group_capacity, 1U);
+                   sds->local_stat.group_has_free_capacity)
+                       sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
 
                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
@@ -5964,13 +6183,20 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 next_group:
                /* Now, start updating sd_lb_stats */
                sds->total_load += sgs->group_load;
-               sds->total_pwr += sgs->group_power;
+               sds->total_capacity += sgs->group_capacity;
 
                sg = sg->next;
        } while (sg != env->sd->groups);
 
        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
+       if (!env->sd->parent) {
+               /* update overload indicator if we are at root domain */
+               if (env->dst_rq->rd->overload != overload)
+                       env->dst_rq->rd->overload = overload;
+       }
+
 }
 
 /**
@@ -6011,8 +6237,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
                return 0;
 
        env->imbalance = DIV_ROUND_CLOSEST(
-               sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
-               SCHED_POWER_SCALE);
+               sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
+               SCHED_CAPACITY_SCALE);
 
        return 1;
 }
@@ -6027,7 +6253,7 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 static inline
 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 {
-       unsigned long tmp, pwr_now = 0, pwr_move = 0;
+       unsigned long tmp, capa_now = 0, capa_move = 0;
        unsigned int imbn = 2;
        unsigned long scaled_busy_load_per_task;
        struct sg_lb_stats *local, *busiest;
@@ -6041,8 +6267,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
                imbn = 1;
 
        scaled_busy_load_per_task =
-               (busiest->load_per_task * SCHED_POWER_SCALE) /
-               busiest->group_power;
+               (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+               busiest->group_capacity;
 
        if (busiest->avg_load + scaled_busy_load_per_task >=
            local->avg_load + (scaled_busy_load_per_task * imbn)) {
@@ -6052,38 +6278,38 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 
        /*
         * OK, we don't have enough imbalance to justify moving tasks,
-        * however we may be able to increase total CPU power used by
+        * however we may be able to increase total CPU capacity used by
         * moving them.
         */
 
-       pwr_now += busiest->group_power *
+       capa_now += busiest->group_capacity *
                        min(busiest->load_per_task, busiest->avg_load);
-       pwr_now += local->group_power *
+       capa_now += local->group_capacity *
                        min(local->load_per_task, local->avg_load);
-       pwr_now /= SCHED_POWER_SCALE;
+       capa_now /= SCHED_CAPACITY_SCALE;
 
        /* Amount of load we'd subtract */
        if (busiest->avg_load > scaled_busy_load_per_task) {
-               pwr_move += busiest->group_power *
+               capa_move += busiest->group_capacity *
                            min(busiest->load_per_task,
                                busiest->avg_load - scaled_busy_load_per_task);
        }
 
        /* Amount of load we'd add */
-       if (busiest->avg_load * busiest->group_power <
-           busiest->load_per_task * SCHED_POWER_SCALE) {
-               tmp = (busiest->avg_load * busiest->group_power) /
-                     local->group_power;
+       if (busiest->avg_load * busiest->group_capacity <
+           busiest->load_per_task * SCHED_CAPACITY_SCALE) {
+               tmp = (busiest->avg_load * busiest->group_capacity) /
+                     local->group_capacity;
        } else {
-               tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-                     local->group_power;
+               tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
+                     local->group_capacity;
        }
-       pwr_move += local->group_power *
+       capa_move += local->group_capacity *
                    min(local->load_per_task, local->avg_load + tmp);
-       pwr_move /= SCHED_POWER_SCALE;
+       capa_move /= SCHED_CAPACITY_SCALE;
 
        /* Move if we gain throughput */
-       if (pwr_move > pwr_now)
+       if (capa_move > capa_now)
                env->imbalance = busiest->load_per_task;
 }
 
@@ -6101,7 +6327,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        local = &sds->local_stat;
        busiest = &sds->busiest_stat;
 
-       if (busiest->group_imb) {
+       if (busiest->group_type == group_imbalanced) {
                /*
                 * In the group_imb case we cannot rely on group-wide averages
                 * to ensure cpu-load equilibrium, look at wider averages. XXX
@@ -6113,7 +6339,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load (as we skip the groups at or below
-        * its cpu_power, while calculating max_load..)
+        * their cpu_capacity while calculating max_load).
         */
        if (busiest->avg_load <= sds->avg_load ||
            local->avg_load >= sds->avg_load) {
@@ -6121,17 +6347,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
                return fix_small_imbalance(env, sds);
        }
 
-       if (!busiest->group_imb) {
-               /*
-                * Don't want to pull so many tasks that a group would go idle.
-                * Except of course for the group_imb case, since then we might
-                * have to drop below capacity to reach cpu-load equilibrium.
-                */
+       /*
+        * If there aren't any idle cpus, avoid creating some.
+        */
+       if (busiest->group_type == group_overloaded &&
+           local->group_type   == group_overloaded) {
                load_above_capacity =
-                       (busiest->sum_nr_running - busiest->group_capacity);
+                       (busiest->sum_nr_running - busiest->group_capacity_factor);
 
-               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-               load_above_capacity /= busiest->group_power;
+               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
+               load_above_capacity /= busiest->group_capacity;
        }
 
        /*
@@ -6146,9 +6371,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 
        /* How much load to actually move to equalise the imbalance */
        env->imbalance = min(
-               max_pull * busiest->group_power,
-               (sds->avg_load - local->avg_load) * local->group_power
-       ) / SCHED_POWER_SCALE;
+               max_pull * busiest->group_capacity,
+               (sds->avg_load - local->avg_load) * local->group_capacity
+       ) / SCHED_CAPACITY_SCALE;
 
        /*
         * if *imbalance is less than the average load per runnable task
@@ -6202,23 +6427,24 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
        if (!sds.busiest || busiest->sum_nr_running == 0)
                goto out_balanced;
 
-       sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
+       sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
+                                               / sds.total_capacity;
 
        /*
         * If the busiest group is imbalanced the below checks don't
         * work because they assume all things are equal, which typically
         * isn't true due to cpus_allowed constraints and the like.
         */
-       if (busiest->group_imb)
+       if (busiest->group_type == group_imbalanced)
                goto force_balance;
 
        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
-       if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
-           !busiest->group_has_capacity)
+       if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
+           !busiest->group_has_free_capacity)
                goto force_balance;
 
        /*
-        * If the local group is more busy than the selected busiest group
+        * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
         */
        if (local->avg_load >= busiest->avg_load)
@@ -6233,13 +6459,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 
        if (env->idle == CPU_IDLE) {
                /*
-                * This cpu is idle. If the busiest group load doesn't
-                * have more tasks than the number of available cpu's and
-                * there is no imbalance between this and busiest group
-                * wrt to idle cpu's, it is balanced.
+                * This cpu is idle. If the busiest group is not overloaded
+                * and there is no imbalance between this and the busiest group
+                * wrt idle cpus, it is balanced. The imbalance only becomes
+                * significant if the difference is greater than 1; otherwise
+                * we might just end up moving the imbalance to another group.
                 */
-               if ((local->idle_cpus < busiest->idle_cpus) &&
-                   busiest->sum_nr_running <= busiest->group_weight)
+               if ((busiest->group_type != group_overloaded) &&
+                               (local->idle_cpus <= (busiest->idle_cpus + 1)))
                        goto out_balanced;
        } else {
                /*
@@ -6268,11 +6495,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                                     struct sched_group *group)
 {
        struct rq *busiest = NULL, *rq;
-       unsigned long busiest_load = 0, busiest_power = 1;
+       unsigned long busiest_load = 0, busiest_capacity = 1;
        int i;
 
        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
-               unsigned long power, capacity, wl;
+               unsigned long capacity, capacity_factor, wl;
                enum fbq_type rt;
 
                rq = cpu_rq(i);
@@ -6300,34 +6527,34 @@ static struct rq *find_busiest_queue(struct lb_env *env,
                if (rt > env->fbq_type)
                        continue;
 
-               power = power_of(i);
-               capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
-               if (!capacity)
-                       capacity = fix_small_capacity(env->sd, group);
+               capacity = capacity_of(i);
+               capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
+               if (!capacity_factor)
+                       capacity_factor = fix_small_capacity(env->sd, group);
 
                wl = weighted_cpuload(i);
 
                /*
                 * When comparing with imbalance, use weighted_cpuload()
-                * which is not scaled with the cpu power.
+                * which is not scaled with the cpu capacity.
                 */
-               if (capacity && rq->nr_running == 1 && wl > env->imbalance)
+               if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
                        continue;
 
                /*
                 * For the load comparisons with the other cpu's, consider
-                * the weighted_cpuload() scaled with the cpu power, so that
-                * the load can be moved away from the cpu that is potentially
-                * running at a lower capacity.
+                * the weighted_cpuload() scaled with the cpu capacity, so
+                * that the load can be moved away from the cpu that is
+                * potentially running at a lower capacity.
                 *
-                * Thus we're looking for max(wl_i / power_i), crosswise
+                * Thus we're looking for max(wl_i / capacity_i), crosswise
                 * multiplication to rid ourselves of the division works out
-                * to: wl_i * power_j > wl_j * power_i;  where j is our
-                * previous maximum.
+                * to: wl_i * capacity_j > wl_j * capacity_i;  where j is
+                * our previous maximum.
                 */
-               if (wl * busiest_power > busiest_load * power) {
+               if (wl * busiest_capacity > busiest_load * capacity) {
                        busiest_load = wl;
-                       busiest_power = power;
+                       busiest_capacity = capacity;
                        busiest = rq;
                }
        }
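
Illustrative sketch, not part of the patch: hypothetical numbers for the crosswise comparison above, showing how a CPU with half the capacity and less absolute load can still be the busiest per unit of capacity.

	unsigned long wl_a = 1500, cap_a = 512;		/* wl/capacity ~ 2.93 */
	unsigned long wl_b = 2000, cap_b = 1024;	/* wl/capacity ~ 1.95 */

	/* wl_a * cap_b > wl_b * cap_a  <=>  1536000 > 1024000, so rq a is picked as busiest */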
@@ -6411,7 +6638,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        struct sched_group *group;
        struct rq *busiest;
        unsigned long flags;
-       struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
        struct lb_env env = {
                .sd             = sd,
@@ -6422,6 +6649,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
                .loop_break     = sched_nr_migrate_break,
                .cpus           = cpus,
                .fbq_type       = all,
+               .tasks          = LIST_HEAD_INIT(env.tasks),
        };
 
        /*
@@ -6471,23 +6699,30 @@ redo:
                env.loop_max  = min(sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
-               local_irq_save(flags);
-               double_rq_lock(env.dst_rq, busiest);
+               raw_spin_lock_irqsave(&busiest->lock, flags);
 
                /*
                 * cur_ld_moved - load moved in current iteration
                 * ld_moved     - cumulative load moved across iterations
                 */
-               cur_ld_moved = move_tasks(&env);
-               ld_moved += cur_ld_moved;
-               double_rq_unlock(env.dst_rq, busiest);
-               local_irq_restore(flags);
+               cur_ld_moved = detach_tasks(&env);
 
                /*
-                * some other cpu did the load balance for us.
+                * We've detached some tasks from busiest_rq. Every such
+                * task is marked TASK_ON_RQ_MIGRATING, so we can safely
+                * unlock busiest->lock and be sure that nobody can
+                * manipulate those tasks in parallel.
+                * See task_rq_lock() family for the details.
                 */
-               if (cur_ld_moved && env.dst_cpu != smp_processor_id())
-                       resched_cpu(env.dst_cpu);
+
+               raw_spin_unlock(&busiest->lock);
+
+               if (cur_ld_moved) {
+                       attach_tasks(&env);
+                       ld_moved += cur_ld_moved;
+               }
+
+               local_irq_restore(flags);
 
                if (env.flags & LBF_NEED_BREAK) {
                        env.flags &= ~LBF_NEED_BREAK;
@@ -6535,12 +6770,10 @@ more_balance:
                 * We failed to reach balance because of affinity.
                 */
                if (sd_parent) {
-                       int *group_imbalance = &sd_parent->groups->sgp->imbalance;
+                       int *group_imbalance = &sd_parent->groups->sgc->imbalance;
 
-                       if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
+                       if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
                                *group_imbalance = 1;
-                       } else if (*group_imbalance)
-                               *group_imbalance = 0;
                }
 
                /* All tasks on this runqueue were pinned by CPU affinity */
@@ -6551,7 +6784,7 @@ more_balance:
                                env.loop_break = sched_nr_migrate_break;
                                goto redo;
                        }
-                       goto out_balanced;
+                       goto out_all_pinned;
                }
        }
 
@@ -6616,7 +6849,7 @@ more_balance:
                 * If we've begun active balancing, start to back off. This
                 * case may not be covered by the all_pinned logic if there
                 * is only 1 task on the busy runqueue (because we don't call
-                * move_tasks).
+                * detach_tasks).
                 */
                if (sd->balance_interval < sd->max_interval)
                        sd->balance_interval *= 2;
@@ -6625,6 +6858,23 @@ more_balance:
        goto out;
 
 out_balanced:
+       /*
+        * We reach balance although we may have faced some affinity
+        * constraints. Clear the imbalance flag if it was set.
+        */
+       if (sd_parent) {
+               int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+               if (*group_imbalance)
+                       *group_imbalance = 0;
+       }
+
+out_all_pinned:
+       /*
+        * We reach balance because all tasks are pinned at this level, so
+        * we can't migrate them. Leave the imbalance flag set so the parent
+        * level can try to migrate them.
+        */
        schedstat_inc(sd, lb_balanced[idle]);
 
        sd->nr_balance_failed = 0;
@@ -6641,17 +6891,44 @@ out:
        return ld_moved;
 }
 
+static inline unsigned long
+get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
+{
+       unsigned long interval = sd->balance_interval;
+
+       if (cpu_busy)
+               interval *= sd->busy_factor;
+
+       /* scale ms to jiffies */
+       interval = msecs_to_jiffies(interval);
+       interval = clamp(interval, 1UL, max_load_balance_interval);
+
+       return interval;
+}
+
+static inline void
+update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
+{
+       unsigned long interval, next;
+
+       interval = get_sd_balance_interval(sd, cpu_busy);
+       next = sd->last_balance + interval;
+
+       if (time_after(*next_balance, next))
+               *next_balance = next;
+}
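
Illustrative sketch, not part of the patch: how the two helpers above play out with hypothetical values sd->balance_interval = 64 (ms) and sd->busy_factor = 32, assuming HZ = 1000.

	unsigned long busy_interval = msecs_to_jiffies(64 * 32);	/* 2048 jiffies for a busy CPU */
	unsigned long idle_interval = msecs_to_jiffies(64);		/* 64 jiffies when newly idle  */

	/* Both are clamped to [1, max_load_balance_interval]; update_next_balance() then keeps */
	/* only the earliest resulting deadline, so *next_balance is only ever pulled closer.   */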
+
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
 static int idle_balance(struct rq *this_rq)
 {
+       unsigned long next_balance = jiffies + HZ;
+       int this_cpu = this_rq->cpu;
        struct sched_domain *sd;
        int pulled_task = 0;
-       unsigned long next_balance = jiffies + HZ;
        u64 curr_cost = 0;
-       int this_cpu = this_rq->cpu;
 
        idle_enter_fair(this_rq);
 
@@ -6661,8 +6938,16 @@ static int idle_balance(struct rq *this_rq)
         */
        this_rq->idle_stamp = rq_clock(this_rq);
 
-       if (this_rq->avg_idle < sysctl_sched_migration_cost)
+       if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+           !this_rq->rd->overload) {
+               rcu_read_lock();
+               sd = rcu_dereference_check_sched_domain(this_rq->sd);
+               if (sd)
+                       update_next_balance(sd, 0, &next_balance);
+               rcu_read_unlock();
+
                goto out;
+       }
 
        /*
         * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6672,20 +6957,20 @@ static int idle_balance(struct rq *this_rq)
        update_blocked_averages(this_cpu);
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
-               unsigned long interval;
                int continue_balancing = 1;
                u64 t0, domain_cost;
 
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
 
-               if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
+               if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+                       update_next_balance(sd, 0, &next_balance);
                        break;
+               }
 
                if (sd->flags & SD_BALANCE_NEWIDLE) {
                        t0 = sched_clock_cpu(this_cpu);
 
-                       /* If we've pulled tasks over stop searching: */
                        pulled_task = load_balance(this_cpu, this_rq,
                                                   sd, CPU_NEWLY_IDLE,
                                                   &continue_balancing);
@@ -6697,10 +6982,13 @@ static int idle_balance(struct rq *this_rq)
                        curr_cost += domain_cost;
                }
 
-               interval = msecs_to_jiffies(sd->balance_interval);
-               if (time_after(next_balance, sd->last_balance + interval))
-                       next_balance = sd->last_balance + interval;
-               if (pulled_task)
+               update_next_balance(sd, 0, &next_balance);
+
+               /*
+                * Stop searching for tasks to pull if there are
+                * now runnable tasks on this rq.
+                */
+               if (pulled_task || this_rq->nr_running > 0)
                        break;
        }
        rcu_read_unlock();
@@ -6718,20 +7006,13 @@ static int idle_balance(struct rq *this_rq)
        if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
 
-       if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
-               /*
-                * We are going idle. next_balance may be set based on
-                * a busy processor. So reset next_balance.
-                */
+out:
+       /* Move the next balance forward */
+       if (time_after(this_rq->next_balance, next_balance))
                this_rq->next_balance = next_balance;
-       }
 
-out:
        /* Is there a task of a high priority class? */
-       if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
-           ((this_rq->stop && this_rq->stop->on_rq) ||
-            this_rq->dl.dl_nr_running ||
-            (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+       if (this_rq->nr_running != this_rq->cfs.h_nr_running)
                pulled_task = -1;
 
        if (pulled_task) {
@@ -6755,6 +7036,7 @@ static int active_load_balance_cpu_stop(void *data)
        int target_cpu = busiest_rq->push_cpu;
        struct rq *target_rq = cpu_rq(target_cpu);
        struct sched_domain *sd;
+       struct task_struct *p = NULL;
 
        raw_spin_lock_irq(&busiest_rq->lock);
 
@@ -6774,9 +7056,6 @@ static int active_load_balance_cpu_stop(void *data)
         */
        BUG_ON(busiest_rq == target_rq);
 
-       /* move a task from busiest_rq to target_rq */
-       double_lock_balance(busiest_rq, target_rq);
-
        /* Search for an sd spanning us and the target CPU. */
        rcu_read_lock();
        for_each_domain(target_cpu, sd) {
@@ -6797,16 +7076,22 @@ static int active_load_balance_cpu_stop(void *data)
 
                schedstat_inc(sd, alb_count);
 
-               if (move_one_task(&env))
+               p = detach_one_task(&env);
+               if (p)
                        schedstat_inc(sd, alb_pushed);
                else
                        schedstat_inc(sd, alb_failed);
        }
        rcu_read_unlock();
-       double_unlock_balance(busiest_rq, target_rq);
 out_unlock:
        busiest_rq->active_balance = 0;
-       raw_spin_unlock_irq(&busiest_rq->lock);
+       raw_spin_unlock(&busiest_rq->lock);
+
+       if (p)
+               attach_one_task(target_rq, p);
+
+       local_irq_enable();
+
        return 0;
 }
 
@@ -6892,7 +7177,7 @@ static inline void set_cpu_sd_state_busy(void)
                goto unlock;
        sd->nohz_idle = 0;
 
-       atomic_inc(&sd->groups->sgp->nr_busy_cpus);
+       atomic_inc(&sd->groups->sgc->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -6909,7 +7194,7 @@ void set_cpu_sd_state_idle(void)
                goto unlock;
        sd->nohz_idle = 1;
 
-       atomic_dec(&sd->groups->sgp->nr_busy_cpus);
+       atomic_dec(&sd->groups->sgc->nr_busy_cpus);
 unlock:
        rcu_read_unlock();
 }
@@ -7012,16 +7297,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                        break;
                }
 
-               interval = sd->balance_interval;
-               if (idle != CPU_IDLE)
-                       interval *= sd->busy_factor;
-
-               /* scale ms to jiffies */
-               interval = msecs_to_jiffies(interval);
-               interval = clamp(interval, 1UL, max_load_balance_interval);
+               interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
 
                need_serialize = sd->flags & SD_SERIALIZE;
-
                if (need_serialize) {
                        if (!spin_trylock(&balancing))
                                goto out;
@@ -7037,6 +7315,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
                                idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
                        }
                        sd->last_balance = jiffies;
+                       interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
                }
                if (need_serialize)
                        spin_unlock(&balancing);
@@ -7094,12 +7373,17 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
                rq = cpu_rq(balance_cpu);
 
-               raw_spin_lock_irq(&rq->lock);
-               update_rq_clock(rq);
-               update_idle_cpu_load(rq);
-               raw_spin_unlock_irq(&rq->lock);
-
-               rebalance_domains(rq, CPU_IDLE);
+               /*
+                * If the next balance of this rq is due,
+                * do the balance.
+                */
+               if (time_after_eq(jiffies, rq->next_balance)) {
+                       raw_spin_lock_irq(&rq->lock);
+                       update_rq_clock(rq);
+                       update_idle_cpu_load(rq);
+                       raw_spin_unlock_irq(&rq->lock);
+                       rebalance_domains(rq, CPU_IDLE);
+               }
 
                if (time_after(this_rq->next_balance, rq->next_balance))
                        this_rq->next_balance = rq->next_balance;
@@ -7114,7 +7398,7 @@ end:
  * of an idle cpu is the system.
  *   - This rq has more than one task.
  *   - At any scheduler domain level, this cpu's scheduler group has multiple
- *     busy cpu's exceeding the group's power.
+ *     busy cpus exceeding the group's capacity.
 *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
  *     domain span are idle.
  */
@@ -7122,7 +7406,7 @@ static inline int nohz_kick_needed(struct rq *rq)
 {
        unsigned long now = jiffies;
        struct sched_domain *sd;
-       struct sched_group_power *sgp;
+       struct sched_group_capacity *sgc;
        int nr_busy, cpu = rq->cpu;
 
        if (unlikely(rq->idle_balance))
@@ -7152,8 +7436,8 @@ static inline int nohz_kick_needed(struct rq *rq)
        sd = rcu_dereference(per_cpu(sd_busy, cpu));
 
        if (sd) {
-               sgp = sd->groups->sgp;
-               nr_busy = atomic_read(&sgp->nr_busy_cpus);
+               sgc = sd->groups->sgc;
+               nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
                if (nr_busy > 1)
                        goto need_kick_unlock;
@@ -7217,6 +7501,8 @@ void trigger_load_balance(struct rq *rq)
 static void rq_online_fair(struct rq *rq)
 {
        update_sysctl();
+
+       update_runtime_enabled(rq);
 }
 
 static void rq_offline_fair(struct rq *rq)
@@ -7290,7 +7576,7 @@ static void task_fork_fair(struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
-               resched_task(rq->curr);
+               resched_curr(rq);
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
@@ -7305,7 +7591,7 @@ static void task_fork_fair(struct task_struct *p)
 static void
 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
-       if (!p->se.on_rq)
+       if (!task_on_rq_queued(p))
                return;
 
        /*
@@ -7315,7 +7601,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         */
        if (rq->curr == p) {
                if (p->prio > oldprio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        } else
                check_preempt_curr(rq, p, 0);
 }
@@ -7330,11 +7616,11 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
         * switched back to the fair class the enqueue_entity(.flags=0) will
         * do the right thing.
         *
-        * If it's on_rq, then the dequeue_entity(.flags=0) will already
-        * have normalized the vruntime, if it's !on_rq, then only when
+        * If it's queued, then the dequeue_entity(.flags=0) will already
+        * have normalized the vruntime, if it's !queued, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
-       if (!p->on_rq && p->state != TASK_RUNNING) {
+       if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
@@ -7361,15 +7647,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
-       struct sched_entity *se = &p->se;
 #ifdef CONFIG_FAIR_GROUP_SCHED
+       struct sched_entity *se = &p->se;
        /*
         * Since the real-depth could have been changed (only FAIR
         * class maintain depth value), reset depth properly.
         */
        se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
-       if (!se->on_rq)
+       if (!task_on_rq_queued(p))
                return;
 
        /*
@@ -7378,7 +7664,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
         * if we can still preempt the current task.
         */
        if (rq->curr == p)
-               resched_task(rq->curr);
+               resched_curr(rq);
        else
                check_preempt_curr(rq, p, 0);
 }
@@ -7415,7 +7701,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void task_move_group_fair(struct task_struct *p, int queued)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq;
@@ -7434,7 +7720,7 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
         * fair sleeper stuff for the first placement, but who cares.
         */
        /*
-        * When !on_rq, vruntime of the task has usually NOT been normalized.
+        * When !queued, vruntime of the task has usually NOT been normalized.
         * But there are some cases where it has already been normalized:
         *
         * - Moving a forked child which is waiting for being woken up by
@@ -7445,14 +7731,14 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
         * To prevent boost or penalty in the new cfs_rq caused by delta
         * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
         */
-       if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-               on_rq = 1;
+       if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
+               queued = 1;
 
-       if (!on_rq)
+       if (!queued)
                se->vruntime -= cfs_rq_of(se)->min_vruntime;
        set_task_rq(p, task_cpu(p));
        se->depth = se->parent ? se->parent->depth + 1 : 0;
-       if (!on_rq) {
+       if (!queued) {
                cfs_rq = cfs_rq_of(se);
                se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
@@ -7675,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
        .get_rr_interval        = get_rr_interval_fair,
 
+       .update_curr            = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
        .task_move_group        = task_move_group_fair,
 #endif