Merge tag 'v4.1' into p/abusse/merge_upgrade
diff --git a/kernel/sched/cfs/debug.c b/kernel/sched/cfs/debug.c
index e076bdd..a245c1f 100644
--- a/kernel/sched/cfs/debug.c
+++ b/kernel/sched/cfs/debug.c
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
+#include <linux/mempolicy.h>
 
 #include "sched.h"
 
@@ -70,7 +71,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
        if (!se) {
                struct sched_avg *avg = &cpu_rq(cpu)->avg;
                P(avg->runnable_avg_sum);
-               P(avg->runnable_avg_period);
+               P(avg->avg_period);
                return;
        }
 
@@ -93,8 +94,10 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
        P(se->load.weight);
 #ifdef CONFIG_SMP
        P(se->avg.runnable_avg_sum);
-       P(se->avg.runnable_avg_period);
+       P(se->avg.running_avg_sum);
+       P(se->avg.avg_period);
        P(se->avg.load_avg_contrib);
+       P(se->avg.utilization_avg_contrib);
        P(se->avg.decay_count);
 #endif
 #undef PN
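
[note] running_avg_sum, avg_period and utilization_avg_contrib come from the
per-entity utilization tracking merged for v4.1. As a hedged sketch of how the
printed contribution relates to the raw sums (modeled from memory on
__update_task_entity_utilization() in v4.1's fair.c; not part of this diff):

	u32 contrib;

	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
	contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
	contrib /= (se->avg.avg_period + 1);
	se->avg.utilization_avg_contrib = scale_load(contrib);
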
@@ -110,8 +113,7 @@ static char *task_group_path(struct task_group *tg)
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;
 
-       cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
-       return group_path;
+       return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 }
 #endif
 
@@ -124,7 +126,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                SEQ_printf(m, " ");
 
        SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
-               p->comm, p->pid,
+               p->comm, task_pid_nr(p),
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
@@ -137,6 +139,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
        SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
                0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+       SEQ_printf(m, " %d", task_node(p));
+#endif
 #ifdef CONFIG_CGROUP_SCHED
        SEQ_printf(m, " %s", task_group_path(task_group(p)));
 #endif
@@ -147,7 +152,6 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 {
        struct task_struct *g, *p;
-       unsigned long flags;
 
        SEQ_printf(m,
        "\nrunnable tasks:\n"
@@ -156,16 +160,14 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
        "------------------------------------------------------"
        "----------------------------------------------------\n");
 
-       read_lock_irqsave(&tasklist_lock, flags);
-
-       do_each_thread(g, p) {
-               if (!p->on_rq || task_cpu(p) != rq_cpu)
+       rcu_read_lock();
+       for_each_process_thread(g, p) {
+               if (task_cpu(p) != rq_cpu)
                        continue;
 
                print_task(m, rq, p);
-       } while_each_thread(g, p);
-
-       read_unlock_irqrestore(&tasklist_lock, flags);
+       }
+       rcu_read_unlock();
 }
 
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
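
[note] Two separate simplifications land in the print_rq() hunk above: the walk
drops the !p->on_rq filter (every task bound to the CPU is printed, not just
runnable ones), and tasklist_lock is replaced by RCU. The lockless pattern in
isolation, as a minimal sketch:

	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;
		/* p is guaranteed to stay valid only inside this section */
		print_task(m, rq, p);
	}
	rcu_read_unlock();
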
@@ -214,6 +216,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
+       SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
+                       cfs_rq->utilization_load_avg);
 #ifdef CONFIG_FAIR_GROUP_SCHED
        SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
@@ -225,6 +229,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 #endif
+#ifdef CONFIG_CFS_BANDWIDTH
+       SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
+                       cfs_rq->tg->cfs_bandwidth.timer_active);
+       SEQ_printf(m, "  .%-30s: %d\n", "throttled",
+                       cfs_rq->throttled);
+       SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
+                       cfs_rq->throttle_count);
+#endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_group_stats(m, cpu, cfs_rq->tg);
@@ -253,6 +265,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 #undef P
 }
 
+void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
+{
+       SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
+       SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
+}
+
 extern __read_mostly int sched_clock_running;
 
 static void print_cpu(struct seq_file *m, int cpu)
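
[note] print_dl_rq() is the SCHED_DEADLINE counterpart of print_cfs_rq() and
print_rt_rq(); the print_dl_stats() call added to print_cpu() further down
reaches it. In upstream v4.1 the glue lives in kernel/sched/deadline.c and,
quoted from memory as a sketch, is simply:

	void print_dl_stats(struct seq_file *m, int cpu)
	{
		print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
	}
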
@@ -289,8 +307,9 @@ do {                                                                        \
        P(nr_load_updates);
        P(nr_uninterruptible);
        PN(next_balance);
-       P(curr->pid);
+       SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
+       PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
@@ -309,6 +328,7 @@ do {                                                                        \
        P(sched_goidle);
 #ifdef CONFIG_SMP
        P64(avg_idle);
+       P64(max_idle_balance_cost);
 #endif
 
        P(ttwu_count);
@@ -320,10 +340,9 @@ do {                                                                       \
        spin_lock_irqsave(&sched_debug_lock, flags);
        print_cfs_stats(m, cpu);
        print_rt_stats(m, cpu);
+       print_dl_stats(m, cpu);
 
-       rcu_read_lock();
        print_rq(m, rq, cpu);
-       rcu_read_unlock();
        spin_unlock_irqrestore(&sched_debug_lock, flags);
        SEQ_printf(m, "\n");
 }
@@ -345,7 +364,7 @@ static void sched_debug_header(struct seq_file *m)
        cpu_clk = local_clock();
        local_irq_restore(flags);
 
-       SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
+       SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
@@ -359,7 +378,7 @@ static void sched_debug_header(struct seq_file *m)
        PN(cpu_clk);
        P(jiffies);
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-       P(sched_clock_stable);
+       P(sched_clock_stable());
 #endif
 #undef PN
 #undef P
@@ -488,11 +507,61 @@ static int __init init_sched_debug_procfs(void)
 
 __initcall(init_sched_debug_procfs);
 
+#define __P(F) \
+       SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
+#define P(F) \
+       SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
+#define __PN(F) \
+       SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
+#define PN(F) \
+       SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
+
+
+static void sched_show_numa(struct task_struct *p, struct seq_file *m)
+{
+#ifdef CONFIG_NUMA_BALANCING
+       struct mempolicy *pol;
+       int node, i;
+
+       if (p->mm)
+               P(mm->numa_scan_seq);
+
+       task_lock(p);
+       pol = p->mempolicy;
+       if (pol && !(pol->flags & MPOL_F_MORON))
+               pol = NULL;
+       mpol_get(pol);
+       task_unlock(p);
+
+       SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
+
+       for_each_online_node(node) {
+               for (i = 0; i < 2; i++) {
+                       unsigned long nr_faults = -1;
+                       int cpu_current, home_node;
+
+                       if (p->numa_faults)
+                               nr_faults = p->numa_faults[2*node + i];
+
+                       cpu_current = !i ? (task_node(p) == node) :
+                               (pol && node_isset(node, pol->v.nodes));
+
+                       home_node = (p->numa_preferred_nid == node);
+
+                       SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
+                               i, node, cpu_current, home_node, nr_faults);
+               }
+       }
+
+       mpol_put(pol);
+#endif
+}
+
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
        unsigned long nr_switches;
 
-       SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+       SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
                                                get_nr_threads(p));
        SEQ_printf(m,
                "---------------------------------------------------------"
@@ -546,7 +615,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
-                       do_div(avg_atom, nr_switches);
+                       avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;
 
@@ -571,8 +640,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        P(se.load.weight);
 #ifdef CONFIG_SMP
        P(se.avg.runnable_avg_sum);
-       P(se.avg.runnable_avg_period);
+       P(se.avg.running_avg_sum);
+       P(se.avg.avg_period);
        P(se.avg.load_avg_contrib);
+       P(se.avg.utilization_avg_contrib);
        P(se.avg.decay_count);
 #endif
        P(policy);
@@ -591,6 +662,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
                SEQ_printf(m, "%-45s:%21Ld\n",
                           "clock-delta", (long long)(t1-t0));
        }
+
+       sched_show_numa(p, m);
 }
 
 void proc_sched_set_task(struct task_struct *p)