Thread (PID=292) does not go to sleep correctly
author Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 16 Jul 2013 16:10:56 +0000 (18:10 +0200)
committer Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 16 Jul 2013 16:10:56 +0000 (18:10 +0200)
kernel/sched.new/core.c
kernel/sched.new/sched.h
kernel/sched/core.c
kernel/softirq.c

diff --git a/kernel/sched.new/core.c b/kernel/sched.new/core.c
index 3f81992..a33acb3 100644
--- a/kernel/sched.new/core.c
+++ b/kernel/sched.new/core.c
 #include <asm/mmu_context.h>
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
+#include <linux/blkdev.h>
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
+#include <linux/delayacct.h>
 #include <linux/export.h>
 #include <linux/context_tracking.h>
 #include <linux/kthread.h>
@@ -136,7 +138,7 @@ void sched_init(void)
 
 
 
-//             raw_spin_lock_init(&rq->lock);
+               raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
 
 //             rq->calc_load_active = 0;
@@ -153,7 +155,7 @@ void sched_init(void)
 //             rq->last_load_update_tick = jiffies;
 
 //             init_rq_hrtick(rq);
-//             atomic_set(&rq->nr_iowait, 0);
+               atomic_set(&rq->nr_iowait, 0);
        }
 
 //     set_load_weight(&init_task);
@@ -161,8 +163,8 @@ void sched_init(void)
        /*
         * The boot idle thread does lazy MMU switching as well:
         */
-//     atomic_inc(&init_mm.mm_count);
-//     enter_lazy_tlb(&init_mm, current);
+       atomic_inc(&init_mm.mm_count);
+       enter_lazy_tlb(&init_mm, current);
 
        /*
         * Make us the idle thread. Technically, schedule() should not be
@@ -206,17 +208,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 
        rq->curr = rq->idle = idle;
 
-       //idle->tasks = rq->rq_list;
-
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
        task_thread_info(idle)->preempt_count = 0;
-
-       /*
-        * The idle tasks have their own, simple scheduling class:
-        */
-//     idle->sched_class = &idle_sched_class;
 }
 
 /*
@@ -246,6 +241,12 @@ void sched_fork(struct task_struct *p)
         */
        p->prio = current->normal_prio;
 
+#ifdef CONFIG_PREEMPT_COUNT
+       /* Want to start with kernel preemption disabled. */
+       task_thread_info(p)->preempt_count = 1;
+#endif
+
+       put_cpu();
 }
 
 /*
@@ -344,7 +345,7 @@ int can_nice(const struct task_struct *p, const int nice)
  */
 struct task_struct *idle_task(int cpu)
 {
-       //printk("\ntask_struct");
+       printk("\ntask_struct");
 
        return 0;
 }
@@ -356,7 +357,7 @@ struct task_struct *idle_task(int cpu)
  */
 int idle_cpu(int cpu)
 {
-       //printk("\nidle_cpu");
+       printk("\nidle_cpu");
 
        return 0;
 }
@@ -379,7 +380,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  */
 void normalize_rt_tasks(void)
 {
-       //printk("\nnormalize_rt_tasks");
+       printk("\nnormalize_rt_tasks");
 
        return;
 }
@@ -394,25 +395,35 @@ void normalize_rt_tasks(void)
  */
 unsigned long nr_running(void)
 {
-       //printk("\nnr_running");
+       printk("\nnr_running");
 
        return 0;
 }
 
 unsigned long long nr_context_switches(void)
 {
-       //printk("\nnr_context_switches");
+       printk("\nnr_context_switches");
 
        return 0;
 }
 
 /*
- * /kernel/sched/core.c:2017
+ * /kernel/sched/core.c:2008
  * number of threads waiting on IO
  */
 unsigned long nr_iowait(void)
 {
-       //printk("\nnr_iowait");
+       printk("\nnr_iowait");
+
+       return 0;
+}
+
+/*
+ * kernel/sched/core.c:2018
+ */
+unsigned long nr_iowait_cpu(int cpu)
+{
+       printk("\nnr_iowait_cpu");
 
        return 0;
 }
@@ -429,7 +440,7 @@ unsigned long nr_iowait(void)
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-       //printk("\nrt_mutex_setprio");
+       printk("\nrt_mutex_setprio");
 
        return;
 }
@@ -464,7 +475,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
  */
 long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
-       //printk("\nsched_getaffinity");
+       printk("\nsched_getaffinity");
 
        return 0;
 }
@@ -551,6 +562,15 @@ void sched_show_task(struct task_struct *p)
        return;
 }
 
+/**
+ * kernel/sched/core.c:652
+ */
+void resched_task(struct task_struct *p)
+{
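+       /* Caller must hold the task's runqueue lock; flag the task for rescheduling. */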
+       assert_raw_spin_locked(&task_rq(p)->lock);
+       set_tsk_need_resched(p);
+}
+
 /**
  * kernel/sched/core.c:1826
  * finish_task_switch - clean up after a task-switch
@@ -615,7 +635,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        }
 }
 
-
 /**
  * kernel/sched/core.c:1905
  * schedule_tail - first thing a freshly forked thread must call.
@@ -667,6 +686,7 @@ void update_rq_clock(struct rq *rq)
  */
 void scheduler_tick(void)
 {
+//     printk("\nscheduler_tick");
 //     int cpu = smp_processor_id();
 //     struct rq *rq = cpu_rq(cpu);
 //     struct task_struct *curr = rq->curr;
@@ -686,8 +706,6 @@ void scheduler_tick(void)
 //     trigger_load_balance(rq, cpu);
 //#endif
 //
-
-
 }
 
 /*
@@ -762,10 +780,16 @@ void update_cpu_load_nohz(void)
  */
 void wake_up_new_task(struct task_struct *tsk)
 {
+       unsigned long flags;
        struct rq *rq = cpu_rq(0);
 
+       raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+
        list_add(&tsk->rq_tasks, &rq->rq_list);
        tsk->on_rq = 1;
+
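+       /* Preempt the idle task if nothing else is running, so the new task gets the CPU. */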
+       if (rq->curr == rq->idle)
+               resched_task(rq->curr);
+
+       raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
 }
 
 /*
@@ -781,6 +805,7 @@ int __sched _cond_resched(void)
        }
        return 0;
 }
+EXPORT_SYMBOL(_cond_resched);
 
 /*
  * kernel/sched/core.c:1931
@@ -836,8 +861,8 @@ context_switch(struct rq *rq, struct task_struct *prev,
         * CPUs since it called schedule(), thus the 'rq' on its stack
         * frame will be invalid.
         */
-//     finish_task_switch(this_rq(), prev);
-       rq->prev_mm = NULL;
+       finish_task_switch(this_rq(), prev);
+
 }
 
 /*
@@ -943,6 +968,8 @@ need_resched:
        else
                next = rq->idle;
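+       /* Debug aid for this commit: report when the misbehaving thread (PID 292) is picked to run. */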
 
+       if ((int)next->pid == 292)
+               printk("\nKernel Thread 292");
 
        clear_tsk_need_resched(prev);
 //     rq->skip_clock_update = 0;
@@ -983,15 +1010,6 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
-/**
- * kernel/sched/core.c:652
- */
-void resched_task(struct task_struct *p)
-{
-       assert_raw_spin_locked(&task_rq(p)->lock);
-       set_tsk_need_resched(p);
-}
-
 /**
  * kernel/sched/core.c:1439
  * try_to_wake_up - wake up a thread
@@ -1016,6 +1034,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        struct rq *rq = cpu_rq(0);
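+       /* Make the wake-up condition written by the caller visible before the task state changes. */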
 
+       smp_wmb();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;
@@ -1052,21 +1071,19 @@ out:
  */
 static void try_to_wake_up_local(struct task_struct *p)
 {
-       //struct rq *rq = task_rq(p);
+       struct rq *rq = task_rq(p);
 
-       struct rq *rq = cpu_rq(0);
+       if (WARN_ON_ONCE(rq != this_rq()) ||
+           WARN_ON_ONCE(p == current))
+               return;
 
-//     if (WARN_ON_ONCE(rq != this_rq()) ||
-//         WARN_ON_ONCE(p == current))
-//             return;
+       lockdep_assert_held(&rq->lock);
 
-//     lockdep_assert_held(&rq->lock);
-//
-//     if (!raw_spin_trylock(&p->pi_lock)) {
-//             raw_spin_unlock(&rq->lock);
-//             raw_spin_lock(&p->pi_lock);
-//             raw_spin_lock(&rq->lock);
-//     }
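+       /* p->pi_lock nests outside rq->lock: if the trylock fails, drop rq->lock and retake both in order. */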
+       if (!raw_spin_trylock(&p->pi_lock)) {
+               raw_spin_unlock(&rq->lock);
+               raw_spin_lock(&p->pi_lock);
+               raw_spin_lock(&rq->lock);
+       }
 
        if (!(p->state & TASK_NORMAL))
                goto out;
@@ -1075,19 +1092,17 @@ static void try_to_wake_up_local(struct task_struct *p)
        p->state = TASK_RUNNING;
        list_add(&p->rq_tasks, &rq->rq_list);
 
+       /* if a worker is waking up, notify workqueue */
+       if (p->flags & PF_WQ_WORKER)
+               wq_worker_waking_up(p, cpu_of(rq));
+
        if (rq->curr == rq->idle)
                resched_task(rq->curr);
 
-//     if (!p->on_rq)
-//             ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
-//     ttwu_do_wakeup(rq, p, 0);
-//     ttwu_stat(p, smp_processor_id(), 0);
 out:
        raw_spin_unlock(&p->pi_lock);
 }
 
-
 /*
  * kernel/sched/core.c:3125
  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
@@ -1123,7 +1138,8 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * It may be assumed that this function implies a write memory barrier before
  * changing the task state if and only if any tasks are woken up.
  */
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key)
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
+                       int nr_exclusive, void *key)
 {
        unsigned long flags;
 
@@ -1150,6 +1166,7 @@ int wake_up_process(struct task_struct *p)
        WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
+EXPORT_SYMBOL(wake_up_process);
 
 static inline long __sched
 do_wait_for_common(struct completion *x,
@@ -1177,7 +1194,6 @@ do_wait_for_common(struct completion *x,
        return timeout ?: 1;
 }
 
-
 static inline long __sched
 __wait_for_common(struct completion *x,
                  long (*action)(long), long timeout, int state)
@@ -1190,14 +1206,12 @@ __wait_for_common(struct completion *x,
        return timeout;
 }
 
-
 static long __sched
 wait_for_common(struct completion *x, long timeout, int state)
 {
        return __wait_for_common(x, schedule_timeout, timeout, state);
 }
 
-
 /**
  * kernel/sched/core.c:3322
  * wait_for_completion: - waits for completion of a task
@@ -1211,34 +1225,9 @@ wait_for_common(struct completion *x, long timeout, int state)
  */
 void __sched wait_for_completion(struct completion *x)
 {
-
-       long timeout = MAX_SCHEDULE_TIMEOUT;
-       int state = TASK_UNINTERRUPTIBLE;
-
-       might_sleep();
-
-       spin_lock_irq(&x->wait.lock);
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
-
-               __add_wait_queue_tail_exclusive(&x->wait, &wait);
-               do {
-                       if (signal_pending_state(state, current)) {
-                               timeout = -ERESTARTSYS;
-                               break;
-                       }
-                       __set_current_state(state);
-                       spin_unlock_irq(&x->wait.lock);
-                       timeout = schedule_timeout(timeout);
-                       spin_lock_irq(&x->wait.lock);
-               } while (!x->done && timeout);
-               __remove_wait_queue(&x->wait, &wait);
-               if (!x->done)
-                       return;
-       }
-       x->done--;
-       spin_unlock_irq(&x->wait.lock);
+       wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion);
 
 /**
  * kernel/sched/core.c:3231
@@ -1372,8 +1361,10 @@ EXPORT_SYMBOL_GPL(__wake_up_sync_key);
  */
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
+       WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, state, 0);
 }
+EXPORT_SYMBOL(wake_up_state);
 
 /**
  * kernel/sched/core.c:4389
@@ -1449,16 +1440,6 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
        return;
 }
 
-/*
- * kernel/sched/core.c:2018
- */
-unsigned long nr_iowait_cpu(int cpu)
-{
-       //printk("\nnr_iowait_cpu");
-
-       return 0;
-}
-
 /*
  * kernel/sched/core.c:4474
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
@@ -1467,18 +1448,37 @@ unsigned long nr_iowait_cpu(int cpu)
 void __sched io_schedule(void)
 {
 //     printk("\nio_schedule");
+
+       struct rq *rq = raw_rq();
+
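+       /* Account this sleep as I/O wait on the local runqueue and flush plugged block I/O first. */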
+       delayacct_blkio_start();
+       atomic_inc(&rq->nr_iowait);
+       blk_flush_plug(current);
+       current->in_iowait = 1;
        schedule();
-       return;
+       current->in_iowait = 0;
+       atomic_dec(&rq->nr_iowait);
+       delayacct_blkio_end();
 }
+EXPORT_SYMBOL(io_schedule);
 
 /*
  * kernel/sched/core.c:4489
  */
 long __sched io_schedule_timeout(long timeout)
 {
-       long ret;
 //     printk("\nio_schedule_timeout");
+       struct rq *rq = raw_rq();
+       long ret;
+
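+       /* Same I/O-wait accounting as io_schedule(), but the sleep is bounded by the timeout. */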
+       delayacct_blkio_start();
+       atomic_inc(&rq->nr_iowait);
+       blk_flush_plug(current);
+       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
+       current->in_iowait = 0;
+       atomic_dec(&rq->nr_iowait);
+       delayacct_blkio_end();
        return ret;
 }
 
@@ -1501,7 +1501,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
  */
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
-       //printk("\n__wake_up_sync");
+       printk("\n__wake_up_sync");
 
        return;
 }
@@ -1512,7 +1512,7 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  */
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
 {
-       //printk("\n__wake_up_locked");
+       printk("\n__wake_up_locked");
 
        return;
 }
@@ -1590,7 +1590,7 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
        return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
 }
-
+EXPORT_SYMBOL(wait_for_completion_timeout);
 
 
 
diff --git a/kernel/sched.new/sched.h b/kernel/sched.new/sched.h
index c8a19b7..d8222de 100644
--- a/kernel/sched.new/sched.h
+++ b/kernel/sched.new/sched.h
@@ -24,17 +24,30 @@ struct rq {
 
        u64 clock;
        u64 clock_task;
+
+       atomic_t nr_iowait;
+
        int skip_clock_update;
 
 };
 
-DECLARE_PER_CPU(struct rq, runqueues);
-
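+/* CPU that owns this runqueue; always 0 on non-SMP builds. */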
+static inline int cpu_of(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+       return rq->cpu;
+#else
+       return 0;
+#endif
+}
 
+DECLARE_PER_CPU(struct rq, runqueues);
 
 #define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
 #define this_rq()              (&__get_cpu_var(runqueues))
 #define task_rq(p)             cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
+#define raw_rq()               (&__raw_get_cpu_var(runqueues))
+
 
 void account_idle_ticks(unsigned long ticks);
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 67d0465..933fcde 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1542,8 +1542,10 @@ EXPORT_SYMBOL(wake_up_process);
 
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
+       WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, state, 0);
 }
+EXPORT_SYMBOL(wake_up_state);
 
 /*
  * Perform scheduler related setup for a newly forked process p.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 1c2e7d5..f7bd017 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -73,9 +73,9 @@ static void wakeup_softirqd(void)
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
-       printk("\nTask struct address: %p", (void*)tsk);
-       if (tsk)
-               printk("\nTask struct state: %ld", (long int)tsk->state);
+//     printk("\nTask struct address: %p", (void*)tsk);
+//     if (tsk)
+//             printk("\nTask struct state: %ld", (long int)tsk->state);
 
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);