Fixed multiple race conditions for our runqueues. Still have problems with
the task list.

author		Jens Krieg <jkrieg@mailbox.tu-berlin.de>
		Mon, 29 Jul 2013 08:44:10 +0000 (10:44 +0200)
committer	Jens Krieg <jkrieg@mailbox.tu-berlin.de>
		Mon, 29 Jul 2013 08:44:10 +0000 (10:44 +0200)
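
The common pattern behind these fixes (a sketch taken from the patched
wake_up_new_task() below; surrounding declarations elided) is to nest
rq->lock inside the task's pi_lock around every manipulation of rq->rq_list:

	raw_spin_lock_irqsave(&tsk->pi_lock, flags);
	raw_spin_lock(&rq->lock);	/* pi_lock outer, rq->lock inner */

	/* runqueue list is only touched while rq->lock is held */
	list_add(&tsk->rq_tasks, &rq->rq_list);
	tsk->on_rq = 1;

	if (rq->curr == rq->idle)
		resched_task(rq->curr);

	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);

try_to_wake_up() now takes the locks in the same order, which should keep
the wakeup paths from racing with new-task insertion on the runqueue list.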

kernel/kthread.c
kernel/sched.new/core.c
kernel/softirq.c
kernel/workqueue.c

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 40304fd..8021955 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -140,7 +140,6 @@ void kthread_parkme(void)
        __kthread_parkme(to_kthread(current));
 }
 
-volatile unsigned int thread_counter = 0;
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -150,8 +149,6 @@ static int kthread(void *_create)
        struct kthread self;
        int ret;
 
-       thread_counter++;
-
        self.flags = 0;
        self.data = data;
        init_completion(&self.exited);
@@ -171,9 +168,6 @@ static int kthread(void *_create)
                ret = threadfn(data);
        }
 
-       printk("\nKilled Thread(%p) PID: %i", (void*)current, (int)current->pid);
-       thread_counter--;
-       printk("\nThread counter: %i", thread_counter);
        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
 }
diff --git a/kernel/sched.new/core.c b/kernel/sched.new/core.c
index 93307b5..aabccae 100644
--- a/kernel/sched.new/core.c
+++ b/kernel/sched.new/core.c
 #include "../workqueue_internal.h"
 
 
-
-volatile int point_of_return = 0;
-volatile struct task_struct *test_tsk = 0;
-volatile int test_tsk_state = 0;
-volatile int test_tsk_pid = 292;
-
-
 //
 // Variables
 //
@@ -187,10 +180,6 @@ void sched_init(void)
 
 //     init_sched_fair_class();
 
-
-       if (CONFIG_HAVE_UNSTABLE_SCHED_CLOCK)
-               printk("----> CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is set!\n");
-
        scheduler_running = 1;
 }
 
@@ -214,9 +203,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): init_idle -> %i\n", test_tsk_pid, TASK_RUNNING);
-//     }
        rq->curr = rq->idle = idle;
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -247,9 +233,6 @@ void sched_fork(struct task_struct *p)
         */
        p->state = TASK_RUNNING;
 
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): sched_fork -> %i\n", test_tsk_pid, TASK_RUNNING);
-//     }
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
@@ -625,6 +608,10 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 //     finish_arch_switch(prev);
 //     perf_event_task_sched_in(prev, current);
 //     finish_lock_switch(rq, prev);
+#ifdef CONFIG_DEBUG_SPINLOCK
+       /* this is a valid case when another task releases the spinlock */
+       rq->lock.owner = current;
+#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
@@ -871,12 +858,16 @@ void wake_up_new_task(struct task_struct *tsk)
        struct rq *rq = cpu_rq(0);
 
        raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+       raw_spin_lock(&rq->lock);
 
        list_add(&tsk->rq_tasks, &rq->rq_list);
        tsk->on_rq = 1;
 
        if (rq->curr == rq->idle)
                resched_task(rq->curr);
+
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
 }
 
 /*
@@ -990,6 +981,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
  */
+volatile struct task_struct test_tsk;
 static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
@@ -1011,26 +1003,14 @@ need_resched:
 
        raw_spin_lock_irq(&rq->lock);
 
-       if (point_of_return) {
-               point_of_return = 0;
-               test_tsk = prev;
-               test_tsk_state = prev->state;
-//             if (test_tsk->state != test_tsk_state) {
-//                     printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
-//                     printk("state changed from %i to %i\n", (int)test_tsk_state, (int)test_tsk->state);
-//             }
-       }
 
 //     switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
-//                     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//                             printk("task(%i): __schedule -> %i\n", test_tsk_pid, TASK_RUNNING);
-//                     }
                } else {
 //                     deactivate_task(rq, prev, DEQUEUE_SLEEP);
-                       list_del_init(&prev->rq_tasks);
+//                     list_del_init(&prev->rq_tasks);
                        prev->on_rq = 0;
 
                        /*
@@ -1057,27 +1037,28 @@ need_resched:
 //     put_prev_task(rq, prev);
 //     next = pick_next_task(rq);
 
+       if (prev != rq->idle && prev->on_rq)
+               list_add_tail(&prev->rq_tasks, &rq->rq_list);
+
        /*      In case the only runnable task gets deactivated, we need to schedule
         *      the idle tasks.
         */
        if(!list_empty(&rq->rq_list)) {
+               assert_raw_spin_locked(&rq->lock);
                next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
                list_del(&next->rq_tasks);
-               list_add_tail(&next->rq_tasks, &rq->rq_list);
+//             list_add_tail(&next->rq_tasks, &rq->rq_list);
                next->se.exec_start = rq->clock_task;
        }
        else
                next = rq->idle;
 
-       if ((int)next->pid == test_tsk_pid) {
-               printk("task(%i), address: %p\n", test_tsk_pid, (void*)prev);
-               printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
-               point_of_return = 1;
-       }
-
        clear_tsk_need_resched(prev);
 //     rq->skip_clock_update = 0;
 
+//     if ((long)next == 0xffff880007059e40)
+//             printk("task(%i) scheduled\n", (int)next->pid);
+
        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
@@ -1135,6 +1116,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
        unsigned long flags;
        int cpu, success = 0;
+       int ret = 0;
 
        struct rq *rq = cpu_rq(0);
 
@@ -1143,14 +1125,24 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (!(p->state & state))
                goto out;
 
+       raw_spin_lock(&rq->lock);
+       assert_raw_spin_locked(&rq->lock);
+
+       if (p->on_rq) {
+               if (rq->curr == rq->idle)
+                       resched_task(rq->curr);
+               ret = 1;
+       }
+
        success = 1; /* we're going to change ->state */
        cpu = task_cpu(p);
 
+       if (ret)
+               goto stat;
+
        p->on_rq = 1;
        p->state = TASK_RUNNING;
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): try_to_wake_up -> %i\n", test_tsk_pid, TASK_RUNNING);
-//     }
+
        list_add(&p->rq_tasks, &rq->rq_list);
 
        if (rq->curr == rq->idle)
@@ -1159,7 +1151,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
                wq_worker_waking_up(p, 0);
-
+stat:
+       raw_spin_unlock(&rq->lock);
 out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -1186,6 +1179,8 @@ static void try_to_wake_up_local(struct task_struct *p)
 
        lockdep_assert_held(&rq->lock);
 
+       assert_raw_spin_locked(&rq->lock);
+
        if (!raw_spin_trylock(&p->pi_lock)) {
                raw_spin_unlock(&rq->lock);
                raw_spin_lock(&p->pi_lock);
@@ -1197,9 +1192,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 
        p->on_rq = 1;
        p->state = TASK_RUNNING;
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): try_to_wake_up_local -> %i\n", test_tsk_pid, TASK_RUNNING);
-//     }
+
        list_add(&p->rq_tasks, &rq->rq_list);
 
        /* if a worker is waking up, notify workqueue */
@@ -1292,9 +1285,6 @@ do_wait_for_common(struct completion *x,
                                break;
                        }
                        __set_current_state(state);
-//                     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//                             printk("task(%i): do_wait_for_common -> %i (%ld)\n", test_tsk_pid, state, (long)timeout);
-//                     }
                        spin_unlock_irq(&x->wait.lock);
                        timeout = action(timeout);
                        spin_lock_irq(&x->wait.lock);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f7bd017..14d7758 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -73,10 +73,6 @@ static void wakeup_softirqd(void)
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
-//     printk("\nTask struct address: %p", (void*)tsk);
-//     if (tsk)
-//             printk("\nTask struct state: %ld", (long int)tsk->state);
-
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 929ffcd..b48cd59 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,8 +44,7 @@
 #include <linux/hashtable.h>
 
 #include "workqueue_internal.h"
-extern volatile struct task_struct *test_tsk;
-extern volatile int test_tsk_pid;
+
 enum {
        /*
         * worker_pool flags
@@ -1934,9 +1933,6 @@ restart:
                        break;
 
                __set_current_state(TASK_INTERRUPTIBLE);
-//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//                     printk("task(%i): maybe_create_worker -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
-//             }
                schedule_timeout(CREATE_COOLDOWN);
 
                if (!need_to_create_worker(pool))
@@ -2301,9 +2297,6 @@ sleep:
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): worker_thread -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
-//     }
        spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
@@ -2345,15 +2338,9 @@ static int rescuer_thread(void *__rescuer)
        rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
-//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//             printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
-//     }
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
-//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//                     printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_RUNNING);
-//             }
                rescuer->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }
@@ -2369,9 +2356,6 @@ repeat:
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
-//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-//                     printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_RUNNING);
-//             }
                mayday_clear_cpu(cpu, wq->mayday_mask);
 
                /* migrate to the target cpu if possible */