Added runqueue list; seems to be working. Threads get scheduled, but
author     Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Wed, 5 Jun 2013 15:25:20 +0000 (17:25 +0200)
committer  Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Wed, 5 Jun 2013 15:25:20 +0000 (17:25 +0200)
wait_for_completion and wake_up_process need to be implemented.

include/linux/sched.h
init/main.c
kernel/kthread.c
kernel/sched.new/core.c
kernel/sched.new/sched.h

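For orientation, a minimal sketch of the scheduling discipline this patch wires up (not code from the patch itself): every runnable task is linked into the per-CPU rq->rq_list through its new rq_tasks node, and __schedule() picks the head of that list as the next task while putting the task it is switching away from back at the tail. The helper name pick_next_rr below is hypothetical, struct rq is the private one from kernel/sched.new/sched.h, and locking is omitted just as in the patch.

/*
 * Hypothetical illustration of the round-robin list handling done
 * inline in __schedule() in kernel/sched.new/core.c below.
 */
static struct task_struct *pick_next_rr(struct rq *rq,
					struct task_struct *prev)
{
	struct task_struct *next;

	/* the head of rq->rq_list is the next task to run */
	next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
	list_del(&next->rq_tasks);

	/*
	 * Requeue the outgoing task at the tail; the patch does this
	 * unconditionally (the pid check is commented out).
	 */
	list_add_tail(&prev->rq_tasks, &rq->rq_list);

	return next;
}
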
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e692a02..fadb104 100644
@@ -1258,6 +1258,7 @@ struct task_struct {
        struct sched_info sched_info;
 #endif
 
+       struct list_head rq_tasks;
        struct list_head tasks;
 #ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
diff --git a/init/main.c b/init/main.c
index 63534a1..b749ff4 100644
@@ -626,6 +626,7 @@ asmlinkage void __init start_kernel(void)
 #endif
        cgroup_init();
        cpuset_init();
+
        taskstats_init_early();
        delayacct_init();
 
@@ -863,6 +864,7 @@ static noinline void __init kernel_init_freeable(void)
        /*
         * Wait until kthreadd is all set-up.
         */
+
        wait_for_completion(&kthreadd_done);
 
        /* Now the scheduler is fully set up and can do blocking allocations */
@@ -872,6 +874,7 @@ static noinline void __init kernel_init_freeable(void)
         * init can allocate pages on any node
         */
        set_mems_allowed(node_states[N_MEMORY]);
+
        /*
         * init can run on any cpu.
         */
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9eb7fed..c3f5261 100644
@@ -140,7 +140,7 @@ void kthread_parkme(void)
        __kthread_parkme(to_kthread(current));
 }
 
-static int kthread(void *_create)
+static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
diff --git a/kernel/sched.new/core.c b/kernel/sched.new/core.c
index d2a2e4d..bd8f63d 100644
@@ -15,6 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/export.h>
 #include <linux/context_tracking.h>
+#include <linux/kthread.h>
 
 #include <asm/switch_to.h>
 #include <linux/cgroup.h>
@@ -113,6 +114,8 @@ void sched_init(void)
 
                INIT_LIST_HEAD(&rq->rq_list);
 
+
+
 //             raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
 
@@ -183,10 +186,12 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 
        rq->curr = rq->idle = idle;
 
+       //idle->tasks = rq->rq_list;
+
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
-//     task_thread_info(idle)->preempt_count = 0;
+       task_thread_info(idle)->preempt_count = 0;
 
        /*
         * The idle tasks have their own, simple scheduling class:
@@ -200,6 +205,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  */
 void sched_fork(struct task_struct *p)
 {
+
        p->on_rq                        = 0;
 
        p->se.on_rq                     = 0;
@@ -207,7 +213,6 @@ void sched_fork(struct task_struct *p)
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
        p->se.vruntime                  = 0;
-       INIT_LIST_HEAD(&p->se.group_node);
 
        /*
         * We mark the process as running here. This guarantees that
@@ -220,6 +225,7 @@ void sched_fork(struct task_struct *p)
         * Make sure we do not leak PI boosting priority to the child.
         */
        p->prio = current->normal_prio;
+
 }
 
 /*
@@ -492,6 +498,9 @@ void sched_show_task(struct task_struct *p)
  */
 void schedule_tail(struct task_struct *prev)
 {
+       arch_local_irq_enable();
+       preempt_enable();
+
        return;
 }
 
@@ -569,7 +578,7 @@ void wake_up_new_task(struct task_struct *tsk)
 {
        struct rq *rq = cpu_rq(0);
 
-       list_add_tail(tsk->se->group_node, rq->rq_list);
+       list_add(&tsk->rq_tasks, &rq->rq_list);
        tsk->on_rq = 1;
 }
 
@@ -677,20 +686,24 @@ context_switch(struct rq *rq, struct task_struct *prev,
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
  */
+volatile int kernel_blub = 0;
 static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
-       unsigned long *switch_count;
+//     unsigned long *switch_count = 0;
        struct rq *rq;
        int cpu;
 
+       if (irqs_disabled())
+                       kernel_blub++;
 //need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_note_context_switch(cpu);
        prev = rq->curr;
-
+       if (irqs_disabled())
+                               kernel_blub++;
 //     schedule_debug(prev);
 
 //     if (sched_feat(HRTICK))
@@ -729,9 +742,19 @@ static void __sched __schedule(void)
 
 //     put_prev_task(rq, prev);
 //     next = pick_next_task(rq);
-       next = rq->rq_list->
+
+       next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
+       list_del(&next->rq_tasks);
+//     if(prev->pid != 0) {
+               list_add_tail(&prev->rq_tasks, &rq->rq_list);
+//     }
+
+       if (irqs_disabled())
+                               kernel_blub++;
+
+
 //     clear_tsk_need_resched(prev);
-       rq->skip_clock_update = 0;
+//     rq->skip_clock_update = 0;
 
        if (likely(prev != next)) {
                rq->nr_switches++;
@@ -739,6 +762,8 @@ static void __sched __schedule(void)
 //             ++*switch_count;
 
                context_switch(rq, prev, next); /* unlocks the rq */
+               arch_local_irq_enable();
+               //irqs_enable();
                /*
                 * The context switch have flipped the stack from under us
                 * and restored the local variables which were saved when
@@ -752,7 +777,8 @@ static void __sched __schedule(void)
                raw_spin_unlock_irq(&rq->lock);
 
 //     post_schedule(rq);
-
+       if (irqs_disabled())
+                               kernel_blub++;
        sched_preempt_enable_no_resched();
 //     if (need_resched())
 //             goto need_resched;
diff --git a/kernel/sched.new/sched.h b/kernel/sched.new/sched.h
index abd908b..c6bdf5b 100644
@@ -8,28 +8,15 @@
 struct task_group {
 };
 
+
 struct rq {
        /* runqueue lock: */
        raw_spinlock_t lock;
 
-       struct list_head rq_list;
-
-       /*
-        * nr_running and cpu_load should be in the same cacheline because
-        * remote CPUs use both these fields when doing load calculation.
-        */
-       unsigned int nr_running;
-
-
        u64 nr_switches;
+       unsigned int nr_running;
 
-       /*
-        * This is part of a global counter where only the total sum
-        * over all CPUs matters. A task can increase this counter on
-        * one CPU and if it got migrated afterwards it may decrease
-        * it on another CPU. Always updated under the runqueue lock:
-        */
-       unsigned long nr_uninterruptible;
+       struct list_head rq_list;
 
        struct task_struct *curr, *idle, *stop;
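
The commit message notes that wait_for_completion and wake_up_process are still missing. Purely as a sketch of what the latter could look like on top of the list above (not code from this series; locking and multi-CPU handling are ignored, and cpu_rq(0) is used just as in wake_up_new_task()):

/* Hypothetical sketch only: wake a sleeping task by putting it back
 * on the single runqueue, analogous to wake_up_new_task() above. */
int wake_up_process(struct task_struct *p)
{
	struct rq *rq = cpu_rq(0);

	if (p->on_rq)
		return 0;		/* already queued */

	p->state = TASK_RUNNING;
	list_add(&p->rq_tasks, &rq->rq_list);
	p->on_rq = 1;

	return 1;
}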