single-core framework now runs on the SMP kernel
authorRené Sechting <sechting@mailbox.tu-berlin.de>
Mon, 28 Oct 2013 09:57:41 +0000 (10:57 +0100)
committerRené Sechting <sechting@mailbox.tu-berlin.de>
Mon, 28 Oct 2013 09:57:41 +0000 (10:57 +0100)
The framework is running again on the current SMP kernel, but only with the
SMP functionality deactivated. The only difference from the former buggy
version is that the framework invocation is now always performed with a locked
runqueue — but only with the Linux runqueue, not the module-specific one.
Maybe that should be changed; it is not yet clear which approach is better.

debug_qemu
framework/fw_list.c
framework/modules/mod_sched_framework.h
framework/modules/rr_module.c
framework/os/linux/fw_task_linux.c
framework/pub_sub.c
framework/pub_sub.h
kernel/fork.c
kernel/sched.new/core.c
kernel/sched/core.c
run_qemu

index 065fb09..a339025 100755 (executable)
@@ -1,3 +1,3 @@
 #!/bin/sh
 
-qemu-system-x86_64 -machine accel=tcg -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh"
+qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh"
index 8c9bd14..4d4e339 100644 (file)
@@ -78,6 +78,7 @@ void list_fw_del(struct list_fw_head *entry)
                entry->next = LIST_POISON1;
                entry->prev = LIST_POISON2;
        }
+       entry->next = entry->prev = entry;
 }
 
 struct list_fw_head *__list_fw_pop(struct list_fw_head *head)
index b840fcc..d125a19 100644 (file)
@@ -13,6 +13,7 @@
 #include<mod_sched_defines.h>
 
 //TODO: This should be the enums from pub_sub.h
+#define WAKE_UP_TASK 0
 #define NEW_TASK 1
 #define TASK_FINISHED 2 
 #define TICK 3
index 38b9aa9..587339d 100644 (file)
@@ -25,15 +25,9 @@ static struct fw_task *idle;
 
 static struct fw_task *task_received(struct fw_task *task)
 {
-//     printfw("\ntask-state: %d",fw_get_state(task));
-
        struct rr_info *rq_ele;
-       /*
-       if(task->real_task == get_idle_task()->real_task)
-               return NULL;
-       */
-       printfw("new task %d\n", fw_get_pid(task));
-
+//     if(system_state == SYSTEM_RUNNING)
+//             printfw("new task %d\n", fw_get_pid(task));
        rq_ele = (struct rr_info*)((unsigned long)task+offset);
        list_fw_add_tail(&rq_ele->head, &rr_rq->queue);
        rq_ele->on_rq = 1;
@@ -43,16 +37,17 @@ static struct fw_task *task_received(struct fw_task *task)
 
 static struct fw_task *task_finished(struct fw_task *finished)
 {
-       struct list_fw_head *next_elem;
+       struct rr_info *next_elem;
        struct fw_task *task;
-       printfw("task finished %d\n",fw_get_pid(finished));
+//     printfw("task finished %d\n",fw_get_pid(finished));
 
+//     next_elem = (struct rr_info *)((unsigned long)finished+offset);
+//     next_elem->on_rq = 0;
        if(!list_fw_empty(&rr_rq->queue)){
                //get the next task running!
-               next_elem = __list_fw_pop(&(rr_rq->queue));
+               next_elem = (struct rr_info *)__list_fw_pop(&(rr_rq->queue));
                task = (struct fw_task*)((unsigned long)next_elem-offset);
                rr_rq->curr = task;
-               //rr_rq->curr = NULL;
                return task;
        }else{
                //every task is done, go to idle mode
@@ -65,6 +60,10 @@ static struct fw_task *tick_received(void)
 {
        struct fw_task *task = rr_rq->curr;
        struct rr_info *next_elem;
+//     if(system_state == SYSTEM_RUNNING)
+//             printfw(".");
+       if(list_fw_empty(&rr_rq->queue))
+               return NULL;
 
        if((task == idle)&&!list_fw_empty(&rr_rq->queue)){
                next_elem = (struct rr_info *)__list_fw_pop(&(rr_rq->queue));
@@ -82,10 +81,11 @@ static struct fw_task *tick_received(void)
                next_elem = NULL;
                next_elem = (struct rr_info *)__list_fw_pop(&(rr_rq->queue));
                task = (struct fw_task*)((unsigned long)next_elem-offset);
+               next_elem->on_rq = 0;
                rr_rq->curr = task;
                return task;
        }
-       return NULL;
+       return idle;
 }
 
 static struct fw_task *task_yielded(struct fw_task *yielded)
@@ -93,15 +93,24 @@ static struct fw_task *task_yielded(struct fw_task *yielded)
 
 //     printfw("yield-state: %d",fw_get_state(yielded));
        struct rr_info *next_elem;
+       struct rr_info *pos;
        struct fw_task *task;
-       //printfw("task has yielded\n");
+//     printfw("task has yielded %d\n",fw_get_pid(yielded));
        next_elem = (struct rr_info *) ((unsigned long)yielded + offset);
        next_elem->on_rq = 0;
        next_elem = NULL;
-//     next_elem = (struct rr_info*)((unsigned long)yielded+offset);
-//     next_elem->on_rq = 0;
        if(!list_fw_empty(&rr_rq->queue)){
                next_elem = (struct rr_info *)__list_fw_pop(&(rr_rq->queue));
+/*             if(system_state == SYSTEM_RUNNING){
+                       printfw("tasklist: ");
+                       fw_list_for_each_entry(pos, &rr_rq->queue, head){
+                               task = (struct fw_task*)((unsigned long)pos-offset);
+                               printfw("%d ",fw_get_pid(task));
+                               printfw("hallo");
+                       }
+                       printfw("\n");
+               }
+*/
                next_elem->on_rq = 0;
                task = (struct fw_task*)((unsigned long)next_elem-offset);
                rr_rq->curr = task;
@@ -110,20 +119,17 @@ static struct fw_task *task_yielded(struct fw_task *yielded)
                rr_rq->curr = idle;
                return idle;
        }
-       return NULL;
 }
 
 static struct fw_task *wake_up_task(struct fw_task *wake_up)
 {
-
+//     if(system_state == SYSTEM_RUNNING)
+//             printfw("aufwachen!!! %d\n",fw_get_pid(wake_up));
        struct rr_info *to_wake_up = (struct rr_info*)((unsigned long)wake_up+offset);
        if(!to_wake_up->on_rq){
                list_fw_add_tail(&to_wake_up->head,&(rr_rq->queue));
                to_wake_up->on_rq = 1;
        }
-
-//     printfw("\n wake_up called for task: %d",fw_get_pid(wake_up));
-
        return NULL;
 }
 
@@ -143,7 +149,7 @@ int init_sched_rr_module(void)
        subscribe_to_topic(TASK_FINISHED, &task_finished);
        subscribe_to_topic(TICK, &tick_received);
        subscribe_to_topic(TASK_YIELD, &task_yielded);
-       subscribe_to_topic(0,&wake_up_task);
+       subscribe_to_topic(WAKE_UP_TASK,&wake_up_task);
        printfw("SCHEDULER INITIALIZED!\n");
 
        return 0;
index f72bc54..9dec82f 100644 (file)
@@ -31,22 +31,23 @@ void insert_fw_task(struct fw_task *new, void *tsk)
        new->real_task = tsk;
 }
 
-
-//TODO: lock se shizzle aut off se tasks wenn juh äksess se task wälljuhs
-int fw_get_state(struct fw_task *task)
+int fw_get_pid(struct fw_task *task)
 {
        struct task_struct *ptr = task->real_task;
-       return ptr->state;
+       return (int)ptr->pid;
 //     return 0;
 }
 
-int fw_get_pid(struct fw_task *task)
+//TODO: lock se shizzle aut off se tasks wenn juh äksess se task wälljuhs
+int fw_get_state(struct fw_task *task)
 {
-       struct task_struct *ptr = task->real_task;
-       return (int)ptr->pid;
-//     return 0;
+//     struct task_struct *ptr = task->real_task;
+//     return ptr->state;
+       return 0;
 }
 
+
+
 int fw_get_flags(struct fw_task *task)
 {
 //     struct task_struct *ptr = task->real_task;
index 232f945..06e1c2a 100644 (file)
@@ -189,11 +189,8 @@ void *send_to_topic(int topic_id, ...) {
                                }
                                va_end(list);
                        }
-                       if(ret){
+                       if(ret)
                                to_schedule = find_real_task(ret);
-                       }else{
-                               to_schedule = NULL;
-                       }
        }
        return to_schedule;
 }
index dca9890..215e484 100644 (file)
@@ -31,6 +31,7 @@ typedef enum {ACTIVATED, DEACTIVATED, COMING, SOON} topic_t;
 /**
  * Callbacks for the pubsub system
  */
+
 typedef void *topic_0_callback(struct fw_task *wake_up);
 typedef void *topic_1_callback(struct fw_task *task);
 typedef void *topic_2_callback(struct fw_task *finished);
index fb190c3..337c1bf 100644 (file)
@@ -1614,6 +1614,9 @@ long do_fork(unsigned long clone_flags,
                        get_task_struct(p);
                }
 
+               //TODO: FRAMEWORK STUFF
+                       p->fw_task = NULL;
+                       wake_up_new_task(p);
 
                /* forking complete and child started to run, tell ptracer */
                if (unlikely(trace))
@@ -1624,9 +1627,7 @@ long do_fork(unsigned long clone_flags,
                                ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                }
 
-               //TODO: FRAMEWORK STUFF
-                       p->fw_task = NULL;
-                       wake_up_new_task(p);
+
 
        } else {
                nr = PTR_ERR(p);
index 8e702a4..bdc824b 100644 (file)
@@ -379,6 +379,7 @@ static void __sched_fork(struct task_struct *p)
  */
 void sched_fork(struct task_struct *p)
 {
+//     printk(">>sched_fork\n");
        unsigned long flags;
        int cpu = get_cpu();
 
@@ -808,6 +809,8 @@ void resched_task(struct task_struct *p)
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
+
+//     printk(">>resched_task %d\n",p->pid);
 }
 
 /**
@@ -923,6 +926,7 @@ static inline void post_schedule(struct rq *rq)
 asmlinkage void schedule_tail(struct task_struct *prev)
        __releases(rq->lock)
 {
+//     printk(">>schedule_tail %d\n",prev->pid);
        struct rq *rq = this_rq();
 
        finish_task_switch(rq, prev);
@@ -1126,7 +1130,7 @@ unsigned long this_cpu_load(void)
  */
 void update_cpu_load_nohz(void)
 {
-       //printk("\nupdate_cpu_load_nohz");
+       printk("\nupdate_cpu_load_nohz");
 
        return;
 }
@@ -1139,6 +1143,7 @@ void update_cpu_load_nohz(void)
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
+//     printk(">>select_task_rq");
        int cpu = task_cpu(p);
 //     int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
@@ -1164,15 +1169,15 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
  */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       printk("activate_task %d\n",p->pid);
-//     if (task_contributes_to_load(p))
-//             rq->nr_uninterruptible--;
+//     printk(">>activate_task %d\n",p->pid);
+       if (task_contributes_to_load(p))
+               rq->nr_uninterruptible--;
 
 //     enqueue_task(rq, p, flags);
 //     list_add(&p->rq_tasks, &rq->rq_list);
 
 //TODO: FRAMEWORK STUFF OR not?
-       send_to_topic(1,p);
+//     send_to_topic(0,p);
 }
 
 /*
@@ -1180,12 +1185,12 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
  */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-       printk("deactivate task aufgerufen!\n");
+       printk(">>deactivate_task\n");
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
 //     dequeue_task(rq, p, flags);
-       list_del(&p->rq_tasks);
+//     list_del(&p->rq_tasks);
 }
 
 /*
@@ -1193,12 +1198,17 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
  */
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
+//     printk(">>ttwu_activate %d\n",p->pid);
        activate_task(rq, p, en_flags);
        p->on_rq = 1;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
+       {
+               //TODO: remove the print and klammerns later
+//             printk(">>EFFING worker here\n");
                wq_worker_waking_up(p, cpu_of(rq));
+       }
 }
 
 /*
@@ -1238,7 +1248,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 static void
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
-       printk("ttwu_do_wakeup task %d\n",p->pid);
+//     printk(">>ttwu_do_wakeup task %d\n",p->pid);
 //     trace_sched_wakeup(p, true);
        check_preempt_curr(rq, p, wake_flags);
 
@@ -1266,7 +1276,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static void
 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
 {
-       printk("ttwu_do_activate\n");
+//     printk(">>ttwu_do_activate\n");
 #ifdef CONFIG_SMP
        if (p->sched_contributes_to_load)
                rq->nr_uninterruptible--;
@@ -1281,7 +1291,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
  */
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
 {
-       printk("ttwu_queue_remote\n");
+//     printk(">>ttwu_queue_remote\n");
 #if defined(CONFIG_SMP)
        if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
                smp_send_reschedule(cpu);
@@ -1294,7 +1304,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 static void ttwu_queue(struct task_struct *p, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       printk("ttwu_queue task %d : cpu %d\n",p->pid, cpu);
+//     printk(">>ttwu_queue task %d : cpu %d\n",p->pid, cpu);
 
 #if defined(CONFIG_SMP)
        if (/*sched_feat(TTWU_QUEUE) && */!cpus_share_cache(smp_processor_id(), cpu)) {
@@ -1323,6 +1333,9 @@ void wake_up_new_task(struct task_struct *p)
        struct rq *rq;
 //     int cpu = 255;
 
+//     TODO: FRAMEWORK ZEUGS
+       send_to_topic(1,p);
+
        raw_spin_lock_irqsave(&p->pi_lock, flags);
 
 #ifdef CONFIG_SMP
@@ -1339,8 +1352,11 @@ void wake_up_new_task(struct task_struct *p)
 
 #endif
 
+
        rq = __task_rq_lock(p);
-//     activate_task(rq, p, 0);
+//     send_to_topic(1,p);
+//     printk(">>wake_up_new_task:");
+       activate_task(rq, p, 0);
        p->on_rq = 1;
 //     trace_sched_wakeup_new(p, true);
        check_preempt_curr(rq, p, WF_FORK);
@@ -1350,11 +1366,12 @@ void wake_up_new_task(struct task_struct *p)
 //#endif
        task_rq_unlock(rq, p, &flags);
 
-       printk("wake_up_new_task:");
-       activate_task(rq, p, 0);
-
        //TODO: FRAMEWORK ZEUGS
 //     send_to_topic(1,p);
+
+//     printk(">>wake_up_new_task:");
+//     activate_task(rq, p, 0);
+
 }
 
 /*
@@ -1366,6 +1383,7 @@ void wake_up_new_task(struct task_struct *p)
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
+//     printk(">>ttwu_remote %d\n",p->pid);
        struct rq *rq;
        int ret = 0;
 
@@ -1398,6 +1416,8 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 static int
 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
+//     printk(">>try_to_wake_up %d\n",p->pid);
+
        unsigned long flags;
        int cpu, success = 0;
 
@@ -1412,6 +1432,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
 
+//     TODO:framework zeugs
+       send_to_topic(0,p);
+
 #ifdef CONFIG_SMP
        /*
         * If the owning (remote) cpu is still in the middle of schedule() with
@@ -1447,7 +1470,7 @@ out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 //TODO: FRAMEWORK
-send_to_topic(0,p);
+//send_to_topic(0,p);
 
        return success;
 }
@@ -1463,6 +1486,7 @@ send_to_topic(0,p);
  */
 static void try_to_wake_up_local(struct task_struct *p)
 {
+//     printk(">>try_to_wake_up_local %d\n",p->pid);
        struct rq *rq = task_rq(p);
 
        if (WARN_ON_ONCE(rq != this_rq()) ||
@@ -1498,6 +1522,7 @@ static inline void
 context_switch(struct rq *rq, struct task_struct *prev,
               struct task_struct *next)
 {
+//     printk(">>context_switch\n");
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
@@ -1630,10 +1655,11 @@ need_resched:
 //             switch_count = &prev->nvcsw;
        }
 
-       if(prev->flags & TASK_DEAD){
+//     if(prev->state & TASK_DEAD){
                //task dead
-               next = (struct task_struct *)send_to_topic(2,prev);
-       }else if(prev->state && !(preempt_count() & PREEMPT_ACTIVE)){
+//             next = (struct task_struct *)send_to_topic(2,prev);
+//     }else
+       if(prev->state && !(preempt_count() & PREEMPT_ACTIVE)){
                //yield
                next = (struct task_struct *)send_to_topic(4,prev);
        }else{
@@ -1648,16 +1674,16 @@ need_resched:
 //             idle_balance(cpu, rq);
 
 //     put_prev_task(rq, prev);
-       if ((prev != rq->idle) && prev->on_rq) {
-               list_add_tail(&prev->rq_tasks, &rq->rq_list);
-       }
-
+//     if ((prev != rq->idle) && prev->on_rq) {
+//             list_add_tail(&prev->rq_tasks, &rq->rq_list);
+//     }
+//
        /*      In case the only runnable task gets deactivated, we need to schedule
         *      the idle tasks.
         */
 //     next = pick_next_task(rq);
 //
-       if (!list_empty(&rq->rq_list)) {
+/*     if (!list_empty(&rq->rq_list)) {
                assert_raw_spin_locked(&rq->lock);
 //             next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
 //             list_del(&next->rq_tasks);
@@ -1665,8 +1691,9 @@ need_resched:
        else {
 //             next = rq->idle;
        }
-//     next->se.exec_start = rq->clock_task;
-//
+*/
+       if(next)
+               next->se.exec_start = rq->clock_task;
 
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;
@@ -1676,7 +1703,7 @@ need_resched:
                rq->curr = next;
 //             ++*switch_count;
 
-               printk("wechsel! PID: %d | CPU :%d | allows: %d\n",next->pid, cpu, next->cpus_allowed);
+//             printk("wechsel! PID: %d | CPU :%d | allows: %d\n",next->pid, cpu, next->cpus_allowed);
                context_switch(rq, prev, next); /* unlocks the rq */
 
                // TODO: remove irq enable
@@ -1728,6 +1755,7 @@ EXPORT_SYMBOL(schedule);
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int wake_flags, void *key)
 {
+//     printk(">>__wake_up_common\n");
        wait_queue_t *curr, *next;
 
        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
@@ -1775,6 +1803,7 @@ EXPORT_SYMBOL(__wake_up);
  */
 int wake_up_process(struct task_struct *p)
 {
+//     printk(">>wake_up_process %d\n",p->pid);
        WARN_ON(task_is_stopped_or_traced(p));
        return try_to_wake_up(p, TASK_NORMAL, 0);
 }
@@ -1856,6 +1885,7 @@ EXPORT_SYMBOL(wait_for_completion);
  */
 void complete(struct completion *x)
 {
+//     printk(">>complete\n");
        unsigned long flags;
 
        spin_lock_irqsave(&x->wait.lock, flags);
index 3a2dbbe..8bbad4d 100644 (file)
@@ -24,8 +24,6 @@
  *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
  *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
  *              Thomas Gleixner, Mike Kravetz
- *  2013-06-14  Pub/Sub Scheduler integration by Daniel Röhrig and
- *                     Christian Rene Sechting
  */
 
 #include <linux/mm.h>
@@ -88,8 +86,6 @@
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
 
-//#include "pub_sub.h"
-
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
index a8e2f4b..339c6da 100755 (executable)
--- a/run_qemu
+++ b/run_qemu
@@ -1,4 +1,4 @@
 #!/bin/sh
 
 #qemu-system-x86_64 -machine accel=tcg -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0 init=/bin/busybox sh" --enable-kvm
-qemu-system-x86_64 -machine accel=tcg -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0" -smp 2,maxcpu=2,cores=2,threads=2,sockets=2
+qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0" --enable-kvm #-smp 2