fail:smp p/sechting/derenkernel
authorRené Sechting <sechting@mailbox.tu-berlin.de>
Wed, 11 Dec 2013 15:43:40 +0000 (16:43 +0100)
committerRené Sechting <sechting@mailbox.tu-berlin.de>
Wed, 11 Dec 2013 15:43:40 +0000 (16:43 +0100)
Nothing is working; it seems the framework isn't sufficient now. But there
are changes coming even for single-core usage... so cancel!

debug_qemu
framework/fw.c
framework/fw.h
framework/fw_comm.h
framework/modules/rr_module.c
framework/os/linux/os.c
framework/os/os.h
kernel/sched.new/core.c
run_qemu

index 24951e5..85b2562 100755 (executable)
@@ -1,3 +1,3 @@
 #!/bin/sh
 
-qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh" -no-kvm
+qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh" -no-kvm -smp 2
index c3b418b..dc5400b 100644 (file)
@@ -55,3 +55,13 @@ void fw_init()
        fw_init_comm();
        fw_modules_init();
 }
+
+int fw_num_cpus()
+{
+       return os_num_cpus();
+}
+
+int fw_cpu()
+{
+       return os_cpu();
+}
index 7faa2f8..1c97301 100644 (file)
@@ -57,4 +57,8 @@ int fw_register_module(void);
 
 void fw_init(void);
 
+int fw_num_cpus(void);
+
+int fw_cpu(void);
+
 #endif /* FW_H_ */
index 29eea69..146e955 100644 (file)
@@ -11,7 +11,7 @@
  * RELEASE:            void (fw_task_t *task);
  *
  */
-typedef enum {FIRST, SCHEDULE, ADMIT, RELEASE, DISPATCH, TIME_OUT, EVENT_WAIT, EVENT_OCCURS, LAST} topic_t; //5 State Process Model, Stalings "Operating Systems" P 137
+typedef enum {FIRST, SCHEDULE, ADMIT, RELEASE, DISPATCH, TIME_OUT, EVENT_WAIT, EVENT_OCCURS, SMP_IDLE_THREAD, LAST} topic_t; //5 State Process Model, Stallings "Operating Systems" p. 137
 
 void fw_subscribe(topic_t topic, void *callback);
 
index 6058e50..aa8df80 100644 (file)
@@ -40,18 +40,30 @@ static int module_id;
 //     fw_print("\n");
 //}
 
+static struct rq *get_rq(void){
+       return &rr_rq[fw_cpu()];
+}
+
 static struct fw_task *task_received(struct fw_task *task)
 {
        struct rr_info *rq_ele;
+       struct rq *rq;
+
+//     fw_printf(">>ADMIT: task auf cpu %d\n",fw_cpu());
+
 //     if(system_state == SYSTEM_RUNNING)
 //             fw_printf("new task %d\n", fw_get_pid(task));
 //     rq_ele = (struct rr_info*)((unsigned long)task+offset);
 //     fw_list_add_tail(&rq_ele->head, &rr_rq->queue);
+       rq = get_rq();
+
+//     if(rq->idle == NULL)
+//             rq->idle = task;
 
        rq_ele = (struct rr_info *)fw_malloc(sizeof(struct rr_info));
        task->module_data[module_id] = (void *)rq_ele;
        rq_ele->task = task;
-       fw_list_add_tail(&rq_ele->head, &rr_rq->queue);
+       fw_list_add_tail(&rq_ele->head, &rq->queue);
 
        rq_ele->on_rq = 1;
 
@@ -62,20 +74,22 @@ static struct fw_task *task_finished(struct fw_task *finished)
 {
        struct rr_info *next_elem;
        struct fw_task *task;
+       struct rq* rq;
 //     fw_printf("task finished %d\n",fw_get_pid(finished));
 
 //     next_elem = (struct rr_info *)((unsigned long)finished+offset);
 //     next_elem->on_rq = 0;
-       if(!fw_list_empty(&rr_rq->queue)){
+       rq = get_rq();
+       if(!fw_list_empty(&rq->queue)){
                //get the next task running!
-               next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
+               next_elem = (struct rr_info *)__fw_list_pop(&(rq->queue));
                task = next_elem->task;
-               rr_rq->curr = task;
+               rq->curr = task;
                fw_dispatch(task);
                return task;
        }else{
                //every task is done, go to idle mode
-               rr_rq->curr = idle;
+               rq->curr = idle;
                fw_dispatch(idle);
                return idle;
        }
@@ -85,41 +99,42 @@ static struct fw_task *tick_received(struct fw_task *preempted)
 {
        struct fw_task *task;
        struct rr_info *next_elem;
+       struct rq *rq;
 //     if(system_state == SYSTEM_RUNNING)
 //             fw_printf(".");
 
 //     rr_print_rq();
-
-       if(fw_list_empty(&rr_rq->queue)){
+       rq = get_rq();
+       if(fw_list_empty(&rq->queue)){
 //             fw_printf("kein kontextwechsel\n");
                fw_dispatch(preempted);
                return NULL;
        }
 
        if((preempted == idle)){
-               next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
+               next_elem = (struct rr_info *)__fw_list_pop(&(rq->queue));
 //             task = (struct fw_task*)((unsigned long)next_elem-offset);
                task = next_elem->task;
-               rr_rq->curr = task;
+               rq->curr = task;
                fw_dispatch(task);
                return task;
        }else{
                //insert curr task to queue
                next_elem = (struct rr_info*)preempted->module_data[module_id];
 //                     next_elem = (struct rr_info*)((unsigned long)task+offset);
-               fw_list_add_tail(&next_elem->head,&rr_rq->queue);
+               fw_list_add_tail(&next_elem->head,&rq->queue);
                next_elem->on_rq = 1;
                //get the new task out of da queue and schedule the hell out of it
 //             next_elem = NULL;
-               next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
+               next_elem = (struct rr_info *)__fw_list_pop(&(rq->queue));
 //             task = (struct fw_task*)((unsigned long)next_elem-offset);
                task = next_elem->task;
                next_elem->on_rq = 0;
-               rr_rq->curr = task;
+               rq->curr = task;
                fw_dispatch(task);
                return task;
        }
-       rr_rq->curr = idle;
+       rq->curr = idle;
        fw_dispatch(idle);
        return idle;
 }
@@ -127,25 +142,25 @@ static struct fw_task *tick_received(struct fw_task *preempted)
 static struct fw_task *task_yielded(struct fw_task *yielded)
 {
 
-//     fw_printf("yield-state: %d",fw_get_state(yielded));
        struct rr_info *next_elem;
-//     struct rr_info *pos;
        struct fw_task *task;
+       struct rq *rq;
 //     fw_printf("task has yielded %d\n",fw_get_pid(yielded));
 //     next_elem = (struct rr_info *) ((unsigned long)yielded + offset);
+       rq = get_rq();
        next_elem = (struct rr_info*)yielded->module_data[module_id];
        next_elem->on_rq = 0;
 //     next_elem = NULL;
-       if(!fw_list_empty(&rr_rq->queue)){
-               next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
+       if(!fw_list_empty(&rq->queue)){
+               next_elem = (struct rr_info *)__fw_list_pop(&(rq->queue));
                next_elem->on_rq = 0;
 //             task = (struct fw_task*)((unsigned long)next_elem-offset);
                task = next_elem->task;
-               rr_rq->curr = task;
+               rq->curr = task;
                fw_dispatch(task);
                return task;
        }else{
-               rr_rq->curr = idle;
+               rq->curr = idle;
                fw_dispatch(idle);
                return idle;
        }
@@ -156,36 +171,59 @@ static struct fw_task *wake_up_task(struct fw_task *wake_up)
 //     if(system_state == SYSTEM_RUNNING)
 //             fw_printf("aufwachen!!! %d\n",fw_get_pid(wake_up));
 //     struct rr_info *to_wake_up = (struct rr_info*)((unsigned long)wake_up+offset);
+       struct rq *rq;
        struct rr_info *to_wake_up = (struct rr_info*)wake_up->module_data[module_id];
+       rq = get_rq();
        if(!to_wake_up->on_rq){
-               fw_list_add_tail(&to_wake_up->head,&(rr_rq->queue));
+               fw_list_add_tail(&to_wake_up->head,&(rq->queue));
                to_wake_up->on_rq = 1;
        }
+//     if(rq->idle==NULL) rq->idle == wake_up;
        return NULL;
 }
 
+static void smp_init_idle(struct fw_task *idle)
+{
+       int cpu;
+       cpu = fw_cpu();
+       fw_printf(">>idletask für cpu %d\n",cpu);
+       rr_rq[cpu].idle = idle;
+       rr_rq[cpu].curr = idle;
+       return;
+}
+
 //__init_module(init_sched_rr_module)
 int init_sched_rr_module(void)
 {
+       int i;
        fw_printf("ROUND_ROBIN_MODULE INITIALIZING\n");
        module_id = fw_register_module();
        if(module_id == -1)
                return -1;
-       rr_rq = (struct rq*)fw_malloc(sizeof(struct rq));
        idle = fw_idle_task;
-       rr_rq->idle = idle;
-       rr_rq->curr = idle;
-       FW_LIST_INIT(rr_rq->queue);
-//     subscribe_to_topic(1, &task_received);
+       rr_rq = (struct rq*)fw_malloc(sizeof(struct rq)*fw_num_cpus());
+       for(i=0; i<=fw_num_cpus();i++){
+//             if(i==0){
+//             rr_rq[i].idle = idle;
+//             rr_rq[i].curr = idle;
+//             }else{
+//             rr_rq[i].idle = NULL;
+//             rr_rq[i].curr = NULL;
+//             }
+               FW_LIST_INIT(rr_rq[i].queue);
+       }
+       rr_rq[0].idle = idle;
+       rr_rq[0].curr = idle;
+//     rr_rq = (struct rq*)fw_malloc(sizeof(struct rq));
+//     rr_rq->idle = idle;
+//     rr_rq->curr = idle;
+//     FW_LIST_INIT(rr_rq->queue);
        fw_subscribe(ADMIT, &task_received);
-//     subscribe_to_topic(2, &task_finished);
        fw_subscribe(RELEASE, &task_finished);
-       //subscribe_to_topic(3, &tick_received);
        fw_subscribe(TIME_OUT, &tick_received);
-       //subscribe_to_topic(4, &task_yielded);
        fw_subscribe(EVENT_WAIT, &task_yielded);
-       //subscribe_to_topic(0, &wake_up_task);
        fw_subscribe(EVENT_OCCURS, &wake_up_task);
+       fw_subscribe(SMP_IDLE_THREAD, &smp_init_idle);
        fw_printf("SCHEDULER INITIALIZED!\n");
 
        return 0;
index 2780dbf..d12212d 100644 (file)
@@ -11,6 +11,7 @@
 #include <fw_types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/cpumask.h>
 //TODO This avoids copying the sched.h into the folder
 #include "../../../kernel/sched.new/sched.h"
 #include <fw.h>
@@ -34,25 +35,37 @@ void os_create_task(fw_task_t *task)
        return;
 }
 
+extern void set_task_cpu(struct task_struct *p, unsigned int new_cpu);
 
 void os_dispatch_mp(fw_task_t *task, int cpu)
 {
        struct rq *rq;
        struct task_struct *prev, *next;
 
-       cpu = smp_processor_id();
+
+//     cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        prev = rq->curr;
        next = (struct task_struct*)task->real_task;
 
+//     if(cpu){
+//             fw_printf(">>DISPATCH task %d auf cpu %d\n",next->pid,cpu);
+//     }
+
        next->se.exec_start = rq->clock_task;
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;
        rq->nr_switches++;
        rq->curr = next;
 
+//     printk("context_switch task (%d) erlaubt auf cpus: %d\n",next->pid,next->cpus_allowed);
+//     printk("cpus: %x\n",num_possible_cpus());
+
        if(prev != next){
+               set_task_cpu(next,cpu);
+//             fw_printf(".");
                context_switch(rq,prev,next);
+//             fw_printf(";");
                arch_local_irq_enable();
        }else{
                raw_spin_unlock_irq(&rq->lock);
@@ -74,3 +87,13 @@ int os_init_os(void)
        fw_idle_task->real_task = ptr;
        return 0;
 }
+
+int os_num_cpus(void)
+{
+       return num_possible_cpus();
+}
+
+int os_cpu(void)
+{
+       return smp_processor_id();
+}
index 43426e2..457be6e 100644 (file)
@@ -57,6 +57,10 @@ void os_spinunlock(fw_spinlock_t *lock);
 
 const char *os_task_info(fw_task_t *task);
 
+int os_num_cpus(void);
+
+int os_cpu(void);
+
 /*
 #if 1
 #include "bertos/os.h"
index 7463399..4a9a6e5 100644 (file)
@@ -436,6 +436,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        __set_task_cpu(idle, cpu);
        rcu_read_unlock();
 
+       //TODO: framework stuff
+       if(idle->pid != 0)
+               fw_notify(SMP_IDLE_THREAD,idle->fw_task);
+
        rq->curr = rq->idle = idle;
 #if defined(CONFIG_SMP)
        idle->on_cpu = 1;
@@ -448,6 +452,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
+
 }
 
 /*
@@ -1323,7 +1328,7 @@ void wake_up_new_task(struct task_struct *p)
        struct fw_task *new_task = (struct fw_task *)fw_malloc(sizeof(struct fw_task));
        p->fw_task = new_task;
        new_task->real_task = p;
-
+//     printk("neuer task (%d) erlaubt auf cpus: %d|%d\n",p->pid, p->cpus_allowed, p->nr_cpus_allowed);
        fw_notify(ADMIT,new_task);
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -1371,7 +1376,7 @@ void wake_up_new_task(struct task_struct *p)
  */
 static int ttwu_remote(struct task_struct *p, int wake_flags)
 {
-//     printk(">>ttwu_remote %d\n",p->pid);
+       printk(">>ttwu_remote %d\n",p->pid);
        struct rq *rq;
        int ret = 0;
 
@@ -1419,8 +1424,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto stat;
-
-
+//     if(p->nr_cpus_allowed < 8){
+//             printk(">>wake up task %d erlaubt auf cpu %d bin auf cpu %d\n",p->pid, p->cpus_allowed, smp_processor_id());
+//     }
        fw_notify(EVENT_OCCURS, p->fw_task);
 
 #ifdef CONFIG_SMP
@@ -1606,6 +1612,9 @@ need_resched:
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        prev = rq->curr;
+//     if(cpu == 1){
+//             printk("CPU 1 schedule! %p\n",prev);
+//     }
 
        raw_spin_lock_irq(&rq->lock);
 
@@ -2235,8 +2244,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
        struct rq *rq_dest, *rq_src;
        int ret = 0;
 
-       //TODO: testen!
-       printk("!!MIGRIERE deb task %d zur cpu %d\n",p->pid,dest_cpu);
+       printk(">>__migrate_task | task %d zu cpu %d\n",p->pid,dest_cpu);
 
        if (unlikely(!cpu_active(dest_cpu)))
                return ret;
@@ -2288,7 +2296,7 @@ fail:
 static int migration_cpu_stop(void *data)
 {
        struct migration_arg *arg = data;
-
+       printk(">>migration_cpu_stop\n");
        /*
         * The original target cpu might have gone down and we might
         * be on another cpu but it doesn't matter.
@@ -2325,13 +2333,17 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        unsigned long flags;
        struct rq *rq = cpu_rq(cpu);
 
+//     printk(">>migration call cpu %d >> ",cpu);
+
        switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
+//             printk("CPU_UP_PREPARE");
 //             rq->calc_load_update = calc_load_update;
                break;
 
        case CPU_ONLINE:
+//             printk("CPU_ONLINE");
                /* Update our root-domain */
                raw_spin_lock_irqsave(&rq->lock, flags);
 //             if (rq->rd) {
@@ -2344,6 +2356,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DYING:
+//             printk("CPU_DYING");
                sched_ttwu_pending();
                /* Update our root-domain */
                raw_spin_lock_irqsave(&rq->lock, flags);
@@ -2357,12 +2370,14 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                break;
 
        case CPU_DEAD:
+//             printk("CPU_DEAD");
 //             calc_load_migrate(rq);
                break;
 #endif
        }
 
 //     update_max_interval();
+//     printk("<<DONE!\n");
 
        return NOTIFY_OK;
 }
@@ -2380,6 +2395,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
+//     printk(">>sched_cpu_active\n");
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
        case CPU_DOWN_FAILED:
@@ -2393,6 +2409,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
+//     printk(">>sched_cpu_inactive %d\n",action);
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                set_cpu_active((long)hcpu, false);
@@ -2406,7 +2423,7 @@ static int __init migration_init(void)
 {
        void *cpu = (void *)(long)smp_processor_id();
        int err;
-
+//     printk(">>migration_init\n");
        /* Initialize migration for the boot CPU */
        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
        BUG_ON(err == NOTIFY_BAD);
@@ -2435,6 +2452,7 @@ void do_set_cpus_allowed(struct task_struct *p,
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
+//     printk(">>set_cpus_allowed_ptr\n");
        unsigned long flags;
        struct rq *rq;
        unsigned int dest_cpu;
@@ -2479,6 +2497,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 static void sched_ttwu_pending(void)
 {
+//     printk(">>sched_ttwu_pending\n");
        struct rq *rq = this_rq();
        struct llist_node *llist = llist_del_all(&rq->wake_list);
        struct task_struct *p;
@@ -2496,6 +2515,7 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
+//     printk(">>scheduler_ipi\n");
        if (llist_empty(&this_rq()->wake_list)) // && !got_nohz_idle_kick())
                        return;
 
@@ -2544,6 +2564,7 @@ void scheduler_ipi(void)
  */
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
+//     printk(">>wait_task_inactive...");
        unsigned long flags;
        int running, on_rq;
        unsigned long ncsw;
@@ -2630,6 +2651,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
                 */
                break;
        }
+//     printk("done!\n");
 
        return ncsw;
 }
@@ -2650,6 +2672,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  */
 void kick_process(struct task_struct *p)
 {
+       printk(">>kick_process\n");
        int cpu;
 
        preempt_disable();
@@ -2662,12 +2685,12 @@ EXPORT_SYMBOL_GPL(kick_process);
 
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
-       printk("\nsched_set_stop_task");
+//     printk(">>sched_set_stop_task\n");
 }
 
 bool completion_done(struct completion *x)
 {
-       printk("\ncompletion_done");
+//     printk(">>completion_done\n");
 
        return 0;
 }
@@ -2679,6 +2702,7 @@ bool completion_done(struct completion *x)
  */
 void sched_exec(void)
 {
+       printk(">>sched_exec\n");
        struct task_struct *p = current;
        unsigned long flags;
        int dest_cpu;
index 339c6da..0e55d35 100755 (executable)
--- a/run_qemu
+++ b/run_qemu
@@ -1,4 +1,4 @@
 #!/bin/sh
 
 #qemu-system-x86_64 -machine accel=tcg -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0 init=/bin/busybox sh" --enable-kvm
-qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0" --enable-kvm #-smp 2
+qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -append "console=ttyS0" --enable-kvm -smp 2