context switch inside the framework
authorRené Sechting <sechting@mailbox.tu-berlin.de>
Thu, 14 Nov 2013 16:20:23 +0000 (17:20 +0100)
committerRené Sechting <sechting@mailbox.tu-berlin.de>
Thu, 14 Nov 2013 16:20:23 +0000 (17:20 +0100)
Finally working. It will not win a beauty contest, but it is a first step in
what is hopefully the right direction.
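
In short, these hunks move the actual context switch behind the framework:
__schedule() now only reports the scheduling event (task dead, yield, or tick)
via send_to_topic(), the round-robin module picks the next fw_task and calls
fw_dispatch(), and os_dispatch() records rq->prev and rq->curr and only performs
context_switch() when prev != next. The userspace toy below is only a sketch of
that control flow under those assumptions; the toy_* names are made up for
illustration and are not the framework's real API.

/* Toy model of the new dispatch flow: the "core" only reports an event,
 * the module picks the next task, and dispatch records prev/curr and
 * skips the switch when nothing changes.  All names here are invented
 * for illustration; they model, not reproduce, the framework code. */
#include <stdio.h>
#include <stddef.h>

struct toy_task { const char *name; struct toy_task *next; };

struct toy_rq {                         /* models struct rq: curr plus the new prev */
        struct toy_task *curr, *prev, *idle;
        struct toy_task *head, *tail;   /* simple FIFO run queue */
        unsigned long nr_switches;
};

static void toy_enqueue(struct toy_rq *rq, struct toy_task *t)
{
        t->next = NULL;
        if (rq->tail) rq->tail->next = t; else rq->head = t;
        rq->tail = t;
}

static struct toy_task *toy_pop(struct toy_rq *rq)
{
        struct toy_task *t = rq->head;
        if (t) { rq->head = t->next; if (!rq->head) rq->tail = NULL; }
        return t;
}

/* models os_dispatch(): remember prev, install next, switch only if they differ */
static void toy_dispatch(struct toy_rq *rq, struct toy_task *next)
{
        struct toy_task *prev = rq->curr;

        rq->prev = prev;
        rq->curr = next;
        if (prev != next) {
                rq->nr_switches++;
                printf("context switch: %s -> %s\n", prev->name, next->name);
        }
}

/* models tick_received(): keep running curr if the queue is empty,
 * otherwise round-robin: requeue curr and dispatch the next task */
static void toy_tick(struct toy_rq *rq)
{
        struct toy_task *next;

        if (!rq->head) {                /* empty queue: re-dispatch the current task */
                toy_dispatch(rq, rq->curr);
                return;
        }
        next = toy_pop(rq);
        if (rq->curr != rq->idle)
                toy_enqueue(rq, rq->curr);
        toy_dispatch(rq, next);
}

int main(void)
{
        struct toy_task idle = { "idle" }, a = { "A" }, b = { "B" };
        struct toy_rq rq = { .curr = &idle, .idle = &idle };

        toy_enqueue(&rq, &a);
        toy_enqueue(&rq, &b);
        toy_tick(&rq);                  /* idle -> A */
        toy_tick(&rq);                  /* A -> B    */
        toy_tick(&rq);                  /* B -> A    */
        toy_tick(&rq);                  /* A -> B    */
        printf("switches: %lu\n", rq.nr_switches);
        return 0;
}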

debug_qemu
framework/modules/rr_module.c
framework/os/linux/os.c
kernel/sched.new/core.c
kernel/sched.new/sched.h

index a339025..24951e5 100755 (executable)
@@ -1,3 +1,3 @@
 #!/bin/sh
 
-qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh"
+qemu-system-x86_64 -nographic -kernel build/arch/x86_64/boot/bzImage -s -S -append "console=ttyS0 init=/bin/busybox sh" -no-kvm
index cd0e29f..750a877 100644 (file)
@@ -29,6 +29,17 @@ static struct fw_task *idle;
 
 static int module_id;
 
+//static void rr_print_rq(void){
+//     struct fw_list_head *pos;
+//     struct rr_info *dingens;
+//     fw_printf("rq: ");
+//     for(pos = rr_rq->queue.next; pos != &rr_rq->queue; pos = pos->next){
+//             dingens = (struct rr_info *)pos;
+//             fw_printf("task: %p |", dingens->task);
+//     }
+//     fw_printf("\n");
+//}
+
 static struct fw_task *task_received(struct fw_task *task)
 {
        struct rr_info *rq_ele;
@@ -58,7 +70,7 @@ static struct fw_task *task_finished(struct fw_task *finished)
        if(!fw_list_empty(&rr_rq->queue)){
                //get the next task running!
                next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
-               task = (struct fw_task*)((unsigned long)next_elem-offset);
+               task = next_elem->task;
                rr_rq->curr = task;
                fw_dispatch(task);
                return task;
@@ -76,8 +88,14 @@ static struct fw_task *tick_received(void)
        struct rr_info *next_elem;
 //     if(system_state == SYSTEM_RUNNING)
 //             fw_printf(".");
-       if(fw_list_empty(&rr_rq->queue))
+
+//     rr_print_rq();
+
+       if(fw_list_empty(&rr_rq->queue)){
+//             fw_printf("no context switch\n");
+               fw_dispatch(task);
                return NULL;
+       }
 
        if((task == idle)&&!fw_list_empty(&rr_rq->queue)){
                next_elem = (struct rr_info *)__fw_list_pop(&(rr_rq->queue));
index df206bc..2401866 100644 (file)
@@ -53,12 +53,18 @@ void os_dispatch(fw_task_t *task)
        prev = rq->curr;
        next = (struct task_struct*)task->real_task;
 
+//     printk("rq: %p",rq);
+
        next->se.exec_start = rq->clock_task;
-//     clear_tsk_need_resched(prev);
-//     rq->skip_clock_update = 0;
+       clear_tsk_need_resched(prev);
+       clear_tsk_need_resched(next);
+       rq->skip_clock_update = 0;
        rq->nr_switches++;
        rq->curr = next;
-       context_switch(rq,prev,next);
+       rq->prev = prev;
+       if(prev != next){
+               context_switch(rq,prev,next);
+       }
 //     arch_local_irq_enable();
 }
 
index b054bc8..66f6b56 100644 (file)
@@ -1613,7 +1613,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
 static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
-//     unsigned long *switch_count = 0;
        struct rq *rq;
        int cpu;
 
@@ -1624,19 +1623,12 @@ need_resched:
        rcu_note_context_switch(cpu);
        prev = rq->curr;
 
-//     schedule_debug(prev);
-
-//     if (sched_feat(HRTICK))
-//             hrtick_clear(rq);
-
        raw_spin_lock_irq(&rq->lock);
 
-//     switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
                } else {
-//                     deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;
 
                        /*
@@ -1652,62 +1644,34 @@ need_resched:
                                        try_to_wake_up_local(to_wakeup);
                        }
                }
-//             switch_count = &prev->nvcsw;
        }
 
-//     if(prev->state & TASK_DEAD){
+       if(prev->state & TASK_DEAD){
                //task dead
-//             next = (struct task_struct *)send_to_topic(2,prev);
-//     }else
-       if(prev->state && !(preempt_count() & PREEMPT_ACTIVE)){
+               send_to_topic(2,prev);
+       }else if(prev->state && !(preempt_count() & PREEMPT_ACTIVE)){
                //yield
-               next = (struct task_struct *)send_to_topic(4,prev);
+               send_to_topic(4,prev);
+//             printk("yield next\n");
        }else{
                //tick
-               //printk("tick on cpu: %d\n",cpu);
-               next = (struct task_struct *)send_to_topic(3,NULL);
+               send_to_topic(3,NULL);
+//             printk("tick next\n");
        }
 
        pre_schedule(rq, prev);
 
-//     if (unlikely(!rq->nr_running))
-//             idle_balance(cpu, rq);
-
-//     put_prev_task(rq, prev);
-//     if ((prev != rq->idle) && prev->on_rq) {
-//             list_add_tail(&prev->rq_tasks, &rq->rq_list);
-//     }
-//
-       /*      In case the only runnable task gets deactivated, we need to schedule
-        *      the idle tasks.
-        */
-//     next = pick_next_task(rq);
-//
-/*     if (!list_empty(&rq->rq_list)) {
-               assert_raw_spin_locked(&rq->lock);
-//             next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
-//             list_del(&next->rq_tasks);
-       }
-       else {
-//             next = rq->idle;
-       }
-*/
+       cpu = smp_processor_id();
+       rq = cpu_rq(cpu);
+       prev = rq->prev;
+       next = rq->curr;
 
-//     if(next)
-//             next->se.exec_start = rq->clock_task;
-//
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;
 //
-       if (likely(prev != next)&&(next!=NULL)) {
-//     if (likely(prev != rq->curr)) {
-//             rq->nr_switches++;
-//             rq->curr = next;
-////           ++*switch_count;
-//
-////           printk("wechsel! PID: %d | CPU :%d | allows: %d\n",next->pid, cpu, next->cpus_allowed);
-//             context_switch(rq, prev, next); /* unlocks the rq */
-//
+//     if (likely(prev != next)&&(next!=NULL)) {
+       if (likely(prev != next)) {
+
 //             // TODO: remove irq enable
                arch_local_irq_enable();
 //
index 105adad..4e3a85d 100644 (file)
@@ -28,7 +28,7 @@ struct rq {
         */
        unsigned long nr_uninterruptible;
 
-       struct task_struct *curr, *idle, *stop;
+       struct task_struct *curr, *idle, *stop, *prev;
 
        struct mm_struct *prev_mm;