Linux with fw compilable again
authorRené Sechting <sechting@mailbox.tu-berlin.de>
Tue, 17 Dec 2013 14:58:21 +0000 (15:58 +0100)
committerRené Sechting <sechting@mailbox.tu-berlin.de>
Tue, 17 Dec 2013 14:58:21 +0000 (15:58 +0100)
The kernel is not working yet, but it compiles.
* Makefile - if we want to compile the kernel with the framework, we extend the
include path
* include/linux/sched.h - the fw_task is part of struct task_struct and
therefore doesn't need to be allocated separately.
* kernel/fork.c - moved the fw_task initialisation from fork.c to core.c
* kernel/sched/modsched/core.c - initialisation of the fw_task in
wake_up_new_task() and other changes needed to get it to compile

Makefile
include/linux/sched.h
kernel/fork.c
kernel/sched/modsched/core.c

index c6ace67..f0fa9fe 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -374,6 +374,7 @@ KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                   -Werror-implicit-function-declaration \
                   -Wno-format-security \
                   -fno-delete-null-pointer-checks
+
 KBUILD_AFLAGS_KERNEL :=
 KBUILD_CFLAGS_KERNEL :=
 KBUILD_AFLAGS   := -D__ASSEMBLY__
@@ -1433,4 +1434,7 @@ FORCE:
 # The Frame for the modular Schedular is here
 MOD_SCHED_DIR ?= $(PWD)/framework
 OS=linux
+ifeq ($(CONFIG_MOD_SCHED),y)              
+KBUILD_CFLAGS += -Ikernel/sched/modsched/framework/include
+endif
 export MOD_SCHED_DIR OS
index abc6b51..4890e2e 100644 (file)
@@ -55,6 +55,10 @@ struct sched_param {
 
 #include <asm/processor.h>
 
+#ifdef CONFIG_MOD_SCHED
+#include <fw_task.h>
+#endif
+
 struct exec_domain;
 struct futex_pi_state;
 struct robust_list_head;
@@ -1198,9 +1202,10 @@ enum perf_event_task_context {
        perf_nr_task_contexts,
 };
 
-struct fw_task;
 struct task_struct {
-       struct fw_task *fw_task; //easier and faster than handling a all task list
+#ifdef CONFIG_MOD_SCHED
+       struct fw_task fw_task; //easier and faster than handling a all task list
+#endif
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
        atomic_t usage;
index 337c1bf..4da77a1 100644 (file)
@@ -1614,8 +1614,6 @@ long do_fork(unsigned long clone_flags,
                        get_task_struct(p);
                }
 
-               //TODO: FRAMEWORK STUFF
-                       p->fw_task = NULL;
                        wake_up_new_task(p);
 
                /* forking complete and child started to run, tell ptracer */
index ec5473e..21a90cc 100644 (file)
@@ -1332,7 +1332,7 @@ void wake_up_new_task(struct task_struct *p)
 //     int cpu = 255;
 
 //     TODO: FRAMEWORK ZEUGS
-       fw_notify(FW_ADMIT,p->fw_task);
+       fw_notify(FW_ADMIT,&p->fw_task);
 
        raw_spin_lock_irqsave(&p->pi_lock, flags);
 
@@ -1431,7 +1431,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                goto stat;
 
 //     TODO:framework zeugs
-       fw_notify(FW_EVENT_OCCURS,p->fw_task);
+       fw_notify(FW_EVENT_OCCURS,&p->fw_task);
 
 #ifdef CONFIG_SMP
        /*
@@ -1622,19 +1622,12 @@ need_resched:
        rcu_note_context_switch(cpu);
        prev = rq->curr;
 
-//     schedule_debug(prev);
-
-//     if (sched_feat(HRTICK))
-//             hrtick_clear(rq);
-
        raw_spin_lock_irq(&rq->lock);
 
-//     switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
                } else {
-//                     deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        prev->on_rq = 0;
 
                        /*
@@ -1650,49 +1643,15 @@ need_resched:
                                        try_to_wake_up_local(to_wakeup);
                        }
                }
-//             switch_count = &prev->nvcsw;
        }
 
-//     if(prev->state & TASK_DEAD){
-               //task dead
-//             next = (struct task_struct *)send_to_topic(2,prev);
-//     }else
+
        if(prev->state && !(preempt_count() & PREEMPT_ACTIVE)){
                //yield
-//             next = (struct task_struct *)send_to_topic(4,prev);
-               fw_notify(FW_EVENT_WAIT,prev->fw_task);
-       }else{
-               //tick
-               //printk("tick on cpu: %d\n",cpu);
-//             next = (struct task_struct *)send_to_topic(3);
-//             fw_notify(FW_TIMEOUT,prev->fw_task);
+               fw_notify(FW_EVENT_WAIT,&prev->fw_task);
        }
-
        fw_notify(FW_DISPATCH, NULL);
-//     pre_schedule(rq, prev);
-
-//     if (unlikely(!rq->nr_running))
-//             idle_balance(cpu, rq);
 
-//     put_prev_task(rq, prev);
-//     if ((prev != rq->idle) && prev->on_rq) {
-//             list_add_tail(&prev->rq_tasks, &rq->rq_list);
-//     }
-//
-       /*      In case the only runnable task gets deactivated, we need to schedule
-        *      the idle tasks.
-        */
-//     next = pick_next_task(rq);
-//
-/*     if (!list_empty(&rq->rq_list)) {
-               assert_raw_spin_locked(&rq->lock);
-//             next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
-//             list_del(&next->rq_tasks);
-       }
-       else {
-//             next = rq->idle;
-       }
-*/
 //     if(next)
 //             next->se.exec_start = rq->clock_task;
 //