Moved wake_up_new_task and try_to_wake_up from core.c to the framework
author     René Sechting <sechting@mailbox.tu-berlin.de>
           Thu, 2 Jan 2014 14:57:47 +0000 (15:57 +0100)
committer  René Sechting <sechting@mailbox.tu-berlin.de>
           Thu, 2 Jan 2014 14:57:47 +0000 (15:57 +0100)
wake_up_new_task and try_to_wake_up are now implemented inside the framework. To support this, ttwu_queue and ttwu_remote lose their static qualifier so the framework can call them, and try_to_wake_up is declared extern in core.c.
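
For reference, a rough sketch of what the framework-side wake-up path might look like once it owns try_to_wake_up. It is modelled directly on the code removed from core.c below (the fw_notify/FW_EVENT_OCCURS hook, ttwu_remote and ttwu_queue all appear in this diff); the actual framework file, its name and its exact locking details are not part of this commit and are assumed here.

/*
 * Sketch only: a framework-side try_to_wake_up(), modelled on the
 * version removed from core.c in this commit.
 */
int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
        unsigned long flags;
        int cpu, success = 0;

        smp_wmb();
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        if (!(p->state & state))
                goto out;

        success = 1;    /* we are going to change ->state */
        cpu = task_cpu(p);

        /* Task is still on a runqueue: just flip it back to TASK_RUNNING. */
        if (p->on_rq && ttwu_remote(p, wake_flags))
                goto out;

        /* Let the scheduling framework know the wake-up event occurred. */
        fw_notify(FW_EVENT_OCCURS, &p->fw_task);

#ifdef CONFIG_SMP
        /* Wait until the previous CPU is done referencing the task. */
        while (p->on_cpu)
                cpu_relax();
        smp_rmb();      /* pairs with the smp_wmb() in finish_lock_switch() */

        p->state = TASK_WAKING;

        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
        if (task_cpu(p) != cpu) {
                wake_flags |= WF_MIGRATED;
                set_task_cpu(p, cpu);
        }
#endif /* CONFIG_SMP */

        /*
         * ttwu_queue() and ttwu_remote() are no longer static, so the
         * framework can reuse the existing runqueue plumbing directly.
         */
        ttwu_queue(p, cpu);
out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
        return success;
}

Dropping the static qualifier on ttwu_queue and ttwu_remote (and declaring try_to_wake_up extern in core.c) trades a little encapsulation for letting the framework drive the wake-up path without duplicating that runqueue plumbing.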

kernel/sched/modsched/core.c

index 8c5ebe2..30470c9 100644
@@ -1170,9 +1170,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 
 //     enqueue_task(rq, p, flags);
 //     list_add(&p->rq_tasks, &rq->rq_list);
-
-//TODO: FRAMEWORK STUFF OR not?
-//     send_to_topic(0,p);
 }
 
 /*
@@ -1193,15 +1190,13 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
  */
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
-//     printk(">>ttwu_activate %d\n",p->pid);
-       activate_task(rq, p, en_flags);
+//     activate_task(rq, p, en_flags);
        p->on_rq = 1;
 
        /* if a worker is waking up, notify workqueue */
        if (p->flags & PF_WQ_WORKER)
        {
-               //TODO: remove the print and klammerns later
-//             printk(">>EFFING worker here\n");
+               // this is very important
                wq_worker_waking_up(p, cpu_of(rq));
        }
 }
@@ -1298,7 +1293,8 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 /*
  * kernel/sched/core.c:1406
  */
-static void ttwu_queue(struct task_struct *p, int cpu)
+//static void ttwu_queue(struct task_struct *p, int cpu)
+void ttwu_queue(struct task_struct *p, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
 //     printk(">>ttwu_queue task %d : cpu %d\n",p->pid, cpu);
@@ -1316,63 +1312,63 @@ static void ttwu_queue(struct task_struct *p, int cpu)
        raw_spin_unlock(&rq->lock);
 }
 
-/*
- * kernel/sched/core.c:1703
- * wake_up_new_task - wake up a newly created task for the first time.
- *
- * This function will do some initial scheduler statistics housekeeping
- * that must be done for every newly created context, then puts the task
- * on the runqueue and wakes it.
- */
-void wake_up_new_task(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-       struct fw_task *fw_task;
-//     int cpu = 255;
-
-//     TODO: FRAMEWORK ZEUGS
-       fw_task = &(p->fw_task);
-       fw_task->real_task = p;
-       fw_notify(FW_ADMIT,fw_task);
-
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-
-#ifdef CONFIG_SMP
-       /*
-        * Fork balancing, do it here and not earlier because:
-        *  - cpus_allowed can change in the fork path
-        *  - any previously selected cpu might disappear through hotplug
-        */
-       set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
-//     printk("new thread\n");
-//     for_each_cpu(cpu, &(p->cpus_allowed)){
-//             printk("Asked for CPU #%d\n", cpu);
-//     }
-
-#endif
-
-
-       rq = __task_rq_lock(p);
-//     send_to_topic(1,p);
-//     printk(">>wake_up_new_task:");
-       activate_task(rq, p, 0);
-       p->on_rq = 1;
-//     trace_sched_wakeup_new(p, true);
-       check_preempt_curr(rq, p, WF_FORK);
+///*
+// * kernel/sched/core.c:1703
+// * wake_up_new_task - wake up a newly created task for the first time.
+// *
+// * This function will do some initial scheduler statistics housekeeping
+// * that must be done for every newly created context, then puts the task
+// * on the runqueue and wakes it.
+// */
+//void wake_up_new_task(struct task_struct *p)
+//{
+//     unsigned long flags;
+//     struct rq *rq;
+//     struct fw_task *fw_task;
+////   int cpu = 255;
+//
+////   TODO: FRAMEWORK ZEUGS
+//     fw_task = &(p->fw_task);
+//     fw_task->real_task = p;
+//     fw_notify(FW_ADMIT,fw_task);
+//
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//
 //#ifdef CONFIG_SMP
-//     if (p->sched_class->task_woken)
-//             p->sched_class->task_woken(rq, p);
+//     /*
+//      * Fork balancing, do it here and not earlier because:
+//      *  - cpus_allowed can change in the fork path
+//      *  - any previously selected cpu might disappear through hotplug
+//      */
+//     set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
+////   printk("new thread\n");
+////   for_each_cpu(cpu, &(p->cpus_allowed)){
+////           printk("Asked for CPU #%d\n", cpu);
+////   }
+//
 //#endif
-       task_rq_unlock(rq, p, &flags);
-
-       //TODO: FRAMEWORK ZEUGS
-//     send_to_topic(1,p);
-
-//     printk(">>wake_up_new_task:");
+//
+//
+//     rq = __task_rq_lock(p);
+////   send_to_topic(1,p);
+////   printk(">>wake_up_new_task:");
 //     activate_task(rq, p, 0);
-
-}
+//     p->on_rq = 1;
+////   trace_sched_wakeup_new(p, true);
+//     check_preempt_curr(rq, p, WF_FORK);
+////#ifdef CONFIG_SMP
+////   if (p->sched_class->task_woken)
+////           p->sched_class->task_woken(rq, p);
+////#endif
+//     task_rq_unlock(rq, p, &flags);
+//
+//     //TODO: FRAMEWORK ZEUGS
+////   send_to_topic(1,p);
+//
+////   printk(">>wake_up_new_task:");
+////   activate_task(rq, p, 0);
+//
+//}
 
 /*
  * kernel/sched/core:1330
@@ -1381,7 +1377,8 @@ void wake_up_new_task(struct task_struct *p)
  * since all we need to do is flip p->state to TASK_RUNNING, since
  * the task is still ->on_rq.
  */
-static int ttwu_remote(struct task_struct *p, int wake_flags)
+//static int ttwu_remote(struct task_struct *p, int wake_flags)
+int ttwu_remote(struct task_struct *p, int wake_flags)
 {
 //     printk(">>ttwu_remote %d\n",p->pid);
        struct rq *rq;
@@ -1397,82 +1394,83 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
        return ret;
 }
 
-/**
- * kernel/sched/core.c:1439
- * try_to_wake_up - wake up a thread
- * @p: the thread to be awakened
- * @state: the mask of task states that can be woken
- * @wake_flags: wake modifier flags (WF_*)
- *
- * Put it on the run-queue if it's not already there. The "current"
- * thread is always on the run-queue (except when the actual
- * re-schedule is in progress), and as such you're allowed to do
- * the simpler "current->state = TASK_RUNNING" to mark yourself
- * runnable without the overhead of this.
- *
- * Returns %true if @p was woken up, %false if it was already running
- * or @state didn't match @p's state.
- */
-static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-{
-//     printk(">>try_to_wake_up %d\n",p->pid);
-
-       unsigned long flags;
-       int cpu, success = 0;
-
-       smp_wmb();
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       if (!(p->state & state))
-               goto out;
-
-       success = 1; /* we're going to change ->state */
-       cpu = task_cpu(p);
-
-       if (p->on_rq && ttwu_remote(p, wake_flags))
-               goto stat;
-
-//     TODO:framework zeugs
-       fw_notify(FW_EVENT_OCCURS,&p->fw_task);
-
-#ifdef CONFIG_SMP
-       /*
-        * If the owning (remote) cpu is still in the middle of schedule() with
-        * this task as prev, wait until its done referencing the task.
-        */
-       while (p->on_cpu)
-               cpu_relax();
-       /*
-        * Pairs with the smp_wmb() in finish_lock_switch().
-        */
-       smp_rmb();
-
-//     p->sched_contributes_to_load = !!task_contributes_to_load(p);
-       p->state = TASK_WAKING;
-
-//     if (p->sched_class->task_waking)
-//             p->sched_class->task_waking(p);
-
-       // TODO: simply not using select_task_rq :)
-       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (task_cpu(p) != cpu) {
-               wake_flags |= WF_MIGRATED;
-               set_task_cpu(p, cpu);
-       }
-#endif /* CONFIG_SMP */
-
-       ttwu_queue(p, cpu);
-stat:
-//     raw_spin_unlock(&rq->lock);
-out:
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-//     TODO:framework zeugs
+///**
+// * kernel/sched/core.c:1439
+// * try_to_wake_up - wake up a thread
+// * @p: the thread to be awakened
+// * @state: the mask of task states that can be woken
+// * @wake_flags: wake modifier flags (WF_*)
+// *
+// * Put it on the run-queue if it's not already there. The "current"
+// * thread is always on the run-queue (except when the actual
+// * re-schedule is in progress), and as such you're allowed to do
+// * the simpler "current->state = TASK_RUNNING" to mark yourself
+// * runnable without the overhead of this.
+// *
+// * Returns %true if @p was woken up, %false if it was already running
+// * or @state didn't match @p's state.
+// */
+//static int
+//try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+extern int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags);
+//{
+////   printk(">>try_to_wake_up %d\n",p->pid);
+//
+//     unsigned long flags;
+//     int cpu, success = 0;
+//
+//     smp_wmb();
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//     if (!(p->state & state))
+//             goto out;
+//
+//     success = 1; /* we're going to change ->state */
+//     cpu = task_cpu(p);
+//
+//     if (p->on_rq && ttwu_remote(p, wake_flags))
+//             goto stat;
+//
+////   TODO:framework zeugs
 //     fw_notify(FW_EVENT_OCCURS,&p->fw_task);
-
-       return success;
-//     return 1;
-}
+//
+//#ifdef CONFIG_SMP
+//     /*
+//      * If the owning (remote) cpu is still in the middle of schedule() with
+//      * this task as prev, wait until its done referencing the task.
+//      */
+//     while (p->on_cpu)
+//             cpu_relax();
+//     /*
+//      * Pairs with the smp_wmb() in finish_lock_switch().
+//      */
+//     smp_rmb();
+//
+////   p->sched_contributes_to_load = !!task_contributes_to_load(p);
+//     p->state = TASK_WAKING;
+//
+////   if (p->sched_class->task_waking)
+////           p->sched_class->task_waking(p);
+//
+//     // TODO: simply not using select_task_rq :)
+//     cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+//     if (task_cpu(p) != cpu) {
+//             wake_flags |= WF_MIGRATED;
+//             set_task_cpu(p, cpu);
+//     }
+//#endif /* CONFIG_SMP */
+//
+//     ttwu_queue(p, cpu);
+//stat:
+////   raw_spin_unlock(&rq->lock);
+//out:
+//     raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+//
+////   TODO:framework zeugs
+////   fw_notify(FW_EVENT_OCCURS,&p->fw_task);
+//
+//     return success;
+////   return 1;
+//}
 
 /**
  * kernel/sched/core.c:1497