Enabled SMP in kernel config and added the missing functions to our
scheduler.

author     Jens Krieg <jkrieg@mailbox.tu-berlin.de>
           Mon, 5 Aug 2013 11:29:32 +0000 (13:29 +0200)
committer  Jens Krieg <jkrieg@mailbox.tu-berlin.de>
           Mon, 5 Aug 2013 11:29:32 +0000 (13:29 +0200)

* Code compiles properly, but boot stops while initializing the CPU.
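
For reference, a minimal usage sketch (not part of the commit) of how the
task_rq_lock()/task_rq_unlock() helpers added below are meant to be paired.
The wrapper function is hypothetical; it simply mirrors the locking pattern
used in the wait_task_inactive() hunk further down and assumes it lives in
the same kernel/sched.new/core.c where the helpers are defined.

/*
 * Usage sketch for the new runqueue locking helpers (hypothetical caller):
 * task_rq_lock() takes p->pi_lock and then the lock of the runqueue @p is
 * on, retrying until the task has stopped migrating; task_rq_unlock()
 * releases both in reverse order and restores interrupts.
 */
static void example_inspect_task(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(p, &flags);   /* irqs off, p->pi_lock and rq->lock held */
        /* here rq is guaranteed to be the runqueue @p currently resides on */
        task_rq_unlock(rq, p, &flags);  /* drops rq->lock, then p->pi_lock */
}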

kernel/sched.new/core.c
kernel/sched.new/sched.h
kernel/sched/core.c

diff --git a/kernel/sched.new/core.c b/kernel/sched.new/core.c
index 5afa6c7..8ecf0fa 100644
@@ -105,15 +105,87 @@ __read_mostly int scheduler_running;
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 
-//
-// Prototypes
-//
-static void try_to_wake_up_local(struct task_struct *p);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       for (;;) {
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+       }
+}
+
+
+
+/*
+ * Lock/unlock task from runqueue
+ */
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+       __acquires(p->pi_lock)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       for (;;) {
+               raw_spin_lock_irqsave(&p->pi_lock, *flags);
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+       }
+}
+
+//static void __task_rq_unlock(struct rq *rq)
+//     __releases(rq->lock)
+//{
+//     raw_spin_unlock(&rq->lock);
+//}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
+{
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
 
+///*
+// * this_rq_lock - lock this runqueue and disable interrupts.
+// */
+//static struct rq *this_rq_lock(void)
+//     __acquires(rq->lock)
+//{
+//     struct rq *rq;
 //
-// Functions
+//     local_irq_disable();
+//     rq = this_rq();
+//     raw_spin_lock(&rq->lock);
 //
+//     return rq;
+//}
+
+
+
+/*
+ * Functions
+ */
 
 /**
  * kernel/sched/core.c:6872
@@ -123,6 +195,11 @@ void sched_init(void)
 {
        int i;
 
+       // TODO: SMP
+//#ifdef CONFIG_SMP
+//     init_defrootdomain();
+//#endif
+
 //     init_rt_bandwidth(&def_rt_bandwidth,
 //                     global_rt_period(), global_rt_runtime());
 
@@ -133,7 +210,26 @@ void sched_init(void)
 
                INIT_LIST_HEAD(&rq->rq_list);
 
-
+//#ifdef CONFIG_SMP
+//             rq->sd = NULL;
+//             rq->rd = NULL;
+////           rq->cpu_power = SCHED_POWER_SCALE;
+//             rq->post_schedule = 0;
+////           rq->active_balance = 0;
+////           rq->next_balance = jiffies;
+//             rq->push_cpu = 0;
+//             rq->cpu = i;
+//             rq->online = 0;
+////           rq->idle_stamp = 0;
+////           rq->avg_idle = 2*sysctl_sched_migration_cost;
+//
+//             INIT_LIST_HEAD(&rq->cfs_tasks);
+//
+//             rq_attach_root(rq, &def_root_domain);
+//#ifdef CONFIG_NO_HZ
+//             rq->nohz_flags = 0;
+//#endif
+//#endif
 
                raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
@@ -151,6 +247,8 @@ void sched_init(void)
 
 //             rq->last_load_update_tick = jiffies;
 
+               // TODO: SMP
+
 //             init_rq_hrtick(rq);
                atomic_set(&rq->nr_iowait, 0);
        }
@@ -178,6 +276,8 @@ void sched_init(void)
         */
 //     current->sched_class = &fair_sched_class;
 
+       // TODO: SMP
+
 //     init_sched_fair_class();
 
        scheduler_running = 1;
@@ -239,12 +339,18 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
+       // TODO: SMP
+
        rq->curr = rq->idle = idle;
 
+       // TODO: SMP
+
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
        task_thread_info(idle)->preempt_count = 0;
+
+       // TODO: SMP
 }
 
 /*
@@ -255,6 +361,8 @@ void sched_fork(struct task_struct *p)
 {
        get_cpu();
 
+       // TODO: SMP, see __sched_fork
+
        p->on_rq                        = 0;
 
        p->se.on_rq                     = 0;
@@ -381,6 +489,8 @@ struct task_struct *idle_task(int cpu)
 {
        printk("\ntask_struct");
 
+       // TODO: SMP, return the CPU
+
        return 0;
 }
 
@@ -393,6 +503,8 @@ int idle_cpu(int cpu)
 {
        printk("\nidle_cpu");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -431,14 +543,22 @@ unsigned long nr_running(void)
 {
        printk("\nnr_running");
 
+       // TODO: SMP
+
        return 0;
 }
 
 unsigned long long nr_context_switches(void)
 {
-       printk("\nnr_context_switches");
+//     printk("\nnr_context_switches");
 
-       return 0;
+       int i;
+       unsigned long long sum = 0;
+
+       for_each_possible_cpu(i)
+               sum += cpu_rq(i)->nr_switches;
+
+       return sum;
 }
 
 /*
@@ -449,6 +569,8 @@ unsigned long nr_iowait(void)
 {
        printk("\nnr_iowait");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -459,6 +581,8 @@ unsigned long nr_iowait_cpu(int cpu)
 {
        printk("\nnr_iowait_cpu");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -511,6 +635,8 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 {
        printk("\nsched_getaffinity");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -521,6 +647,8 @@ void sched_init_smp(void)
 {
        //printk("\nsched_init_smp");
 
+       // TODO: SMP
+
        return;
 }
 
@@ -603,6 +731,8 @@ void resched_task(struct task_struct *p)
 {
        assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
+
+       // TODO: SMP
 }
 
 /**
@@ -645,6 +775,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 //     finish_arch_switch(prev);
 //     perf_event_task_sched_in(prev, current);
 //     finish_lock_switch(rq, prev);
+
+       // TODO: SMP, in finish_lock_switch
+
 #ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
@@ -673,6 +806,30 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        }
 }
 
+#ifdef CONFIG_SMP
+
+/* assumes rq->lock is held */
+static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
+{
+}
+
+/* rq->lock is NOT held, but preemption is disabled */
+static inline void post_schedule(struct rq *rq)
+{
+}
+
+#else
+
+static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void post_schedule(struct rq *rq)
+{
+}
+
+#endif
+
 /**
  * kernel/sched/core.c:1905
  * schedule_tail - first thing a freshly forked thread must call.
@@ -689,8 +846,10 @@ asmlinkage void schedule_tail(struct task_struct *prev)
         * FIXME: do we need to worry about rq being invalidated by the
         * task_switch?
         */
-//     post_schedule(rq);
+       // TODO: SMP
+       post_schedule(rq);
 
+       // TODO: replace this irq enable, maybe inside post_schedule
        arch_local_irq_enable();
 
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
@@ -773,7 +932,7 @@ void update_rq_clock(struct rq *rq)
        if (rq->skip_clock_update > 0)
                return;
 
-       delta = sched_clock_cpu(0) - rq->clock;
+       delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
 }
@@ -789,6 +948,7 @@ void scheduler_tick(void)
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        struct task_struct *curr = rq->curr;
+
        u64 now = rq->clock_task;
        unsigned long delta_exec;
 
@@ -817,6 +977,8 @@ void scheduler_tick(void)
        }
 
        raw_spin_unlock(&rq->lock);
+
+       // TODO: SMP
 }
 
 /*
@@ -827,6 +989,8 @@ unsigned long long task_delta_exec(struct task_struct *p)
 {
        //printk("\ntask_delta_exec");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -867,6 +1031,8 @@ unsigned long this_cpu_load(void)
 {
        //printk("\nthis_cpu_load");
 
+       // TODO: SMP, needed in case of load balancing per CPU
+
        return 0;
 }
 
@@ -895,6 +1061,9 @@ void wake_up_new_task(struct task_struct *tsk)
        struct rq *rq = cpu_rq(0);
 
        raw_spin_lock_irqsave(&tsk->pi_lock, flags);
+
+       // TODO: SMP
+
        raw_spin_lock(&rq->lock);
 
 //     if (list_empty(&rq->rq_list)) {
@@ -906,10 +1075,134 @@ void wake_up_new_task(struct task_struct *tsk)
        if (rq->curr == rq->idle)
                resched_task(rq->curr);
 
+       // TODO: SMP
+
        raw_spin_unlock(&rq->lock);
        raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
 }
 
+
+/**
+ * kernel/sched/core.c:1439
+ * try_to_wake_up - wake up a thread
+ * @p: the thread to be awakened
+ * @state: the mask of task states that can be woken
+ * @wake_flags: wake modifier flags (WF_*)
+ *
+ * Put it on the run-queue if it's not already there. The "current"
+ * thread is always on the run-queue (except when the actual
+ * re-schedule is in progress), and as such you're allowed to do
+ * the simpler "current->state = TASK_RUNNING" to mark yourself
+ * runnable without the overhead of this.
+ *
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
+ */
+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+{
+       unsigned long flags;
+       int cpu, success = 0;
+       int ret = 0;
+
+       struct rq *rq = cpu_rq(0);
+
+       smp_wmb();
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
+       if (!(p->state & state))
+               goto out;
+
+       success = 1; /* we're going to change ->state */
+       cpu = task_cpu(p);
+
+       raw_spin_lock(&rq->lock);
+       assert_raw_spin_locked(&rq->lock);
+
+       if (p->on_rq) {
+               if (rq->curr == rq->idle)
+                       resched_task(rq->curr);
+               ret = 1;
+       }
+
+       if (ret)
+               goto stat;
+
+       // TODO: SMP
+
+       p->on_rq = 1;
+       p->state = TASK_RUNNING;
+
+//     if (list_empty(&rq->rq_list)) {
+//             printk("Liste leer (try_to_wake_up) -> Task: %p\n", (void*)p);
+//     }
+       list_add(&p->rq_tasks, &rq->rq_list);
+
+       if (rq->curr == rq->idle)
+               resched_task(rq->curr);
+
+       /* if a worker is waking up, notify workqueue */
+       if (p->flags & PF_WQ_WORKER)
+               wq_worker_waking_up(p, 0);
+stat:
+       raw_spin_unlock(&rq->lock);
+out:
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+       return success;
+}
+
+/**
+ * kernel/sched/core.c:1497
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not already there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+       struct rq *rq = task_rq(p);
+
+       if (WARN_ON_ONCE(rq != this_rq()) ||
+           WARN_ON_ONCE(p == current))
+               return;
+
+       lockdep_assert_held(&rq->lock);
+
+       assert_raw_spin_locked(&rq->lock);
+
+       if (!raw_spin_trylock(&p->pi_lock)) {
+               raw_spin_unlock(&rq->lock);
+               raw_spin_lock(&p->pi_lock);
+               raw_spin_lock(&rq->lock);
+       }
+
+       if (!(p->state & TASK_NORMAL))
+               goto out;
+
+       p->on_rq = 1;
+       p->state = TASK_RUNNING;
+
+       if (!&rq->lock) {
+               printk("Lock not set!!!!!\n");
+       }
+
+       list_add(&p->rq_tasks, &rq->rq_list);
+
+       /* if a worker is waking up, notify workqueue */
+       if (p->flags & PF_WQ_WORKER)
+               wq_worker_waking_up(p, cpu_of(rq));
+
+       if (rq->curr == rq->idle)
+               resched_task(rq->curr);
+
+       // TODO: SMP, used in original but just for statistics
+
+out:
+       raw_spin_unlock(&p->pi_lock);
+}
+
 /*
  * kernel/sched/core.c:1931
  * context_switch - switch to the new MM and the new
@@ -1028,6 +1321,7 @@ need_resched:
 
        raw_spin_lock_irq(&rq->lock);
 
+//     printk("Schedule on CPU %i\n", cpu);
 
 //     switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
@@ -1054,7 +1348,7 @@ need_resched:
 //             switch_count = &prev->nvcsw;
        }
 
-//     pre_schedule(rq, prev);
+       pre_schedule(rq, prev);
 
 //     if (unlikely(!rq->nr_running))
 //             idle_balance(cpu, rq);
@@ -1093,6 +1387,8 @@ need_resched:
 //             ++*switch_count;
 
                context_switch(rq, prev, next); /* unlocks the rq */
+
+               // TODO: remove irq enable
                arch_local_irq_enable();
 
                /*
@@ -1107,7 +1403,7 @@ need_resched:
        else
                raw_spin_unlock_irq(&rq->lock);
 
-//     post_schedule(rq);
+       post_schedule(rq);
 
        sched_preempt_enable_no_resched();
        if (need_resched())
@@ -1119,128 +1415,15 @@ need_resched:
  */
 asmlinkage void __sched schedule(void)
 {
+//     struct task_struct *tsk = current;
+//
+//     if (!tsk->state || tsk_is_pi_blocked(tsk))
+//             return;
+
        __schedule();
 }
 EXPORT_SYMBOL(schedule);
 
-/**
- * kernel/sched/core.c:1439
- * try_to_wake_up - wake up a thread
- * @p: the thread to be awakened
- * @state: the mask of task states that can be woken
- * @wake_flags: wake modifier flags (WF_*)
- *
- * Put it on the run-queue if it's not already there. The "current"
- * thread is always on the run-queue (except when the actual
- * re-schedule is in progress), and as such you're allowed to do
- * the simpler "current->state = TASK_RUNNING" to mark yourself
- * runnable without the overhead of this.
- *
- * Returns %true if @p was woken up, %false if it was already running
- * or @state didn't match @p's state.
- */
-static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-{
-       unsigned long flags;
-       int cpu, success = 0;
-       int ret = 0;
-
-       struct rq *rq = cpu_rq(0);
-
-       smp_wmb();
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       if (!(p->state & state))
-               goto out;
-
-       raw_spin_lock(&rq->lock);
-       assert_raw_spin_locked(&rq->lock);
-
-       if (p->on_rq) {
-               if (rq->curr == rq->idle)
-                       resched_task(rq->curr);
-               ret = 1;
-       }
-
-       success = 1; /* we're going to change ->state */
-       cpu = task_cpu(p);
-
-       if (ret)
-               goto stat;
-
-       p->on_rq = 1;
-       p->state = TASK_RUNNING;
-
-//     if (list_empty(&rq->rq_list)) {
-//             printk("Liste leer (try_to_wake_up) -> Task: %p\n", (void*)p);
-//     }
-       list_add(&p->rq_tasks, &rq->rq_list);
-
-       if (rq->curr == rq->idle)
-               resched_task(rq->curr);
-
-       /* if a worker is waking up, notify workqueue */
-       if (p->flags & PF_WQ_WORKER)
-               wq_worker_waking_up(p, 0);
-stat:
-       raw_spin_unlock(&rq->lock);
-out:
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-       return success;
-}
-
-
-/**
- * kernel/sched/core.c:1497
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
-       struct rq *rq = task_rq(p);
-
-       if (WARN_ON_ONCE(rq != this_rq()) ||
-           WARN_ON_ONCE(p == current))
-               return;
-
-       lockdep_assert_held(&rq->lock);
-
-       assert_raw_spin_locked(&rq->lock);
-
-       if (!raw_spin_trylock(&p->pi_lock)) {
-               raw_spin_unlock(&rq->lock);
-               raw_spin_lock(&p->pi_lock);
-               raw_spin_lock(&rq->lock);
-       }
-
-       if (!(p->state & TASK_NORMAL))
-               goto out;
-
-       p->on_rq = 1;
-       p->state = TASK_RUNNING;
-
-       if (!&rq->lock) {
-               printk("Lock not set!!!!!\n");
-       }
-
-       list_add(&p->rq_tasks, &rq->rq_list);
-
-       /* if a worker is waking up, notify workqueue */
-       if (p->flags & PF_WQ_WORKER)
-               wq_worker_waking_up(p, cpu_of(rq));
-
-       if (rq->curr == rq->idle)
-               resched_task(rq->curr);
-
-out:
-       raw_spin_unlock(&p->pi_lock);
-}
-
 /*
  * kernel/sched/core.c:3125
  * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
@@ -1556,6 +1739,8 @@ void __sched yield(void)
 {
        printk("\nyield");
 
+       // TODO: SMP
+
        return;
 }
 
@@ -1568,6 +1753,8 @@ inline int task_curr(const struct task_struct *p)
 {
        printk("\ntask_curr");
 
+       // TODO: SMP
+
        return 0;
 }
 
@@ -1777,6 +1964,309 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
 
 
 
+/*
+ *
+ * SMP
+ *
+ */
+#ifdef CONFIG_SMP
+
+///*
+// * kernel/sched/core.c:4822
+// * Move (not current) task off this cpu, onto dest cpu. We're doing
+// * this because either it can't run here any more (set_cpus_allowed()
+// * away from this CPU, or CPU going down), or because we're
+// * attempting to rebalance this task on exec (sched_exec).
+// *
+// * So we race with normal scheduler movements, but that's OK, as long
+// * as the task is no longer on this CPU.
+// *
+// * Returns non-zero if task was successfully migrated.
+// */
+//static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+//{
+//     struct rq *rq_dest, *rq_src;
+//     int ret = 0;
+//
+//     if (unlikely(!cpu_active(dest_cpu)))
+//             return ret;
+//
+//     rq_src = cpu_rq(src_cpu);
+//     rq_dest = cpu_rq(dest_cpu);
+//
+//     raw_spin_lock(&p->pi_lock);
+//     double_rq_lock(rq_src, rq_dest);
+//     /* Already moved. */
+//     if (task_cpu(p) != src_cpu)
+//             goto done;
+//     /* Affinity changed (again). */
+//     if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+//             goto fail;
+//
+//     /*
+//      * If we're not on a rq, the next wake-up will ensure we're
+//      * placed properly.
+//      */
+//     if (p->on_rq) {
+////           dequeue_task(rq_src, p, 0);
+////           list_del(&next->rq_tasks);
+////           set_task_cpu(p, dest_cpu);
+////           enqueue_task(rq_dest, p, 0);
+////           check_preempt_curr(rq_dest, p, 0);
+//     }
+//done:
+//     ret = 1;
+//fail:
+//     double_rq_unlock(rq_src, rq_dest);
+//     raw_spin_unlock(&p->pi_lock);
+//     return ret;
+//}
+
+void do_set_cpus_allowed(struct task_struct *p,
+                              const struct cpumask *new_mask)
+{
+//     if (p->sched_class && p->sched_class->set_cpus_allowed)
+//             p->sched_class->set_cpus_allowed(p, new_mask);
+//
+//     cpumask_copy(&p->cpus_allowed, new_mask);
+//     p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p,
+                               const struct cpumask *new_mask)
+{
+//     unsigned long flags;
+//     struct rq *rq;
+//     unsigned int dest_cpu;
+//     int ret = 0;
+//
+//     rq = task_rq_lock(p, &flags);
+//
+//     if (cpumask_equal(&p->cpus_allowed, new_mask))
+//             goto out;
+//
+//     if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+//             ret = -EINVAL;
+//             goto out;
+//     }
+//
+//     if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
+//             ret = -EINVAL;
+//             goto out;
+//     }
+//
+//     do_set_cpus_allowed(p, new_mask);
+//
+//     /* Can the task run on the task's current CPU? If so, we're done */
+//     if (cpumask_test_cpu(task_cpu(p), new_mask))
+//             goto out;
+//
+//     dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+//     if (p->on_rq) {
+//             /* Need help from migration thread: drop lock and wait. */
+//             task_rq_unlock(rq, p, &flags);
+//             stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+//
+//             local_irq_disable();
+//             __migrate_task(p, raw_smp_processor_id(), dest_cpu);
+//             local_irq_enable();
+//
+//             tlb_migrate_finish(p->mm);
+//             return 0;
+//     }
+//out:
+//     task_rq_unlock(rq, p, &flags);
+//
+//     return ret;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+void scheduler_ipi(void)
+{
+
+}
+
+/*
+ * kernel/sched/core.c:1011
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change.  If it changes, i.e. @p might have woken up,
+ * then return zero.  When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count).  If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+{
+       unsigned long flags;
+       int running, on_rq;
+       unsigned long ncsw;
+       struct rq *rq;
+
+       for (;;) {
+               /*
+                * We do the initial early heuristics without holding
+                * any task-queue locks at all. We'll only try to get
+                * the runqueue lock when things look like they will
+                * work out!
+                */
+               rq = task_rq(p);
+
+               /*
+                * If the task is actively running on another CPU
+                * still, just relax and busy-wait without holding
+                * any locks.
+                *
+                * NOTE! Since we don't hold any locks, it's not
+                * even sure that "rq" stays as the right runqueue!
+                * But we don't care, since "task_running()" will
+                * return false if the runqueue has changed and p
+                * is actually now running somewhere else!
+                */
+               while (task_running(rq, p)) {
+                       if (match_state && unlikely(p->state != match_state))
+                               return 0;
+                       cpu_relax();
+               }
+
+               /*
+                * Ok, time to look more closely! We need the rq
+                * lock now, to be *sure*. If we're wrong, we'll
+                * just go back and repeat.
+                */
+               rq = task_rq_lock(p, &flags);
+//             trace_sched_wait_task(p);
+               running = task_running(rq, p);
+               on_rq = p->on_rq;
+               ncsw = 0;
+               if (!match_state || p->state == match_state)
+                       ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+               task_rq_unlock(rq, p, &flags);
+
+               /*
+                * If it changed from the expected state, bail out now.
+                */
+               if (unlikely(!ncsw))
+                       break;
+
+               /*
+                * Was it really running after all now that we
+                * checked with the proper locks actually held?
+                *
+                * Oops. Go back and try again..
+                */
+               if (unlikely(running)) {
+                       cpu_relax();
+                       continue;
+               }
+
+               /*
+                * It's not enough that it's not actively running,
+                * it must be off the runqueue _entirely_, and not
+                * preempted!
+                *
+                * So if it was still runnable (but just not actively
+                * running right now), it's preempted, and we should
+                * yield - it could be a while.
+                */
+               if (unlikely(on_rq)) {
+                       ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+                       continue;
+               }
+
+               /*
+                * Ahh, all good. It wasn't running, and it wasn't
+                * runnable, which means that it will never become
+                * running in the future either. We're all done!
+                */
+               break;
+       }
+
+       return ncsw;
+}
+
+void kick_process(struct task_struct *tsk)
+{
+
+}
+
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+
+}
+
+bool completion_done(struct completion *x)
+{
+       return 0;
+}
+
+void sched_exec(void)
+{
+
+}
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+       return 0;
+}
+
+///*
+// * kernel/sched/core.c:5512
+// */
+//static int init_rootdomain(struct root_domain *rd)
+//{
+//     memset(rd, 0, sizeof(*rd));
+//
+//     if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+//             goto out;
+//     if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+//             goto free_span;
+//     if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+//             goto free_online;
+//
+//     if (cpupri_init(&rd->cpupri) != 0)
+//             goto free_rto_mask;
+//     return 0;
+//
+//free_rto_mask:
+//     free_cpumask_var(rd->rto_mask);
+//free_online:
+//     free_cpumask_var(rd->online);
+//free_span:
+//     free_cpumask_var(rd->span);
+//out:
+//     return -ENOMEM;
+//}
+//
+///*
+// * By default the system creates a single root-domain with all cpus as
+// * members (mimicking the global state we have today).
+// */
+//struct root_domain def_root_domain;
+//
+///*
+// * kernel/sched/core.c:5543
+// */
+//static void init_defrootdomain(void)
+//{
+//     init_rootdomain(&def_root_domain);
+//
+//     atomic_set(&def_root_domain.refcount, 1);
+//}
+#endif
+
+
 
 /*
  * Syscalls
@@ -1826,6 +2316,9 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
 {
        printk("SYSCALL sched_setaffinity\n");
+
+       // TODO: SMP
+
        return 0;
 }
 
diff --git a/kernel/sched.new/sched.h b/kernel/sched.new/sched.h
index d8222de..07dd7d6 100644
@@ -29,6 +29,14 @@ struct rq {
 
        int skip_clock_update;
 
+#ifdef CONFIG_SMP
+
+       /* cpu of this runqueue: */
+       int cpu;
+
+
+#endif
+
 };
 
 static inline int cpu_of(struct rq *rq)
@@ -172,3 +180,16 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive);
 
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+       return rq->curr == p;
+}
+
+static inline int task_running(struct rq *rq, struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+       return p->on_cpu;
+#else
+       return task_current(rq, p);
+#endif
+}
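
Note on the hunk above: it adds an rq->cpu field under CONFIG_SMP, but the
commit does not show the cpu_of() helper (visible in the context above)
being taught about it, even though update_rq_clock() in core.c now calls
sched_clock_cpu(cpu_of(rq)). A hedged sketch of the mainline-style
definition it would presumably need — an assumption, not part of this
commit:

/* Sketch only: mainline-style cpu_of(), assuming rq->cpu is now populated. */
static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;         /* the per-runqueue CPU id added above */
#else
        return 0;               /* UP: there is only CPU 0 */
#endif
}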
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0c8253d..1184dbc 100644
@@ -137,23 +137,6 @@ const_debug unsigned int sysctl_sched_features =
 #include "features.h"
        0;
 
-
-
-
-
-
-
-volatile int point_of_return = 0;
-volatile struct task_struct *test_tsk = 0;
-volatile int test_tsk_state = 0;
-volatile int test_tsk_pid = 203;
-
-
-
-
-
-
-
 #undef SCHED_FEAT
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -2916,16 +2899,6 @@ need_resched:
 
        raw_spin_lock_irq(&rq->lock);
 
-       if (point_of_return) {
-               point_of_return = 0;
-               test_tsk = prev;
-               test_tsk_state = prev->state;
-               if (test_tsk->state != test_tsk_state) {
-                       printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
-                       printk("state changed from %i to %i\n", (int)test_tsk_state, (int)test_tsk->state);
-               }
-       }
-
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2960,12 +2933,6 @@ need_resched:
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;
 
-       if ((int)next->pid == test_tsk_pid) {
-               printk("task(%i), address: %p\n", test_tsk_pid, (void*)prev);
-               printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
-               point_of_return = 1;
-       }
-
        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
@@ -3312,9 +3279,6 @@ do_wait_for_common(struct completion *x,
                                break;
                        }
                        __set_current_state(state);
-                       if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
-                               printk("task(%i): do_wait_for_common -> %i (%ld)\n", test_tsk_pid, state, (long)timeout);
-                       }
                        spin_unlock_irq(&x->wait.lock);
                        timeout = action(timeout);
                        spin_lock_irq(&x->wait.lock);