Simple RR is running perfectly :)
author	Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 30 Jul 2013 13:30:44 +0000 (15:30 +0200)
committer	Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 30 Jul 2013 13:30:44 +0000 (15:30 +0200)
arch/x86/kernel/process.c
kernel/sched.new/core.c

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cee1e7c..cca29e0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -321,6 +321,7 @@ void cpu_idle(void)
 
        while (1) {
                tick_nohz_idle_enter();
+               //printk("Idle process running\n");
                while (!need_resched()) {
                        rmb();
 
diff --git a/kernel/sched.new/core.c b/kernel/sched.new/core.c
index aabccae..5afa6c7 100644
--- a/kernel/sched.new/core.c
+++ b/kernel/sched.new/core.c
@@ -183,6 +183,42 @@ void sched_init(void)
        scheduler_running = 1;
 }
 
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+static inline int preempt_count_equals(int preempt_offset)
+{
+       int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+
+       return (nested == preempt_offset);
+}
+
+void __might_sleep(const char *file, int line, int preempt_offset)
+{
+       static unsigned long prev_jiffy;        /* ratelimiting */
+
+       rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
+       if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+           system_state != SYSTEM_RUNNING || oops_in_progress)
+               return;
+       if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+               return;
+       prev_jiffy = jiffies;
+
+       printk(KERN_ERR
+               "BUG: sleeping function called from invalid context at %s:%d\n",
+                       file, line);
+       printk(KERN_ERR
+               "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+                       in_atomic(), irqs_disabled(),
+                       current->pid, current->comm);
+
+       debug_show_held_locks(current);
+       if (irqs_disabled())
+               print_irqtrace_events(current);
+       dump_stack();
+}
+EXPORT_SYMBOL(__might_sleep);
+#endif
+
 /**
  * kernel/sched/core.c:4674
  * init_idle - set up an idle thread for a given CPU
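The re-added __might_sleep() above warns when a function that may sleep is
entered with preemption disabled or interrupts off. A minimal sketch of code
that would trip it; some_lock and some_mutex are hypothetical, not from this
commit:

	static DEFINE_SPINLOCK(some_lock);	/* hypothetical */
	static DEFINE_MUTEX(some_mutex);	/* hypothetical */

	spin_lock(&some_lock);		/* raises preempt_count */
	mutex_lock(&some_mutex);	/* may sleep -> might_sleep() -> __might_sleep() warns */
	mutex_unlock(&some_mutex);
	spin_unlock(&some_lock);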
@@ -217,6 +253,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  */
 void sched_fork(struct task_struct *p)
 {
+       get_cpu();
 
        p->on_rq                        = 0;
 
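The get_cpu() added to sched_fork() disables preemption so the task cannot
migrate mid-setup; the matching put_cpu() presumably follows later in the
function, outside this hunk. The usual pairing:

	int cpu = get_cpu();	/* disable preemption, return current CPU id */
	/* ... per-CPU setup that must not migrate ... */
	put_cpu();		/* re-enable preemption */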
@@ -860,6 +897,9 @@ void wake_up_new_task(struct task_struct *tsk)
        raw_spin_lock_irqsave(&tsk->pi_lock, flags);
        raw_spin_lock(&rq->lock);
 
+//     if (list_empty(&rq->rq_list)) {
+//             printk("List empty (wake_up_new_task) -> task: %p\n", (void*)tsk);
+//     }
        list_add(&tsk->rq_tasks, &rq->rq_list);
        tsk->on_rq = 1;
 
@@ -870,21 +910,6 @@ void wake_up_new_task(struct task_struct *tsk)
        raw_spin_unlock_irqrestore(&tsk->pi_lock, flags);
 }
 
-/*
- * kernel/sched/core.c:4315
- */
-int __sched _cond_resched(void)
-{
-       if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE)) {
-               add_preempt_count(PREEMPT_ACTIVE);
-               schedule();
-               sub_preempt_count(PREEMPT_ACTIVE);
-               return 1;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(_cond_resched);
-
 /*
  * kernel/sched/core.c:1931
  * context_switch - switch to the new MM and the new
@@ -1048,16 +1073,19 @@ need_resched:
                next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
                list_del(&next->rq_tasks);
 //             list_add_tail(&next->rq_tasks, &rq->rq_list);
-               next->se.exec_start = rq->clock_task;
        }
-       else
+       else {
                next = rq->idle;
+//             printk("Liste leer (schedule)\n");
+       }
+       next->se.exec_start = rq->clock_task;
+
 
        clear_tsk_need_resched(prev);
 //     rq->skip_clock_update = 0;
 
-//     if ((long)next == 0xffff880007059e40)
-//             printk("task(%i) scheduled\n", (int)next->pid);
+//     if (next->pid == 3)
+//             printk("task(3) scheduled\n");
 
        if (likely(prev != next)) {
                rq->nr_switches++;
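Two points about the pick above. First, next->se.exec_start is now stamped on
both paths, so the idle task gets a start time as well. Second, with the
list_add_tail() re-enqueue commented out, a picked task leaves rq->rq_list
entirely and only returns on wakeup; keeping that line would instead rotate
the queue, the classic round-robin pattern:

	/* rotation variant, per the commented-out line above */
	next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
	list_del(&next->rq_tasks);
	list_add_tail(&next->rq_tasks, &rq->rq_list);	/* head moves to tail */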
@@ -1143,6 +1171,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
        p->on_rq = 1;
        p->state = TASK_RUNNING;
 
+//     if (list_empty(&rq->rq_list)) {
+//             printk("List empty (try_to_wake_up) -> task: %p\n", (void*)p);
+//     }
        list_add(&p->rq_tasks, &rq->rq_list);
 
        if (rq->curr == rq->idle)
@@ -1193,6 +1224,10 @@ static void try_to_wake_up_local(struct task_struct *p)
        p->on_rq = 1;
        p->state = TASK_RUNNING;
 
+       /* &rq->lock is never NULL; check that the rq lock is really held */
+       if (!raw_spin_is_locked(&rq->lock))
+               printk("Lock not held (try_to_wake_up_local)\n");
+
        list_add(&p->rq_tasks, &rq->rq_list);
 
        /* if a worker is waking up, notify workqueue */
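An alternative to the hand-rolled check above is what mainline's
try_to_wake_up_local() uses: let lockdep enforce the invariant. This compiles
away entirely without CONFIG_LOCKDEP:

	lockdep_assert_held(&rq->lock);	/* WARNs if rq->lock is not held */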
@@ -1395,6 +1430,31 @@ int __cond_resched_lock(spinlock_t *lock)
        return 0;
 }
 
+/*
+ * kernel/sched/core.c:4315
+ */
+static inline int should_resched(void)
+{
+       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+}
+
+static void __cond_resched(void)
+{
+       add_preempt_count(PREEMPT_ACTIVE);
+       __schedule();
+       sub_preempt_count(PREEMPT_ACTIVE);
+}
+
+int __sched _cond_resched(void)
+{
+       if (should_resched()) {
+               __cond_resched();
+               return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(_cond_resched);
+
 /*
  * kernel/sched/core.c:4333
  */
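cond_resched() expands to the _cond_resched() re-added above; long kernel
loops call it to yield voluntarily, which this single-runqueue RR design
relies on. A sketch with a hypothetical work loop (nr_items and
process_item() are made up):

	int i;

	for (i = 0; i < nr_items; i++) {
		process_item(i);
		cond_resched();	/* reschedules if need_resched() is set */
	}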
@@ -1644,6 +1704,26 @@ void __sched wait_for_completion_io(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_io);
 
+/**
+ * kernel/sched/core.c:3416
+ * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
+ * @x:  holds the state of this particular completion
+ * @timeout:  timeout value in jiffies
+ *
+ * This waits for either a completion of a specific task to be signaled or for a
+ * specified timeout to expire. The timeout is in jiffies. It is not
+ * interruptible. The caller is accounted as waiting for IO.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
+ */
+unsigned long __sched
+wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
+{
+       return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io_timeout);
+
 /*
  * kernel/sched/core.c:4634
  */
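A usage sketch for the new export; start_io() stands in for any hypothetical
producer that eventually calls complete(&done):

	struct completion done;
	unsigned long left;

	init_completion(&done);
	start_io(&done);	/* hypothetical: signals &done when I/O finishes */
	left = wait_for_completion_io_timeout(&done, 5 * HZ);	/* up to 5 s */
	if (!left)
		pr_err("I/O completion timed out\n");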
@@ -1717,6 +1797,7 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  */
 SYSCALL_DEFINE1(nice, int, increment)
 {
+       printk("SYSCALL nice\n");
        return 0;
 }
 
@@ -1730,6 +1811,7 @@ SYSCALL_DEFINE1(nice, int, increment)
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
 {
+       printk("SYSCALL sched_getaffinity\n");
        return 0;
 }
 
@@ -1743,6 +1825,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
 {
+       printk("SYSCALL sched_setaffinity\n");
        return 0;
 }
 
@@ -1758,6 +1841,7 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
 SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                struct timespec __user *, interval)
 {
+       printk("SYSCALL sched_rr_get_interval\n");
        return 0;
 }
 
@@ -1770,6 +1854,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
  */
 SYSCALL_DEFINE0(sched_yield)
 {
+       printk("SYSCALL sched_yield\n");
        return 0;
 }
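The printk() lines in the stubs above make it visible in dmesg which
scheduler syscalls userspace actually hits. A minimal userspace probe
(hypothetical test program, not part of this commit):

	#include <sched.h>
	#include <unistd.h>

	int main(void)
	{
		sched_yield();	/* kernel logs "SYSCALL sched_yield" */
		nice(1);	/* kernel logs "SYSCALL nice" */
		return 0;
	}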