Fixed scheduler_tick. Successful bootup, but just once
author    Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 23 Jul 2013 12:03:49 +0000 (14:03 +0200)
committer Jens Krieg <jkrieg@mailbox.tu-berlin.de>
Tue, 23 Jul 2013 12:03:49 +0000 (14:03 +0200)
kernel/sched.new/core.c
kernel/sched/core.c
kernel/workqueue.c

kernel/sched.new/core.c
index a33acb3..93307b5 100644
 
 
 
+volatile int point_of_return = 0;
+volatile struct task_struct *test_tsk = 0;
+volatile int test_tsk_state = 0;
+volatile int test_tsk_pid = 292;
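+/*
+ * The four variables above are debug instrumentation: __schedule() arms
+ * point_of_return when it picks the task with pid test_tsk_pid, and on the
+ * next pass records that task and its state in test_tsk / test_tsk_state,
+ * so the tracing printk()s sprinkled over the wakeup and workqueue paths
+ * (most of them currently commented out) can follow its state transitions.
+ */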
 
 
 //
@@ -183,6 +187,10 @@ void sched_init(void)
 
 //     init_sched_fair_class();
 
+
+       if (IS_ENABLED(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK))
+               printk("----> CONFIG_HAVE_UNSTABLE_SCHED_CLOCK is set!\n");
+
        scheduler_running = 1;
 }
 
@@ -206,6 +214,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): init_idle -> %i\n", test_tsk_pid, TASK_RUNNING);
+//     }
        rq->curr = rq->idle = idle;
 
        raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -236,6 +247,9 @@ void sched_fork(struct task_struct *p)
         */
        p->state = TASK_RUNNING;
 
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): sched_fork -> %i\n", test_tsk_pid, TASK_RUNNING);
+//     }
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
@@ -664,8 +678,70 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 }
 
 
-//static void update_rq_clock_task(struct rq *rq, s64 delta);
+/**
+ * kernel/sched/core.c:769
+ */
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compiler should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+       s64 steal = 0, irq_delta = 0;
+#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+       irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+       /*
+        * Since irq_time is only updated on {soft,}irq_exit, we might run into
+        * this case when a previous update_rq_clock() happened inside a
+        * {soft,}irq region.
+        *
+        * When this happens, we stop ->clock_task and only update the
+        * prev_irq_time stamp to account for the part that fit, so that a next
+        * update will consume the rest. This ensures ->clock_task is
+        * monotonic.
+        *
+        * It does, however, cause some slight misattribution of {soft,}irq
+        * time; a more accurate solution would be to update the irq_time using
+        * the current rq->clock timestamp, except that would require using
+        * atomic ops.
+        */
+       if (irq_delta > delta)
+               irq_delta = delta;
+
+       rq->prev_irq_time += irq_delta;
+       delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+       if (static_key_false((&paravirt_steal_rq_enabled))) {
+               u64 st;
+
+               steal = paravirt_steal_clock(cpu_of(rq));
+               steal -= rq->prev_steal_time_rq;
+
+               if (unlikely(steal > delta))
+                       steal = delta;
+
+               st = steal_ticks(steal);
+               steal = st * TICK_NSEC;
+
+               rq->prev_steal_time_rq += steal;
+
+               delta -= steal;
+       }
+#endif
+
+       rq->clock_task += delta;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+       if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+               sched_rt_avg_update(rq, irq_delta + steal);
+#endif
+}
 
+//static void update_rq_clock_task(struct rq *rq, s64 delta);
 void update_rq_clock(struct rq *rq)
 {
        s64 delta;
@@ -675,7 +751,7 @@ void update_rq_clock(struct rq *rq)
 
        delta = sched_clock_cpu(0) - rq->clock;
        rq->clock += delta;
-//     update_rq_clock_task(rq, delta);
+       update_rq_clock_task(rq, delta);
 }
 
 
@@ -686,26 +762,37 @@ void update_rq_clock(struct rq *rq)
  */
 void scheduler_tick(void)
 {
-//     printk("\nscheduler_tick");
-//     int cpu = smp_processor_id();
-//     struct rq *rq = cpu_rq(cpu);
-//     struct task_struct *curr = rq->curr;
-//
-//     sched_clock_tick();
-//
-//     raw_spin_lock(&rq->lock);
-////   update_rq_clock(rq);
-////   update_cpu_load_active(rq);
-//     curr->sched_class->task_tick(rq, curr, 0);
-//     raw_spin_unlock(&rq->lock);
-//
-//     perf_event_task_tick();
-//
-//#ifdef CONFIG_SMP
-//     rq->idle_balance = idle_cpu(cpu);
-//     trigger_load_balance(rq, cpu);
-//#endif
-//
+       int cpu = smp_processor_id();
+       struct rq *rq = cpu_rq(cpu);
+       struct task_struct *curr = rq->curr;
+       u64 now = rq->clock_task;
+       unsigned long delta_exec;
+
+       sched_clock_tick();
+
+       raw_spin_lock(&rq->lock);
+       update_rq_clock(rq);
+
+       /*
+        * Nothing to account if the runqueue has no current task.
+        */
+       if (unlikely(!curr)) {
+               raw_spin_unlock(&rq->lock);
+               return;
+       }
+
+       /*
+        * Get the amount of time the current task was running
+        * since the last time we changed load (this cannot
+        * overflow on 32 bits):
+        */
+       delta_exec = (unsigned long)(now - curr->se.exec_start);
+
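+       /*
+        * Round-robin policy: once the current task has run for more than
+        * RR_TIMESLICE since it was switched in, ask for a reschedule.
+        */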
+       if (delta_exec > RR_TIMESLICE) {
+               resched_task(curr);
+       }
+
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -924,10 +1011,23 @@ need_resched:
 
        raw_spin_lock_irq(&rq->lock);
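+       /*
+        * Debug: point_of_return was armed on the previous pass (see the
+        * pick below) when the traced task was chosen; capture the task and
+        * the state it re-enters __schedule() with.
+        */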
 
+       if (point_of_return) {
+               point_of_return = 0;
+               test_tsk = prev;
+               test_tsk_state = prev->state;
+//             if (test_tsk->state != test_tsk_state) {
+//                     printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
+//                     printk("state changed from %i to %i\n", (int)test_tsk_state, (int)test_tsk->state);
+//             }
+       }
+
 //     switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
                        prev->state = TASK_RUNNING;
+//                     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//                             printk("task(%i): __schedule -> %i\n", test_tsk_pid, TASK_RUNNING);
+//                     }
                } else {
 //                     deactivate_task(rq, prev, DEQUEUE_SLEEP);
                        list_del_init(&prev->rq_tasks);
@@ -964,12 +1064,16 @@ need_resched:
                next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
                list_del(&next->rq_tasks);
                list_add_tail(&next->rq_tasks, &rq->rq_list);
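+               /* Stamp the switch-in time; scheduler_tick() measures the
+                * task's running time against it to enforce the timeslice. */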
+               next->se.exec_start = rq->clock_task;
        }
        else
                next = rq->idle;
 
-       if ((int)next->pid == 292)
-               printk("\nKernel Thread 292");
+       if ((int)next->pid == test_tsk_pid) {
+               printk("task(%i), address: %p\n", test_tsk_pid, (void*)prev);
+               printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
+               point_of_return = 1;
+       }
 
        clear_tsk_need_resched(prev);
 //     rq->skip_clock_update = 0;
@@ -1044,6 +1148,9 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        p->on_rq = 1;
        p->state = TASK_RUNNING;
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): try_to_wake_up -> %i\n", test_tsk_pid, TASK_RUNNING);
+//     }
        list_add(&p->rq_tasks, &rq->rq_list);
 
        if (rq->curr == rq->idle)
@@ -1090,6 +1197,9 @@ static void try_to_wake_up_local(struct task_struct *p)
 
        p->on_rq = 1;
        p->state = TASK_RUNNING;
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): try_to_wake_up_local -> %i\n", test_tsk_pid, TASK_RUNNING);
+//     }
        list_add(&p->rq_tasks, &rq->rq_list);
 
        /* if a worker is waking up, notify workqueue */
@@ -1182,6 +1292,9 @@ do_wait_for_common(struct completion *x,
                                break;
                        }
                        __set_current_state(state);
+//                     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//                             printk("task(%i): do_wait_for_common -> %i (%ld)\n", test_tsk_pid, state, (long)timeout);
+//                     }
                        spin_unlock_irq(&x->wait.lock);
                        timeout = action(timeout);
                        spin_lock_irq(&x->wait.lock);
kernel/sched/core.c
index 933fcde..0c8253d 100644
@@ -137,6 +137,23 @@ const_debug unsigned int sysctl_sched_features =
 #include "features.h"
        0;
 
+volatile int point_of_return = 0;
+volatile struct task_struct *test_tsk = 0;
+volatile int test_tsk_state = 0;
+volatile int test_tsk_pid = 203;
+
 #undef SCHED_FEAT
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -1483,6 +1500,10 @@ stat:
 out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): try_to_wake_up -> %i\n", test_tsk_pid, TASK_RUNNING);
+//     }
+
        return success;
 }
 
@@ -2895,6 +2916,16 @@ need_resched:
 
        raw_spin_lock_irq(&rq->lock);
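+       /*
+        * Debug: if the traced task was picked on the previous pass, record
+        * it and the state it entered __schedule() with; the immediate
+        * re-check only triggers if the state changes between the two
+        * volatile reads.
+        */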
 
+       if (point_of_return) {
+               point_of_return = 0;
+               test_tsk = prev;
+               test_tsk_state = prev->state;
+               if (test_tsk->state != test_tsk_state) {
+                       printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
+                       printk("state changed from %i to %i\n", (int)test_tsk_state, (int)test_tsk->state);
+               }
+       }
+
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2929,6 +2960,12 @@ need_resched:
        clear_tsk_need_resched(prev);
        rq->skip_clock_update = 0;
 
+       if ((int)next->pid == test_tsk_pid) {
+               printk("task(%i), address: %p\n", test_tsk_pid, (void*)prev);
+               printk("state: %i on_rq: %i\n", (int)prev->state, (int)prev->on_rq);
+               point_of_return = 1;
+       }
+
        if (likely(prev != next)) {
                rq->nr_switches++;
                rq->curr = next;
@@ -3275,6 +3312,9 @@ do_wait_for_common(struct completion *x,
                                break;
                        }
                        __set_current_state(state);
+                       if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+                               printk("task(%i): do_wait_for_common -> %i (%ld)\n", test_tsk_pid, state, (long)timeout);
+                       }
                        spin_unlock_irq(&x->wait.lock);
                        timeout = action(timeout);
                        spin_lock_irq(&x->wait.lock);
kernel/workqueue.c
index b48cd59..929ffcd 100644
@@ -44,7 +44,8 @@
 #include <linux/hashtable.h>
 
 #include "workqueue_internal.h"
 
+extern volatile struct task_struct *test_tsk;
+extern volatile int test_tsk_pid;
+
 enum {
        /*
         * worker_pool flags
@@ -1933,6 +1934,9 @@ restart:
                        break;
 
                __set_current_state(TASK_INTERRUPTIBLE);
+//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//                     printk("task(%i): maybe_create_worker -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
+//             }
                schedule_timeout(CREATE_COOLDOWN);
 
                if (!need_to_create_worker(pool))
@@ -2297,6 +2301,9 @@ sleep:
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): worker_thread -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
+//     }
        spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
@@ -2338,9 +2345,15 @@ static int rescuer_thread(void *__rescuer)
        rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
+//     if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//             printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_INTERRUPTIBLE);
+//     }
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
+//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//                     printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_RUNNING);
+//             }
                rescuer->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }
@@ -2356,6 +2369,9 @@ repeat:
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
+//             if (test_tsk && ((int)test_tsk->pid == test_tsk_pid)) {
+//                     printk("task(%i): rescuer_thread -> %i\n", test_tsk_pid, TASK_RUNNING);
+//             }
                mayday_clear_cpu(cpu, wq->mayday_mask);
 
                /* migrate to the target cpu if possible */