Moved core.c and sched.h into the framework.
author	Jens Krieg <jkrieg@mailbox.tu-berlin.de>
	Tue, 15 Apr 2014 14:01:33 +0000 (16:01 +0200)
committer	Jens Krieg <jkrieg@mailbox.tu-berlin.de>
	Tue, 15 Apr 2014 14:01:33 +0000 (16:01 +0200)
* Added the framework's linker section to the Linux linker script. Needs to
* be changed in the future!
* Modified the Makefile due to the new framework-related files.

arch/x86/kernel/vmlinux.lds.S
kernel/sched/modsched/Makefile
kernel/sched/modsched/core.c
kernel/sched/modsched/sched.h

arch/x86/kernel/vmlinux.lds.S
index 22a1530..cb42c79 100644
@@ -171,6 +171,13 @@ SECTIONS
 
        } :data
 
+       .fw.modules.init . : {
+               __fw_modules_init_start = .;
+               *(.fw.modules.init)
+               __fw_modules_init_end = .;
+               __data_end = .;
+       }
+
        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
 
 #endif /* CONFIG_X86_64 */
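
For context, a bracketed output section like .fw.modules.init is typically consumed by iterating between its start and end symbols, much like the kernel's initcall sections. The following is a minimal sketch under that assumption and is not part of this commit; fw_module_init_t, FW_MODULE_INIT and fw_modules_init() are hypothetical names used only to illustrate the pattern.

/*
 * Hedged sketch: walking the .fw.modules.init section emitted by the
 * linker script hunk above. All names below are illustrative only.
 */
typedef int (*fw_module_init_t)(void);

/* Start/end symbols defined in the linker script above. */
extern fw_module_init_t __fw_modules_init_start[];
extern fw_module_init_t __fw_modules_init_end[];

/* Register an init function by placing its pointer into the section. */
#define FW_MODULE_INIT(fn)						\
	static fw_module_init_t __fw_init_##fn				\
	__attribute__((__used__, __section__(".fw.modules.init"))) = fn

/* Run every registered framework module initializer once during boot. */
static void fw_modules_init(void)
{
	fw_module_init_t *fn;

	for (fn = __fw_modules_init_start; fn < __fw_modules_init_end; fn++)
		(*fn)();
}
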
kernel/sched/modsched/Makefile
index 8395e77..8b056af 100644
@@ -13,5 +13,8 @@ endif
 
 obj-y += core.o clock.o cputime.o
 
-obj-y += framework/
+KBUILD_CFLAGS += -DFW_OS_LINUX
+KBUILD_CFLAGS +=       -Ikernel/sched/modsched/framework/include \
+                                       -Ikernel/sched/modsched/framework/include/generated 
 
+obj-y += framework/os/linux/
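
The -DFW_OS_LINUX define and the framework include paths added above suggest the framework selects an OS backend at compile time. Below is a minimal sketch of how such a switch could look; the header name fw_os.h, the fw_lock_t type, and the lock wrappers are assumptions for illustration and do not appear in this commit.

/*
 * Hedged sketch (hypothetical framework/include/fw_os.h): pick the
 * OS-specific primitives based on the build define added above.
 */
#ifdef FW_OS_LINUX
#include <linux/spinlock.h>
typedef raw_spinlock_t fw_lock_t;	/* framework lock maps to a raw spinlock */
#define fw_lock(l)	raw_spin_lock(l)
#define fw_unlock(l)	raw_spin_unlock(l)
#else
#error "No framework OS backend selected (expected e.g. -DFW_OS_LINUX)"
#endif
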
kernel/sched/modsched/core.c
index c4eb6f3..63679e6 100644
-/*
- *  kernel/sched/core.c
- *
- *  Kernel scheduler and related syscalls
- *
- *  Copyright (C) 1991-2002  Linus Torvalds
- *
- *
- */
-
-#include <linux/init.h>
-#include <asm/mmu_context.h>
-#include <linux/completion.h>
-#include <linux/kernel_stat.h>
-#include <linux/blkdev.h>
-#include <linux/syscalls.h>
-#include <linux/kprobes.h>
-#include <linux/delayacct.h>
-#include <linux/export.h>
-#include <linux/context_tracking.h>
-#include <linux/kthread.h>
-#include <linux/init_task.h>
-
-#include <asm/switch_to.h>
-#include <asm/tlb.h>
-#include <linux/cgroup.h>
-#include "sched.h"
-#include "../workqueue_internal.h"
-#include "../smpboot.h"
-
-
-//
-// Variables
-//
-
-/*
- * kernel/sched/rt.c:10
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire. RR_TIMESLICE is defined as
- * (100 * HZ / 1000) and is assigned to sched_rr_timeslice.
- */
-int sched_rr_timeslice = RR_TIMESLICE;
-
-/*
- * kernel/sched/fair.c:80
- * After fork, child runs first. If set to 0 (default) then
- * parent will (try to) run first.
- */
-unsigned int sysctl_sched_child_runs_first = 0;
-
-/*
- * kernel/sched/core.c:289
- * Period over which we measure -rt task cpu usage in us.
- * default: 1s (1000000)
- */
-unsigned int sysctl_sched_rt_period = 1000000;
-
-/*
- * /kernel/sched/core.c:2081
- * Variables and functions for calc_load
- */
-unsigned long avenrun[3];
-
-/*
- * kernel/sched/core.c:297
- * part of the period that we allow rt tasks to run in us.
- * default: 0.95s (950000)
- */
-int sysctl_sched_rt_runtime = 950000;
-
-/*
- * /kernel/sched/core.c:6866
- *
- */
-struct task_group root_task_group;
-
-/*
- * /kernel/sched/core.c:6582
- * Special case: If a kmalloc of a doms_cur partition (array of
- * cpumask) fails, then fallback to a single sched domain,
- * as determined by the single cpumask fallback_doms.
- */
-static cpumask_var_t fallback_doms;
-
-/*
- * /kernel/sched/core.c:5682
- * cpus with isolated domains
- */
-static cpumask_var_t cpu_isolated_map;
-
-/*
- * /kernel/sched/core.c:5323
- */
-DEFINE_PER_CPU(int, sd_llc_id);
-
-/*
- * /kernel/sched/core.c:2623
- * unknown
- */
-DEFINE_PER_CPU(struct kernel_stat, kstat);
-DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
-
-
-/*
- * /kernel/sched/core.c:291
- */
-__read_mostly int scheduler_running;
-
-/*
- * kernel/sched/core.c:113
- */
-DEFINE_MUTEX(sched_domains_mutex);
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-
-
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-       }
-}
-
-
-
-/*
- * Lock/unlock task from runqueue
- */
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-///*
-// * this_rq_lock - lock this runqueue and disable interrupts.
-// */
-//static struct rq *this_rq_lock(void)
+///*
+// *  kernel/sched/core.c
+// *
+// *  Kernel scheduler and related syscalls
+// *
+// *  Copyright (C) 1991-2002  Linus Torvalds
+// *
+// *
+// */
+//
+//#include <linux/init.h>
+//#include <asm/mmu_context.h>
+//#include <linux/completion.h>
+//#include <linux/kernel_stat.h>
+//#include <linux/blkdev.h>
+//#include <linux/syscalls.h>
+//#include <linux/kprobes.h>
+//#include <linux/delayacct.h>
+//#include <linux/export.h>
+//#include <linux/context_tracking.h>
+//#include <linux/kthread.h>
+//#include <linux/init_task.h>
+//
+//#include <asm/switch_to.h>
+//#include <asm/tlb.h>
+//#include <linux/cgroup.h>
+//#include "sched.h"
+//#include "../workqueue_internal.h"
+//#include "../smpboot.h"
+//
+//
+////
+//// Variables
+////
+//
+///*
+// * kernel/sched/rt.c:10
+// * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+// * Timeslices get refilled after they expire. RR_TIMESLICE is defined as
+// * (100 * HZ / 1000) and is assigned to sched_rr_timeslice.
+// */
+//int sched_rr_timeslice = RR_TIMESLICE;
+//
+///*
+// * kernel/sched/fair.c:80
+// * After fork, child runs first. If set to 0 (default) then
+// * parent will (try to) run first.
+// */
+//unsigned int sysctl_sched_child_runs_first = 0;
+//
+///*
+// * kernel/sched/core.c:289
+// * Period over which we measure -rt task cpu usage in us.
+// * default: 1s (1000000)
+// */
+//unsigned int sysctl_sched_rt_period = 1000000;
+//
+///*
+// * /kernel/sched/core.c:2081
+// * Variables and functions for calc_load
+// */
+//unsigned long avenrun[3];
+//
+///*
+// * kernel/sched/core.c:297
+// * part of the period that we allow rt tasks to run in us.
+// * default: 0.95s (950000)
+// */
+//int sysctl_sched_rt_runtime = 950000;
+//
+///*
+// * /kernel/sched/core.c:6866
+// *
+// */
+//struct task_group root_task_group;
+//
+///*
+// * /kernel/sched/core.c:6582
+// * Special case: If a kmalloc of a doms_cur partition (array of
+// * cpumask) fails, then fallback to a single sched domain,
+// * as determined by the single cpumask fallback_doms.
+// */
+//static cpumask_var_t fallback_doms;
+//
+///*
+// * /kernel/sched/core.c:5682
+// * cpus with isolated domains
+// */
+//static cpumask_var_t cpu_isolated_map;
+//
+///*
+// * /kernel/sched/core.c:5323
+// */
+//DEFINE_PER_CPU(int, sd_llc_id);
+//
+///*
+// * /kernel/sched/core.c:2623
+// * unknown
+// */
+//DEFINE_PER_CPU(struct kernel_stat, kstat);
+//DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+//
+//
+///*
+// * /kernel/sched/core.c:291
+// */
+//__read_mostly int scheduler_running;
+//
+///*
+// * kernel/sched/core.c:113
+// */
+//DEFINE_MUTEX(sched_domains_mutex);
+//DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+//
+//
+//
+///*
+// * __task_rq_lock - lock the rq @p resides on.
+// */
+//static inline struct rq *__task_rq_lock(struct task_struct *p)
 //     __acquires(rq->lock)
 //{
-//     struct rq *rq;
+//     struct rq *rq;
+//
+//     lockdep_assert_held(&p->pi_lock);
+//
+//     for (;;) {
+//             rq = task_rq(p);
+//             raw_spin_lock(&rq->lock);
+//             if (likely(rq == task_rq(p)))
+//                     return rq;
+//             raw_spin_unlock(&rq->lock);
+//     }
+//}
+//
+//
+//
+///*
+// * Lock/unlock task from runqueue
+// */
+//
+///*
+// * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+// */
+//static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+//     __acquires(p->pi_lock)
+//     __acquires(rq->lock)
+//{
+//     struct rq *rq;
+//
+//     for (;;) {
+//             raw_spin_lock_irqsave(&p->pi_lock, *flags);
+//             rq = task_rq(p);
+//             raw_spin_lock(&rq->lock);
+//             if (likely(rq == task_rq(p)))
+//                     return rq;
+//             raw_spin_unlock(&rq->lock);
+//             raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+//     }
+//}
+//
+//static void __task_rq_unlock(struct rq *rq)
+//     __releases(rq->lock)
+//{
+//     raw_spin_unlock(&rq->lock);
+//}
+//
+//static inline void
+//task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+//     __releases(rq->lock)
+//     __releases(p->pi_lock)
+//{
+//     raw_spin_unlock(&rq->lock);
+//     raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+//}
+//
+/////*
+//// * this_rq_lock - lock this runqueue and disable interrupts.
+//// */
+////static struct rq *this_rq_lock(void)
+////   __acquires(rq->lock)
+////{
+////   struct rq *rq;
+////
+////   local_irq_disable();
+////   rq = this_rq();
+////   raw_spin_lock(&rq->lock);
+////
+////   return rq;
+////}
+//
+//
+//
+///*
+// * Functions
+// */
+//
+///**
+// * kernel/sched/core.c:6872
+// * Initialize the scheduler
+// */
+//void sched_init(void)
+//{
+//     int i;
+//     unsigned long alloc_size = 0, ptr;
+//
+//#ifdef CONFIG_CPUMASK_OFFSTACK
+//     alloc_size += num_possible_cpus() * cpumask_size();
+//#endif
+//     if (alloc_size) {
+//             ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
+//     }
+//
+//     // TODO: SMP
+////#ifdef CONFIG_SMP
+////   init_defrootdomain();
+////#endif
+//
+////   init_rt_bandwidth(&def_rt_bandwidth,
+////                   global_rt_period(), global_rt_runtime());
+//
+//     for_each_possible_cpu(i) {
+//             struct rq *rq;
+//
+//             rq = cpu_rq(i);
+//             raw_spin_lock_init(&rq->lock);
+//             rq->nr_running = 0;
+//             INIT_LIST_HEAD(&rq->rq_list);
+//
+////           rq->calc_load_active = 0;
+////           rq->calc_load_update = jiffies + LOAD_FREQ;
+//
+////           init_cfs_rq(&rq->cfs);
+////           init_rt_rq(&rq->rt, rq);
+//
+////           rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
+//
+////           for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
+////                   rq->cpu_load[j] = 0;
+//
+////           rq->last_load_update_tick = jiffies;
+//
+//#ifdef CONFIG_SMP
+////           rq->sd = NULL;
+////           rq->rd = NULL;
+//////         rq->cpu_power = SCHED_POWER_SCALE;
+////           rq->post_schedule = 0;
+//////         rq->active_balance = 0;
+//////         rq->next_balance = jiffies;
+////           rq->push_cpu = 0;
+//             rq->cpu = i;
+//             rq->online = 0;
+//////         rq->idle_stamp = 0;
+//////         rq->avg_idle = 2*sysctl_sched_migration_cost;
+////
+////           INIT_LIST_HEAD(&rq->cfs_tasks);
+////
+////           rq_attach_root(rq, &def_root_domain);
+////#ifdef CONFIG_NO_HZ
+////           rq->nohz_flags = 0;
+////#endif
+//#endif
+////           init_rq_hrtick(rq);
+//             atomic_set(&rq->nr_iowait, 0);
+//     }
+//
+////   set_load_weight(&init_task);
+//
+//     /*
+//      * The boot idle thread does lazy MMU switching as well:
+//      */
+//     atomic_inc(&init_mm.mm_count);
+//     enter_lazy_tlb(&init_mm, current);
+//
+//     /*
+//      * Make us the idle thread. Technically, schedule() should not be
+//      * called from this thread, however somewhere below it might be,
+//      * but because we are the idle thread, we just pick up running again
+//      * when this runqueue becomes "idle".
+//      */
+//     init_idle(current, smp_processor_id());
+//
+////   calc_load_update = jiffies + LOAD_FREQ;
+//
+//     /*
+//      * During early bootup we pretend to be a normal task:
+//      */
+////   current->sched_class = &fair_sched_class;
+//
+//#ifdef CONFIG_SMP
+//     idle_thread_set_boot_cpu();
+//#endif
+////   init_sched_fair_class();
+//
+//     scheduler_running = 1;
+//}
+//
+//#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+//static inline int preempt_count_equals(int preempt_offset)
+//{
+//     int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
+//
+//     return (nested == preempt_offset);
+//}
+//
+//void __might_sleep(const char *file, int line, int preempt_offset)
+//{
+//     static unsigned long prev_jiffy;        /* ratelimiting */
+//
+//     rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
+//     if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+//         system_state != SYSTEM_RUNNING || oops_in_progress)
+//             return;
+//     if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+//             return;
+//     prev_jiffy = jiffies;
+//
+//     printk(KERN_ERR
+//             "BUG: sleeping function called from invalid context at %s:%d\n",
+//                     file, line);
+//     printk(KERN_ERR
+//             "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+//                     in_atomic(), irqs_disabled(),
+//                     current->pid, current->comm);
+//
+//     debug_show_held_locks(current);
+//     if (irqs_disabled())
+//             print_irqtrace_events(current);
+//     dump_stack();
+//}
+//EXPORT_SYMBOL(__might_sleep);
+//#endif
+//
+///*
+// * kernel/sched/core.c:1560
+// * Perform scheduler related setup for a newly forked process p.
+// * p is forked by current.
+// *
+// * __sched_fork() is basic setup used by init_idle() too:
+// */
+//static void __sched_fork(struct task_struct *p)
+//{
+//     p->on_rq                                        = 0;
+//
+//     p->se.on_rq                                     = 0;
+//     p->se.exec_start                        = 0;
+//     p->se.sum_exec_runtime          = 0;
+//     p->se.prev_sum_exec_runtime     = 0;
+//     p->se.vruntime                          = 0;
+//}
+//
+///*
+// * kernel/sched/core.c:1622
+// * fork()/clone()-time setup:
+// */
+//void sched_fork(struct task_struct *p)
+//{
+//     unsigned long flags;
+//     int cpu = get_cpu();
+//
+//     __sched_fork(p);
+//
+//     /*
+//      * We mark the process as running here. This guarantees that
+//      * nobody will actually run it, and a signal or other external
+//      * event cannot wake it up and insert it on the runqueue either.
+//      */
+//     p->state = TASK_RUNNING;
+//
+//     /*
+//      * Make sure we do not leak PI boosting priority to the child.
+//      */
+//     p->prio = current->normal_prio;
+//
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//     set_task_cpu(p, cpu);
+//     raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+//
+//#if defined(CONFIG_SMP)
+//     p->on_cpu = 0;
+//#endif
+//#ifdef CONFIG_PREEMPT_COUNT
+//     /* Want to start with kernel preemption disabled. */
+//     task_thread_info(p)->preempt_count = 1;
+//#endif
+//
+//     put_cpu();
+//}
+//
+///**
+// * /kernel/sched/core.c:4674
+// * init_idle - set up an idle thread for a given CPU
+// * @idle: task in question
+// * @cpu: cpu the idle task belongs to
+// *
+// * NOTE: this function does not set the idle thread's NEED_RESCHED
+// * flag, to make booting more robust.
+// */
+//void __cpuinit init_idle(struct task_struct *idle, int cpu)
+//{
+//     struct rq *rq = cpu_rq(cpu);
+//     unsigned long flags;
+//
+//     raw_spin_lock_irqsave(&rq->lock, flags);
+//
+//     __sched_fork(idle);
+//     idle->state = TASK_RUNNING;
+//     idle->se.exec_start = sched_clock();
+//
+//     do_set_cpus_allowed(idle, cpumask_of(cpu));
+//     /*
+//      * We're having a chicken and egg problem, even though we are
+//      * holding rq->lock, the cpu isn't yet set to this cpu so the
+//      * lockdep check in task_group() will fail.
+//      *
+//      * Similar case to sched_fork(). / Alternatively we could
+//      * use task_rq_lock() here and obtain the other rq->lock.
+//      *
+//      * Silence PROVE_RCU
+//      */
+//     rcu_read_lock();
+//     __set_task_cpu(idle, cpu);
+//     rcu_read_unlock();
+//
+//     rq->curr = rq->idle = idle;
+//#if defined(CONFIG_SMP)
+//     idle->on_cpu = 1;
+//#endif
+//     raw_spin_unlock_irqrestore(&rq->lock, flags);
+//
+//     /* Set the preempt count _outside_ the spinlocks! */
+//     task_thread_info(idle)->preempt_count = 0;
+//
+//#if defined(CONFIG_SMP)
+//     sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+//#endif
+//}
+//
+///*
+// * /kernel/sched/cputime.c:436
+// * Account multiple ticks of idle time.
+// * @ticks: number of stolen ticks
+// */
+//void account_idle_ticks(unsigned long ticks)
+//{
+//     //printk("\naccount_idle_ticks");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/cputime.c:397
+// * Account a single tick of cpu time.
+// * @p: the process that the cpu time gets accounted to
+// * @user_tick: indicates if the tick is a user or a system tick
+// */
+//void account_process_tick(struct task_struct *p, int user_tick)
+//{
+//     //printk("\naccount_process_tick");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/core.c:2092
+// * get_avenrun - get the load average array
+// * @loads:    pointer to dest load array
+// * @offset:    offset to add
+// * @shift:    shift count to shift the result left
+// *
+// * These values are estimates at best, so no need for locking.
+// */
+//void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+//{
+//     //printk("\nget_avenrun");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/core.c:2363
+// * calc_load - update the avenrun load estimates 10 ticks after the
+// * CPUs have updated calc_load_tasks.
+// */
+//void calc_global_load(unsigned long ticks)
+//{
+////   printk("\ncalc_global_load");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/core.c:2197
+// * We're going into NOHZ mode, if there's any pending delta, fold it
+// * into the pending idle delta.
+// */
+///*void calc_load_enter_idle(void)
+//{
+//     return;
+//}*/
+//
+///*
+// * /kernel/sched/core.c:2213
+// * If we're still before the sample window, we're done.
+// *
+// * We woke inside or after the sample window, this means we're already
+// * accounted through the nohz accounting, so skip the entire deal and
+// * sync up for the next window.
+// */
+///*void calc_load_exit_idle(void)
+//{
+//     return;
+//}*/
+//
+///*
+// * /kernel/sched/core.c:3668
+// * Check if a task can reduce its nice value
+// * @p: task
+// * @nice: nice value
+// */
+//int can_nice(const struct task_struct *p, const int nice)
+//{
+//     //printk("\ncan_nice");
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:3768
+// * idle_task - return the idle task for a given cpu.
+// * @cpu: the processor in question.
+// */
+//struct task_struct *idle_task(int cpu)
+//{
+//     return cpu_rq(cpu)->idle;
+//}
+//
+///**
+// * /kernel/sched/core.c:3742
+// * idle_cpu - is a given cpu idle currently?
+// * @cpu: the processor in question.
+// */
+//int idle_cpu(int cpu)
+//{
+//     struct rq *rq = cpu_rq(cpu);
+//
+//     if (rq->curr != rq->idle)
+//             return 0;
+//
+//     if (rq->nr_running)
+//             return 0;
+//
+//#ifdef CONFIG_SMP
+//     if (!llist_empty(&rq->wake_list))
+//             return 0;
+//#endif
+//
+//     return 1;
+//}
+//
+///*
+// * /kernel/sched/core.c:4669
+// * Sets sched_class of idle task, see struct sched_class idle_sched_class;
+// */
+//void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+//{
+//     //printk("\ninit_idle_bootup_task");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/core.c:7108
+// * Calls private function
+// * static void normalize_task(struct rq *rq, struct task_struct *p)
+// */
+//void normalize_rt_tasks(void)
+//{
+//     printk("\nnormalize_rt_tasks");
+//
+//     return;
+//}
+//
+///*
+// * /kernel/sched/core.c:1997
+// * nr_running and nr_context_switches:
+// *
+// * externally visible scheduler statistics:
+// *   current number of runnable threads
+// *   total number of context switches performed since bootup.
+// */
+//unsigned long nr_running(void)
+//{
+//     printk("\nnr_running");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+//unsigned long long nr_context_switches(void)
+//{
+////   printk("\nnr_context_switches");
+//
+//     int i;
+//     unsigned long long sum = 0;
+//
+//     for_each_possible_cpu(i)
+//             sum += cpu_rq(i)->nr_switches;
+//
+//     return sum;
+//}
+//
+///*
+// * /kernel/sched/core.c:2008
+// * number of threads waiting on IO
+// */
+//unsigned long nr_iowait(void)
+//{
+//     printk("\nnr_iowait");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:2018
+// */
+//unsigned long nr_iowait_cpu(int cpu)
+//{
+//     printk("\nnr_iowait_cpu");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///*
+// * rt_mutex_setprio - set the current priority of a task
+// * @p: task
+// * @prio: prio value (kernel-internal form)
+// *
+// * This function changes the 'effective' priority of a task. It does
+// * not touch ->normal_prio like __setscheduler().
+// *
+// * Used by the rt_mutex code to implement priority inheritance logic.
+// */
+//void rt_mutex_setprio(struct task_struct *p, int prio)
+//{
+//     printk("\nrt_mutex_setprio");
+//
+//     return;
+//}
+//
+///**
+// * sched_clock_cpu - returns current time in nanosec units
+// * using scheduler clock function.
+// * @param: cpu id
+// */
+////u64 sched_clock_cpu(int cpu)
+////{
+////   return 0;
+////}
+//
+///*
+// * kernel/sched/clock.c:350
+// * Initialize/Start scheduler clock.
+// */
+////void sched_clock_init(void)
+////{
+////   return;
+////}
+//
+///**
+// * kernel/sched/core.c:4213
+// * This functions stores the CPU affinity mask for the process or thread with the ID pid in the cpusetsize
+// * bytes long bitmap pointed to by cpuset. If successful, the function always initializes all bits in the
+// * cpu_set_t object and returns zero.
+// *
+// * If pid does not correspond to a process or thread on the system the or the function fails for some other
+// * reason, it returns -1 and errno is set to represent the error condition.
+// */
+//long sched_getaffinity(pid_t pid, struct cpumask *mask)
+//{
+//     printk("\nsched_getaffinity");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:7571
+// */
+//int sched_rr_handler(struct ctl_table *table, int write,
+//             void __user *buffer, size_t *lenp,
+//             loff_t *ppos)
+//{
+//     //printk("\nsched_rr_handler");
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4111
+// * This function installs the cpusetsize bytes long affinity mask pointed to by cpuset for the process or
+// * thread with the ID pid. If successful the function returns zero and the scheduler will in future take the
+// * affinity information into account.
+// */
+//long sched_setaffinity(pid_t pid, const struct cpumask *new_mask)
+//{
+//     //printk("\nsched_setaffinity");
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:3975
+// * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+// * @p: the task in question.
+// * @policy: new policy.
+// * @param: structure containing the new RT priority.
+// *
+// * NOTE that the task may be already dead.
+// */
+//int sched_setscheduler(struct task_struct *p, int policy,
+//             const struct sched_param *param)
+//{
+//     //printk("\nsched_setscheduler");
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:3993
+// * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+// * @p: the task in question.
+// * @policy: new policy.
+// * @param: structure containing the new RT priority.
+// *
+// * Just like sched_setscheduler, only don't bother checking if the
+// * current context has permission.  For example, this is needed in
+// * stop_machine(): we create temporary high priority worker threads,
+// * but our caller might not have that capability.
+// */
+//int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+//const struct sched_param *param)
+//{
+////   //printk("\nsched_setscheduler_nocheck");
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4601
+// */
+//void sched_show_task(struct task_struct *p)
+//{
+//     //printk("\nsched_show_task");
+//
+//     return;
+//}
+//
+///**
+// * kernel/sched/core.c:652
+// */
+//void resched_task(struct task_struct *p)
+//{
+//     int cpu;
+//
+//     assert_raw_spin_locked(&task_rq(p)->lock);
+//
+//     if (test_tsk_need_resched(p))
+//             return;
+//
+//     set_tsk_need_resched(p);
+//
+//     cpu = task_cpu(p);
+//     if (cpu == smp_processor_id())
+//             return;
+//
+//     /* NEED_RESCHED must be visible before we test polling */
+//     smp_mb();
+//     if (!tsk_is_polling(p))
+//             smp_send_reschedule(cpu);
+//}
+//
+///**
+// * kernel/sched/core.c:1806
+// * prepare_task_switch - prepare to switch tasks
+// * @rq: the runqueue preparing to switch
+// * @prev: the current task that is being switched out
+// * @next: the task we are going to switch to.
+// *
+// * This is called with the rq lock held and interrupts off. It must
+// * be paired with a subsequent finish_task_switch after the context
+// * switch.
+// *
+// * prepare_task_switch sets up locking and calls architecture specific
+// * hooks.
+// */
+//static inline void
+//prepare_task_switch(struct rq *rq, struct task_struct *prev,
+//                 struct task_struct *next)
+//{
+////   trace_sched_switch(prev, next);
+////   sched_info_switch(prev, next);
+////   perf_event_task_sched_out(prev, next);
+////   fire_sched_out_preempt_notifiers(prev, next);
+//     prepare_lock_switch(rq, next);
+////   prepare_arch_switch(next);
+//}
+//
+///**
+// * kernel/sched/core.c:1826
+// * finish_task_switch - clean up after a task-switch
+// * @rq: runqueue associated with task-switch
+// * @prev: the thread we just switched away from.
+// *
+// * finish_task_switch must be called after the context switch, paired
+// * with a prepare_task_switch call before the context switch.
+// * finish_task_switch will reconcile locking set up by prepare_task_switch,
+// * and do any other architecture-specific cleanup actions.
+// *
+// * Note that we may have delayed dropping an mm in context_switch(). If
+// * so, we finish that here outside of the runqueue lock. (Doing it
+// * with the lock held can cause deadlocks; see schedule() for
+// * details.)
+// */
+//static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+//     __releases(rq->lock)
+//{
+//     struct mm_struct *mm = rq->prev_mm;
+//     long prev_state;
+//
+//     rq->prev_mm = NULL;
+//
+//     /*
+//      * A task struct has one reference for the use as "current".
+//      * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+//      * schedule one last time. The schedule call will never return, and
+//      * the scheduled task must drop that reference.
+//      * The test for TASK_DEAD must occur while the runqueue locks are
+//      * still held, otherwise prev could be scheduled on another cpu, die
+//      * there before we look at prev->state, and then the reference would
+//      * be dropped twice.
+//      *              Manfred Spraul <manfred@colorfullife.com>
+//      */
+//     prev_state = prev->state;
+//     vtime_task_switch(prev);
+////   finish_arch_switch(prev);
+////   perf_event_task_sched_in(prev, current);
+//     finish_lock_switch(rq, prev);
+//     finish_arch_post_lock_switch();
+//
+////   fire_sched_in_preempt_notifiers(current);
+//     if (mm)
+//             mmdrop(mm);
+//     if (unlikely(prev_state == TASK_DEAD)) {
+//             /*
+//              * Remove function-return probe instances associated with this
+//              * task and put them back on the free list.
+//              */
+//             kprobe_flush_task(prev);
+//             put_task_struct(prev);
+//     }
+//}
+//
+//#ifdef CONFIG_SMP
+//
+///* assumes rq->lock is held */
+//static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
+//{
+//}
+//
+///* rq->lock is NOT held, but preemption is disabled */
+//static inline void post_schedule(struct rq *rq)
+//{
+//}
+//
+//#else
+//
+//static inline void pre_schedule(struct rq *rq, struct task_struct *p)
+//{
+//}
+//
+//static inline void post_schedule(struct rq *rq)
+//{
+//}
+//
+//#endif
+//
+///**
+// * kernel/sched/core.c:1905
+// * schedule_tail - first thing a freshly forked thread must call.
+// * @prev: the thread we just switched away from.
+// */
+//asmlinkage void schedule_tail(struct task_struct *prev)
+//     __releases(rq->lock)
+//{
+//     struct rq *rq = this_rq();
+//
+//     finish_task_switch(rq, prev);
+//
+//     /*
+//      * FIXME: do we need to worry about rq being invalidated by the
+//      * task_switch?
+//      */
+//     // TODO: SMP
+//     post_schedule(rq);
+//
+//     // TODO: replace this irq enable, maybe inside post_schedule
+//     arch_local_irq_enable();
+//
+//#ifdef __ARCH_WANT_UNLOCKED_CTXSW
+//     /* In this case, finish_task_switch does not reenable preemption */
+//     preempt_enable();
+//#endif
+//     if (current->set_child_tid)
+//             put_user(task_pid_vnr(current), current->set_child_tid);
+//}
+//
+//
+///**
+// * kernel/sched/core.c:769
+// */
+//static void update_rq_clock_task(struct rq *rq, s64 delta)
+//{
+///*
+// * In theory, the compile should just see 0 here, and optimize out the call
+// * to sched_rt_avg_update. But I don't trust it...
+// */
+//#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+//     s64 steal = 0, irq_delta = 0;
+//#endif
+//#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+//     irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+//
+//     /*
+//      * Since irq_time is only updated on {soft,}irq_exit, we might run into
+//      * this case when a previous update_rq_clock() happened inside a
+//      * {soft,}irq region.
+//      *
+//      * When this happens, we stop ->clock_task and only update the
+//      * prev_irq_time stamp to account for the part that fit, so that a next
+//      * update will consume the rest. This ensures ->clock_task is
+//      * monotonic.
+//      *
+//      * It does however cause some slight miss-attribution of {soft,}irq
+//      * time, a more accurate solution would be to update the irq_time using
+//      * the current rq->clock timestamp, except that would require using
+//      * atomic ops.
+//      */
+//     if (irq_delta > delta)
+//             irq_delta = delta;
+//
+//     rq->prev_irq_time += irq_delta;
+//     delta -= irq_delta;
+//#endif
+//#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+//     if (static_key_false((&paravirt_steal_rq_enabled))) {
+//             u64 st;
+//
+//             steal = paravirt_steal_clock(cpu_of(rq));
+//             steal -= rq->prev_steal_time_rq;
+//
+//             if (unlikely(steal > delta))
+//                     steal = delta;
+//
+//             st = steal_ticks(steal);
+//             steal = st * TICK_NSEC;
+//
+//             rq->prev_steal_time_rq += steal;
+//
+//             delta -= steal;
+//     }
+//#endif
+//
+//     rq->clock_task += delta;
+//
+//#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+//     if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+//             sched_rt_avg_update(rq, irq_delta + steal);
+//#endif
+//}
+//
+////static void update_rq_clock_task(struct rq *rq, s64 delta);
+//void update_rq_clock(struct rq *rq)
+//{
+//     s64 delta;
+//
+//     if (rq->skip_clock_update > 0)
+//             return;
+//
+//     delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+//     rq->clock += delta;
+//     update_rq_clock_task(rq, delta);
+//}
+//
+///*
+// * kernel/sched/core.c:2684
+// * This function gets called by the timer code, with HZ frequency.
+// * We call it with interrupts disabled.
+// */
+//void scheduler_tick(void)
+//{
+//     int cpu = smp_processor_id();
+//     struct rq *rq = cpu_rq(cpu);
+//     struct task_struct *curr = rq->curr;
+//
+//     u64 now = rq->clock_task;
+//     unsigned long delta_exec;
+//
+//     sched_clock_tick();
+//
+//     raw_spin_lock(&rq->lock);
+//     update_rq_clock(rq);
+//
+//     /*
+//      * Update run-time statistics of the 'current'.
+//      */
+//     if (unlikely(!curr)) {
+//             raw_spin_unlock(&rq->lock);
+//             return;
+//     }
+//
+//     /*
+//      * Get the amount of time the current task was running
+//      * since the last time we changed load (this cannot
+//      * overflow on 32 bits):
+//      */
+//     delta_exec = (unsigned long)(now - curr->se.exec_start);
+//
+//     if (delta_exec > RR_TIMESLICE) {
+//             resched_task(curr);
+//     }
+//
+//     raw_spin_unlock(&rq->lock);
+//
+//     // TODO: SMP for load balancing
+//}
+//
+///*
+// * kernel/sched/core.c:2649
+// * Lock/unlock the current runqueue - to extract task statistics:
+// */
+//unsigned long long task_delta_exec(struct task_struct *p)
+//{
+//     printk("\ntask_delta_exec");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:3727
+// * task_prio - return the priority value of a given task.
+// * @p: the task in question.
+// *
+// * This is the priority value as seen by users in /proc.
+// * RT tasks are offset by -200. Normal tasks are centered
+// * around 0, value goes from -16 to +15.
+// */
+//int task_prio(const struct task_struct *p)
+//{
+//     //printk("\ntask_prio");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:2667
+// * Return accounted runtime for the task.
+// * In case the task is currently running, return the runtime plus current's
+// * pending runtime that have not been accounted yet.
+// */
+//unsigned long long task_sched_runtime(struct task_struct *task)
+//{
+//     //printk("\ntask_sched_runtime");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:2024
+// * this_cpu_load - returns load of the cpu
+// */
+//unsigned long this_cpu_load(void)
+//{
+//     //printk("\nthis_cpu_load");
+//
+//     // TODO: SMP, needed in case of load balancing per CPU
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:2556
+// * update_cpu_load_nohz - called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+// */
+//void update_cpu_load_nohz(void)
+//{
+//     //printk("\nupdate_cpu_load_nohz");
+//
+//     return;
+//}
+//
+//
+///*
+// * kernel/sched/core.c:1207
+// * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+// */
+//static inline
+//int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
+//{
+//     int cpu = task_cpu(p);
+////   int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
+//
+//     /*
+//      * In order not to call set_task_cpu() on a blocking task we need
+//      * to rely on ttwu() to place the task on a valid ->cpus_allowed
+//      * cpu.
+//      *
+//      * Since this is common to all placement strategies, this lives here.
+//      *
+//      * [ this allows ->select_task() to simply return task_cpu(p) and
+//      *   not worry about this generic constraint ]
+//      */
+//     if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
+//                  !cpu_online(cpu)))
+//             cpu = cpumask_first(tsk_cpus_allowed(p)); //select_fallback_rq(task_cpu(p), p);
+//
+//     return cpu;
+//}
+//
+///*
+// * kernel/sched/core.c:736
+// */
+//void activate_task(struct rq *rq, struct task_struct *p, int flags)
+//{
+//     if (task_contributes_to_load(p))
+//             rq->nr_uninterruptible--;
+//
+////   enqueue_task(rq, p, flags);
+//     list_add(&p->rq_tasks, &rq->rq_list);
+//}
+//
+///*
+// * kernel/sched/core.c:744
+// */
+//void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
+//{
+//     if (task_contributes_to_load(p))
+//             rq->nr_uninterruptible++;
+//
+////   dequeue_task(rq, p, flags);
+//     list_del(&p->rq_tasks);
+//}
+//
+///*
+// * kernel/sched/core.c:1275
+// */
+//static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
+//{
+//     activate_task(rq, p, en_flags);
+//     p->on_rq = 1;
+//
+//     /* if a worker is waking up, notify workqueue */
+//     if (p->flags & PF_WQ_WORKER)
+//             wq_worker_waking_up(p, cpu_of(rq));
+//}
+//
+///*
+// * kernel/sched/core.c:909
+// */
+//void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+//{
+////   const struct sched_class *class;
+////
+////   if (p->sched_class == rq->curr->sched_class) {
+////           rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+////   } else {
+////           for_each_class(class) {
+////                   if (class == rq->curr->sched_class)
+////                           break;
+////                   if (class == p->sched_class) {
+////                           resched_task(rq->curr);
+////                           break;
+////                   }
+////           }
+////   }
+//     if (rq->curr == rq->idle)
+//                     resched_task(rq->curr);
+//
+//     /*
+//      * A queue event has occurred, and we're going to schedule.  In
+//      * this case, we can save a useless back to back clock update.
+//      */
+//     if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+//             rq->skip_clock_update = 1;
+//}
+//
+///*
+// * kernel/sched/core:1289
+// * Mark the task runnable and perform wakeup-preemption.
+// */
+//static void
+//ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+//{
+////   trace_sched_wakeup(p, true);
+//     check_preempt_curr(rq, p, wake_flags);
+//
+//     p->state = TASK_RUNNING;
+////#ifdef CONFIG_SMP
+////   if (p->sched_class->task_woken)
+////           p->sched_class->task_woken(rq, p);
+////
+////   if (rq->idle_stamp) {
+////           u64 delta = rq->clock - rq->idle_stamp;
+////           u64 max = 2*sysctl_sched_migration_cost;
+////
+////           if (delta > max)
+////                   rq->avg_idle = max;
+////           else
+////                   update_avg(&rq->avg_idle, delta);
+////           rq->idle_stamp = 0;
+////   }
+////#endif
+//}
+//
+///*
+// * kernel/sched/core.c:1313
+// */
+//static void
+//ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+//{
+//#ifdef CONFIG_SMP
+//     if (p->sched_contributes_to_load)
+//             rq->nr_uninterruptible--;
+//#endif
+//
+//     ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
+//     ttwu_do_wakeup(rq, p, wake_flags);
+//}
+//
+//#ifdef CONFIG_SMP
+///*
+// * kernel/sched/core.c:1394
+// */
+//static void ttwu_queue_remote(struct task_struct *p, int cpu)
+//{
+//     if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
+//             smp_send_reschedule(cpu);
+//}
+//#endif
+//
+///*
+// * kernel/sched/core.c:1406
+// */
+//static void ttwu_queue(struct task_struct *p, int cpu)
+//{
+//     struct rq *rq = cpu_rq(cpu);
+//
+//#if defined(CONFIG_SMP)
+//     if (/*sched_feat(TTWU_QUEUE) && */!cpus_share_cache(smp_processor_id(), cpu)) {
+//             sched_clock_cpu(cpu); /* sync clocks x-cpu */
+//             ttwu_queue_remote(p, cpu);
+//             return;
+//     }
+//#endif
+//
+//     raw_spin_lock(&rq->lock);
+//     ttwu_do_activate(rq, p, 0);
+//     raw_spin_unlock(&rq->lock);
+//}
+//
+///*
+// * kernel/sched/core.c:1703
+// * wake_up_new_task - wake up a newly created task for the first time.
+// *
+// * This function will do some initial scheduler statistics housekeeping
+// * that must be done for every newly created context, then puts the task
+// * on the runqueue and wakes it.
+// */
+//void wake_up_new_task(struct task_struct *p)
+//{
+//     unsigned long flags;
+//     struct rq *rq;
+////   int cpu = 255;
+//
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//
+//#ifdef CONFIG_SMP
+//     /*
+//      * Fork balancing, do it here and not earlier because:
+//      *  - cpus_allowed can change in the fork path
+//      *  - any previously selected cpu might disappear through hotplug
+//      */
+//     set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
+////   printk("new thread\n");
+////   for_each_cpu(cpu, &(p->cpus_allowed)){
+////           printk("Asked for CPU #%d\n", cpu);
+////   }
+//
+//#endif
+//
+//     rq = __task_rq_lock(p);
+//     activate_task(rq, p, 0);
+//     p->on_rq = 1;
+////   trace_sched_wakeup_new(p, true);
+//     check_preempt_curr(rq, p, WF_FORK);
+////#ifdef CONFIG_SMP
+////   if (p->sched_class->task_woken)
+////           p->sched_class->task_woken(rq, p);
+////#endif
+//     task_rq_unlock(rq, p, &flags);
+//}
+//
+///*
+// * kernel/sched/core:1330
+// * Called in case the task @p isn't fully descheduled from its runqueue,
+// * in this case we must do a remote wakeup. Its a 'light' wakeup though,
+// * since all we need to do is flip p->state to TASK_RUNNING, since
+// * the task is still ->on_rq.
+// */
+//static int ttwu_remote(struct task_struct *p, int wake_flags)
+//{
+//     struct rq *rq;
+//     int ret = 0;
+//
+//     rq = __task_rq_lock(p);
+//     if (p->on_rq) {
+//             ttwu_do_wakeup(rq, p, wake_flags);
+//             ret = 1;
+//     }
+//     __task_rq_unlock(rq);
+//
+//     return ret;
+//}
+//
+///**
+// * kernel/sched/core.c:1439
+// * try_to_wake_up - wake up a thread
+// * @p: the thread to be awakened
+// * @state: the mask of task states that can be woken
+// * @wake_flags: wake modifier flags (WF_*)
+// *
+// * Put it on the run-queue if it's not already there. The "current"
+// * thread is always on the run-queue (except when the actual
+// * re-schedule is in progress), and as such you're allowed to do
+// * the simpler "current->state = TASK_RUNNING" to mark yourself
+// * runnable without the overhead of this.
+// *
+// * Returns %true if @p was woken up, %false if it was already running
+// * or @state didn't match @p's state.
+// */
+//static int
+//try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+//{
+//     unsigned long flags;
+//     int cpu, success = 0;
+//
+//     smp_wmb();
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//     if (!(p->state & state))
+//             goto out;
+//
+//     success = 1; /* we're going to change ->state */
+//     cpu = task_cpu(p);
+//
+//     if (p->on_rq && ttwu_remote(p, wake_flags))
+//             goto stat;
+//
+//#ifdef CONFIG_SMP
+//     /*
+//      * If the owning (remote) cpu is still in the middle of schedule() with
+//      * this task as prev, wait until its done referencing the task.
+//      */
+//     while (p->on_cpu)
+//             cpu_relax();
+//     /*
+//      * Pairs with the smp_wmb() in finish_lock_switch().
+//      */
+//     smp_rmb();
+//
+////   p->sched_contributes_to_load = !!task_contributes_to_load(p);
+//     p->state = TASK_WAKING;
+//
+////   if (p->sched_class->task_waking)
+////           p->sched_class->task_waking(p);
+//
+//     // TODO: simply not using select_task_rq :)
+//     cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+//     if (task_cpu(p) != cpu) {
+//             wake_flags |= WF_MIGRATED;
+//             set_task_cpu(p, cpu);
+//     }
+//#endif /* CONFIG_SMP */
+//
+//     ttwu_queue(p, cpu);
+//stat:
+////   raw_spin_unlock(&rq->lock);
+//out:
+//     raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+//
+//     return success;
+//}
+//
+///**
+// * kernel/sched/core.c:1497
+// * try_to_wake_up_local - try to wake up a local task with rq lock held
+// * @p: the thread to be awakened
+// *
+// * Put @p on the run-queue if it's not already there. The caller must
+// * ensure that this_rq() is locked, @p is bound to this_rq() and not
+// * the current task.
+// */
+//static void try_to_wake_up_local(struct task_struct *p)
+//{
+//     struct rq *rq = task_rq(p);
+//
+//     if (WARN_ON_ONCE(rq != this_rq()) ||
+//         WARN_ON_ONCE(p == current))
+//             return;
+//
+//     lockdep_assert_held(&rq->lock);
+//
+//     if (!raw_spin_trylock(&p->pi_lock)) {
+//             raw_spin_unlock(&rq->lock);
+//             raw_spin_lock(&p->pi_lock);
+//             raw_spin_lock(&rq->lock);
+//     }
+//
+//     if (!(p->state & TASK_NORMAL))
+//             goto out;
+//
+//     if (!p->on_rq)
+//             ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+//
+//     ttwu_do_wakeup(rq, p, 0);
+////   ttwu_stat(p, smp_processor_id(), 0);
+//out:
+//     raw_spin_unlock(&p->pi_lock);
+//}
+//
+///*
+// * kernel/sched/core.c:1931
+// * context_switch - switch to the new MM and the new
+// * thread's register state.
+// */
+//static inline void
+//context_switch(struct rq *rq, struct task_struct *prev,
+//            struct task_struct *next)
+//{
+//     struct mm_struct *mm, *oldmm;
+//
+//     prepare_task_switch(rq, prev, next);
+//
+//     mm = next->mm;
+//     oldmm = prev->active_mm;
+//     /*
+//      * For paravirt, this is coupled with an exit in switch_to to
+//      * combine the page table reload and the switch backend into
+//      * one hypercall.
+//      */
+////   arch_start_context_switch(prev);
+//
+//     if (!mm) {
+//             next->active_mm = oldmm;
+//             atomic_inc(&oldmm->mm_count);
+//             enter_lazy_tlb(oldmm, next);
+//     }
+//     else
+//             switch_mm(oldmm, mm, next);
+//
+//     if (!prev->mm) {
+//             prev->active_mm = NULL;
+//             rq->prev_mm = oldmm;
+//     }
+//     /*
+//      * Since the runqueue lock will be released by the next
+//      * task (which is an invalid locking op but in the case
+//      * of the scheduler it's an obvious special-case), so we
+//      * do an early lockdep release here:
+//      */
+//#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+//     spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
+//#endif
+//
+//     context_tracking_task_switch(prev, next);
+//     /* Here we just switch the register state and the stack. */
+//     switch_to(prev, next, prev);
+//
+//     barrier();
+//     /*
+//      * this_rq must be evaluated again because prev may have moved
+//      * CPUs since it called schedule(), thus the 'rq' on its stack
+//      * frame will be invalid.
+//      */
+//     finish_task_switch(this_rq(), prev);
+//
+//}
+//
+///*
+// * kernel/sched/core.c:2875
+// * __schedule() is the main scheduler function.
+// *
+// * The main means of driving the scheduler and thus entering this function are:
+// *
+// *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
+// *
+// *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
+// *      paths. For example, see arch/x86/entry_64.S.
+// *
+// *      To drive preemption between tasks, the scheduler sets the flag in timer
+// *      interrupt handler scheduler_tick().
+// *
+// *   3. Wakeups don't really cause entry into schedule(). They add a
+// *      task to the run-queue and that's it.
+// *
+// *      Now, if the new task added to the run-queue preempts the current
+// *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
+// *      called on the nearest possible occasion:
+// *
+// *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
+// *
+// *         - in syscall or exception context, at the next outmost
+// *           preempt_enable(). (this might be as soon as the wake_up()'s
+// *           spin_unlock()!)
+// *
+// *         - in IRQ context, return from interrupt-handler to
+// *           preemptible context
+// *
+// *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
+// *         then at the next:
+// *
+// *          - cond_resched() call
+// *          - explicit schedule() call
+// *          - return from syscall or exception to user-space
+// *          - return from interrupt-handler to user-space
+// */
+//static void __sched __schedule(void)
+//{
+//     struct task_struct *prev, *next;
+////   unsigned long *switch_count = 0;
+//     struct rq *rq;
+//     int cpu;
+//
+//need_resched:
+//     preempt_disable();
+//     cpu = smp_processor_id();
+//     rq = cpu_rq(cpu);
+//     rcu_note_context_switch(cpu);
+//     prev = rq->curr;
+//
+////   schedule_debug(prev);
+//
+////   if (sched_feat(HRTICK))
+////           hrtick_clear(rq);
+//
+//     raw_spin_lock_irq(&rq->lock);
+//
+////   switch_count = &prev->nivcsw;
+//     if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+//             if (unlikely(signal_pending_state(prev->state, prev))) {
+//                     prev->state = TASK_RUNNING;
+//             } else {
+////                   deactivate_task(rq, prev, DEQUEUE_SLEEP);
+//                     prev->on_rq = 0;
+//
+//                     /*
+//                      * If a worker went to sleep, notify and ask workqueue
+//                      * whether it wants to wake up a task to maintain
+//                      * concurrency.
+//                      */
+//                     if (prev->flags & PF_WQ_WORKER) {
+//                             struct task_struct *to_wakeup;
+//
+//                             to_wakeup = wq_worker_sleeping(prev, cpu);
+//                             if (to_wakeup)
+//                                     try_to_wake_up_local(to_wakeup);
+//                     }
+//             }
+////           switch_count = &prev->nvcsw;
+//     }
+//
+//     pre_schedule(rq, prev);
+//
+////   if (unlikely(!rq->nr_running))
+////           idle_balance(cpu, rq);
+//
+////   put_prev_task(rq, prev);
+//     if ((prev != rq->idle) && prev->on_rq) {
+//             list_add_tail(&prev->rq_tasks, &rq->rq_list);
+//     }
+//
+//     /*      In case the only runnable task gets deactivated, we need to schedule
+//      *      the idle tasks.
+//      */
+////   next = pick_next_task(rq);
+//     if (!list_empty(&rq->rq_list)) {
+//             assert_raw_spin_locked(&rq->lock);
+//             next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
+//             list_del(&next->rq_tasks);
+//     }
+//     else {
+//             next = rq->idle;
+//     }
+//     next->se.exec_start = rq->clock_task;
+//
+//
+//     clear_tsk_need_resched(prev);
+//     rq->skip_clock_update = 0;
+//
+//     if (likely(prev != next)) {
+//             rq->nr_switches++;
+//             rq->curr = next;
+////           ++*switch_count;
+//
+//             context_switch(rq, prev, next); /* unlocks the rq */
+//
+//             // TODO: remove irq enable
+//             arch_local_irq_enable();
+//
+//             /*
+//              * The context switch have flipped the stack from under us
+//              * and restored the local variables which were saved when
+//              * this task called schedule() in the past. prev == current
+//              * is still correct, but it can be moved to another cpu/rq.
+//              */
+//             cpu = smp_processor_id();
+//             rq = cpu_rq(cpu);
+//     }
+//     else
+//             raw_spin_unlock_irq(&rq->lock);
+//
+//     post_schedule(rq);
+//
+//     sched_preempt_enable_no_resched();
+//     if (need_resched())
+//             goto need_resched;
+//}
+//
+///*
+// * kernel/sched/core.c:2966
+// */
+//asmlinkage void __sched schedule(void)
+//{
+////   struct task_struct *tsk = current;
+////
+////   if (!tsk->state || tsk_is_pi_blocked(tsk))
+////           return;
+//
+//     __schedule();
+//}
+//EXPORT_SYMBOL(schedule);
+//
+///*
+// * kernel/sched/core.c:3125
+// * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+// * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+// * number) then we wake all the non-exclusive tasks and one exclusive task.
+// *
+// * There are circumstances in which we can try to wake a task which has already
+// * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+// * zero in this (rare) case, and we handle it by continuing to scan the queue.
+// */
+//static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+//                     int nr_exclusive, int wake_flags, void *key)
+//{
+//     wait_queue_t *curr, *next;
+//
+//     list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+//             unsigned flags = curr->flags;
+//
+//             if (curr->func(curr, mode, wake_flags, key) &&
+//                             (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+//                     break;
+//     }
+//}
+//
+///**
+// * kernel/sched/core.c:3149
+// * __wake_up - wake up threads blocked on a waitqueue.
+// * @q: the waitqueue
+// * @mode: which threads
+// * @nr_exclusive: how many wake-one or wake-many threads to wake up
+// * @key: is directly passed to the wakeup function
+// *
+// * It may be assumed that this function implies a write memory barrier before
+// * changing the task state if and only if any tasks are woken up.
+// */
+//void __wake_up(wait_queue_head_t *q, unsigned int mode,
+//                     int nr_exclusive, void *key)
+//{
+//     unsigned long flags;
+//
+//     spin_lock_irqsave(&q->lock, flags);
+//     __wake_up_common(q, mode, nr_exclusive, 0, key);
+//     spin_unlock_irqrestore(&q->lock, flags);
+//}
+//EXPORT_SYMBOL(__wake_up);
+//
+///**
+// * kernel/sched/core.c:1536
+// * wake_up_process - Wake up a specific process
+// * @p: The process to be woken up.
+// *
+// * Attempt to wake up the nominated process and move it to the set of runnable
+// * processes.  Returns 1 if the process was woken up, 0 if it was already
+// * running.
+// *
+// * It may be assumed that this function implies a write memory barrier before
+// * changing the task state if and only if any tasks are woken up.
+// */
+//int wake_up_process(struct task_struct *p)
+//{
+//     WARN_ON(task_is_stopped_or_traced(p));
+//     return try_to_wake_up(p, TASK_NORMAL, 0);
+//}
+//EXPORT_SYMBOL(wake_up_process);
+//
+//static inline long __sched
+//do_wait_for_common(struct completion *x,
+//                long (*action)(long), long timeout, int state)
+//{
+//     if (!x->done) {
+//             DECLARE_WAITQUEUE(wait, current);
+//
+//             __add_wait_queue_tail_exclusive(&x->wait, &wait);
+//             do {
+//                     if (signal_pending_state(state, current)) {
+//                             timeout = -ERESTARTSYS;
+//                             break;
+//                     }
+//                     __set_current_state(state);
+//                     spin_unlock_irq(&x->wait.lock);
+//                     timeout = action(timeout);
+//                     spin_lock_irq(&x->wait.lock);
+//             } while (!x->done && timeout);
+//             __remove_wait_queue(&x->wait, &wait);
+//             if (!x->done)
+//                     return timeout;
+//     }
+//     x->done--;
+//     return timeout ?: 1;
+//}
+//
+//static inline long __sched
+//__wait_for_common(struct completion *x,
+//               long (*action)(long), long timeout, int state)
+//{
+//     might_sleep();
+//
+//     spin_lock_irq(&x->wait.lock);
+//     timeout = do_wait_for_common(x, action, timeout, state);
+//     spin_unlock_irq(&x->wait.lock);
+//     return timeout;
+//}
+//
+//static long __sched
+//wait_for_common(struct completion *x, long timeout, int state)
+//{
+//     return __wait_for_common(x, schedule_timeout, timeout, state);
+//}
+//
+///**
+// * kernel/sched/core.c:3322
+// * wait_for_completion: - waits for completion of a task
+// * @x:  holds the state of this particular completion
+// *
+// * This waits to be signaled for completion of a specific task. It is NOT
+// * interruptible and there is no timeout.
+// *
+// * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
+// * and interrupt capability. Also see complete().
+// */
+//void __sched wait_for_completion(struct completion *x)
+//{
+//     wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+//}
+//EXPORT_SYMBOL(wait_for_completion);
+//
+///**
+// * kernel/sched/core.c:3231
+// * complete: - signals a single thread waiting on this completion
+// * @x:  holds the state of this particular completion
+// *
+// * This will wake up a single thread waiting on this completion. Threads will be
+// * awakened in the same order in which they were queued.
+// *
+// * See also complete_all(), wait_for_completion() and related routines.
+// *
+// * It may be assumed that this function implies a write memory barrier before
+// * changing the task state if and only if any tasks are woken up.
+// */
+//void complete(struct completion *x)
+//{
+//     unsigned long flags;
+//
+//     spin_lock_irqsave(&x->wait.lock, flags);
+//     x->done++;
+//     __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
+//     spin_unlock_irqrestore(&x->wait.lock, flags);
+//}
+//EXPORT_SYMBOL(complete);
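
For reference, a hedged sketch of the complete()/wait_for_completion() pairing this implements (the completion and the helper thread are invented for illustration):

#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(ex_setup_done);	/* hypothetical completion */

static int ex_setup_thread(void *unused)
{
	/* ... perform one-time setup ... */
	complete(&ex_setup_done);		/* release exactly one waiter */
	return 0;
}

static void ex_wait_for_setup(void)
{
	/* uninterruptible and unbounded; see the *_timeout()/*_killable()
	 * variants further down for bounded or interruptible waits */
	wait_for_completion(&ex_setup_done);
}
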
+//
+///**
+// * kernel/sched/core.c:2995
+// * schedule_preempt_disabled - called with preemption disabled
+// *
+// * Returns with preemption disabled. Note: preempt_count must be 1
+// */
+//void __sched schedule_preempt_disabled(void)
+//{
+//     sched_preempt_enable_no_resched();
+//     schedule();
+//     preempt_disable();
+//}
+//
+///*
+// * kernel/sched/core.c:6858
+// */
+//int in_sched_functions(unsigned long addr)
+//{
+//     printk("\nin_sched_functions");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:4333
+// * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
+// * call schedule, and on return reacquire the lock.
+// *
+// * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+// * operations here to prevent schedule() from being called twice (once via
+// * spin_unlock(), once by hand).
+// */
+//int __cond_resched_lock(spinlock_t *lock)
+//{
+//     printk("\n__cond_resched_lock");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:4315
+// */
+//static inline int should_resched(void)
+//{
+//     return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
+//}
+//
+//static void __cond_resched(void)
+//{
+//     add_preempt_count(PREEMPT_ACTIVE);
+//     __schedule();
+//     sub_preempt_count(PREEMPT_ACTIVE);
+//}
+//
+//int __sched _cond_resched(void)
+//{
+//     if (should_resched()) {
+//             __cond_resched();
+//             return 1;
+//     }
+//     return 0;
+//}
+//EXPORT_SYMBOL(_cond_resched);
+//
+///*
+// * kernel/sched/core.c:4333
+// */
+//int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, void *key)
+//{
+//     return try_to_wake_up(curr->private, mode, wake_flags);
+//}
+//EXPORT_SYMBOL(default_wake_function);
+//
+///**
+// * kernel/sched/core.c:3426
+// * wait_for_completion_killable: - waits for completion of a task (killable)
+// * @x:  holds the state of this particular completion
+// *
+// * This waits to be signaled for completion of a specific task. It can be
+// * interrupted by a kill signal.
+// *
+// * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+// */
+//int __sched wait_for_completion_killable(struct completion *x)
+//{
+//     long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+//     if (t == -ERESTARTSYS)
+//             return t;
+//     return 0;
+//}
+//EXPORT_SYMBOL(wait_for_completion_killable);
+//
+///**
+// * kernel/sched/core.c:3192
+// * __wake_up_sync_key - wake up threads blocked on a waitqueue.
+// * @q: the waitqueue
+// * @mode: which threads
+// * @nr_exclusive: how many wake-one or wake-many threads to wake up
+// * @key: opaque value to be passed to wakeup targets
+// *
+// * The sync wakeup differs in that the waker knows that it will schedule
+// * away soon, so while the target thread will be woken up, it will not
+// * be migrated to another CPU - ie. the two threads are 'synchronized'
+// * with each other. This can prevent needless bouncing between CPUs.
+// *
+// * On UP it can prevent extra preemption.
+// *
+// * It may be assumed that this function implies a write memory barrier before
+// * changing the task state if and only if any tasks are woken up.
+// */
+//void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+//                     int nr_exclusive, void *key)
+//{
+//     unsigned long flags;
+//     int wake_flags = WF_SYNC;
+//
+//     if (unlikely(!q))
+//             return;
+//
+//     if (unlikely(!nr_exclusive))
+//             wake_flags = 0;
+//
+//     spin_lock_irqsave(&q->lock, flags);
+//     __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
+//     spin_unlock_irqrestore(&q->lock, flags);
+//}
+//EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+//
+///*
+// * kernel/sched/core.c:1543
+// */
+//int wake_up_state(struct task_struct *p, unsigned int state)
+//{
+//     WARN_ON(task_is_stopped_or_traced(p));
+//     return try_to_wake_up(p, state, 0);
+//}
+//EXPORT_SYMBOL(wake_up_state);
+//
+///**
+// * kernel/sched/core.c:4389
+// * yield - yield the current processor to other threads.
+// *
+// * Do not ever use this function, there's a 99% chance you're doing it wrong.
+// *
+// * The scheduler is at all times free to pick the calling task as the most
+// * eligible task to run, if removing the yield() call from your code breaks
+// * it, it's already broken.
+// *
+// * Typical broken usage is:
+// *
+// * while (!event)
+// *   yield();
+// *
+// * where one assumes that yield() will let 'the other' process run that will
+// * make event true. If the current task is a SCHED_FIFO task that will never
+// * happen. Never use yield() as a progress guarantee!!
+// *
+// * If you want to use yield() to wait for something, use wait_event().
+// * If you want to use yield() to be 'nice' for others, use cond_resched().
+// * If you still want to use yield(), do not!
+// */
+//void __sched yield(void)
+//{
+//     printk("\nyield");
+//
+//     // TODO: SMP
+//
+//     return;
+//}
+//
+///**
+// * kernel/sched/core.c:892
+// * task_curr - is this task currently executing on a CPU?
+// * @p: the task in question.
+// */
+//inline int task_curr(const struct task_struct *p)
+//{
+//     printk("\ntask_curr");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:3736
+// * task_nice - return the nice value of a given task.
+// * @p: the task in question.
+// */
+//int task_nice(const struct task_struct *p)
+//{
+//     printk("\ntask_nice");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:3616
+// */
+//void set_user_nice(struct task_struct *p, long nice)
+//{
+////   printk("\nset_user_nice");
+//
+//     return;
+//}
+//
+///*
+// * kernel/sched/core.c:3169
+// */
+//void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+//{
+//     printk("\n__wake_up_locked_key");
+//
+//     return;
+//}
+//
+///*
+// * kernel/sched/core.c:4474
+// * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+// * that process accounting knows that this is a task in IO wait state.
+// */
+//void __sched io_schedule(void)
+//{
+////   printk("\nio_schedule");
+//
+//     struct rq *rq = raw_rq();
+//
+//     delayacct_blkio_start();
+//     atomic_inc(&rq->nr_iowait);
+//     blk_flush_plug(current);
+//     current->in_iowait = 1;
+//     schedule();
+//     current->in_iowait = 0;
+//     atomic_dec(&rq->nr_iowait);
+//     delayacct_blkio_end();
+//}
+//EXPORT_SYMBOL(io_schedule);
+//
+///*
+// * kernel/sched/core.c:4489
+// */
+//long __sched io_schedule_timeout(long timeout)
+//{
+////   printk("\nio_schedule_timeout");
+//     struct rq *rq = raw_rq();
+//     long ret;
+//
+//     delayacct_blkio_start();
+//     atomic_inc(&rq->nr_iowait);
+//     blk_flush_plug(current);
+//     current->in_iowait = 1;
+//     ret = schedule_timeout(timeout);
+//     current->in_iowait = 0;
+//     atomic_dec(&rq->nr_iowait);
+//     delayacct_blkio_end();
+//     return ret;
+//}
+//
+//
+///*
+// * kernel/sched/core.c:7590
+// */
+//int sched_rt_handler(struct ctl_table *table, int write,
+//             void __user *buffer, size_t *lenp,
+//             loff_t *ppos)
+//{
+//     //printk("\nsched_rt_handler");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core.c:3213
+// * __wake_up_sync - see __wake_up_sync_key()
+// */
+//void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+//{
+//     printk("\n__wake_up_sync");
+//
+//     return;
+//}
+//
+///*
+// * kernel/sched/core.c:3163
+// * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+// */
+//void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+//{
+//     printk("\n__wake_up_locked");
+//
+//     return;
+//}
+//
+///**
+// * kernel/sched/core.c:3307
+// */
+//static long __sched
+//wait_for_common_io(struct completion *x, long timeout, int state)
+//{
+//     return __wait_for_common(x, io_schedule_timeout, timeout, state);
+//}
+//
+///**
+// * kernel/sched/core.c:3355
+// * wait_for_completion_io: - waits for completion of a task
+// * @x:  holds the state of this particular completion
+// *
+// * This waits to be signaled for completion of a specific task. It is NOT
+// * interruptible and there is no timeout. The caller is accounted as waiting
+// * for IO.
+// */
+//void __sched wait_for_completion_io(struct completion *x)
+//{
+//     wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+//}
+//EXPORT_SYMBOL(wait_for_completion_io);
+//
+///**
+// * kernel/sched/core.c:3416
+// * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
+// * @x:  holds the state of this particular completion
+// * @timeout:  timeout value in jiffies
+// *
+// * This waits for either a completion of a specific task to be signaled or for a
+// * specified timeout to expire. The timeout is in jiffies. It is not
+// * interruptible. The caller is accounted as waiting for IO.
+// *
+// * The return value is 0 if timed out, and positive (at least 1, or number of
+// * jiffies left till timeout) if completed.
+// */
+//unsigned long __sched
+//wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
+//{
+//     return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+//}
+//EXPORT_SYMBOL(wait_for_completion_io_timeout);
+//
+///*
+// * kernel/sched/core.c:4634
+// */
+//void show_state_filter(unsigned long state_filter)
+//{
+//     //printk("\nshow_state_filter");
+//
+//     return;
+//}
+//
+///**
+// * kernel/sched/core.c:3251
+// * complete_all: - signals all threads waiting on this completion
+// * @x:  holds the state of this particular completion
+// *
+// * This will wake up all threads waiting on this particular completion event.
+// *
+// * It may be assumed that this function implies a write memory barrier before
+// * changing the task state if and only if any tasks are woken up.
+// */
+//void complete_all(struct completion *x)
+//{
+//     unsigned long flags;
+//
+//     spin_lock_irqsave(&x->wait.lock, flags);
+//     x->done += UINT_MAX/2;
+//     __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
+//     spin_unlock_irqrestore(&x->wait.lock, flags);
+//}
+//EXPORT_SYMBOL(complete_all);
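
To contrast with complete() above, a sketch (names invented): a single complete_all() call releases every thread parked on the completion, whereas complete() releases one waiter per call.

#include <linux/completion.h>

static DECLARE_COMPLETION(ex_phase_done);	/* hypothetical completion */

/* each of N worker threads blocks here ... */
static void ex_worker_wait(void)
{
	wait_for_completion(&ex_phase_done);
}

/* ... and one broadcast releases all of them at once */
static void ex_phase_finish(void)
{
	complete_all(&ex_phase_done);
}
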
+//
+///**
+// * kernel/sched/core.c:3341
+// * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
+// * @x:  holds the state of this particular completion
+// * @timeout:  timeout value in jiffies
+// *
+// * This waits for either a completion of a specific task to be signaled or for a
+// * specified timeout to expire. The timeout is in jiffies. It is not
+// * interruptible.
+// *
+// * The return value is 0 if timed out, and positive (at least 1, or number of
+// * jiffies left till timeout) if completed.
+// */
+//unsigned long __sched
+//wait_for_completion_timeout(struct completion *x, unsigned long timeout)
+//{
+//     return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
+//}
+//EXPORT_SYMBOL(wait_for_completion_timeout);
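
A short sketch of handling the return convention documented above (the completion and the 2-second budget are invented for illustration):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(ex_xfer_done);	/* hypothetical completion */

static int ex_wait_for_xfer(void)
{
	unsigned long left;

	left = wait_for_completion_timeout(&ex_xfer_done, msecs_to_jiffies(2000));
	if (!left)
		return -ETIMEDOUT;	/* 0 => the timeout expired first */
	return 0;			/* > 0 => jiffies left when it completed */
}
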
+//
 //
-//     local_irq_disable();
-//     rq = this_rq();
-//     raw_spin_lock(&rq->lock);
 //
-//     return rq;
-//}
-
-
-
-/*
- * Functions
- */
-
-/**
- * kernel/sched/core.c:6872
- * Initialize the scheduler
- */
-void sched_init(void)
-{
-       int i;
-       unsigned long alloc_size = 0, ptr;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-       alloc_size += num_possible_cpus() * cpumask_size();
-#endif
-       if (alloc_size) {
-               ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
-       }
-
-       // TODO: SMP
+///*
+// *
+// * SMP
+// *
+// */
 //#ifdef CONFIG_SMP
-//     init_defrootdomain();
-//#endif
-
-//     init_rt_bandwidth(&def_rt_bandwidth,
-//                     global_rt_period(), global_rt_runtime());
-
-       for_each_possible_cpu(i) {
-               struct rq *rq;
-
-               rq = cpu_rq(i);
-               raw_spin_lock_init(&rq->lock);
-               rq->nr_running = 0;
-               INIT_LIST_HEAD(&rq->rq_list);
-
-//             rq->calc_load_active = 0;
-//             rq->calc_load_update = jiffies + LOAD_FREQ;
-
-//             init_cfs_rq(&rq->cfs);
-//             init_rt_rq(&rq->rt, rq);
-
-//             rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
-
-//             for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
-//                     rq->cpu_load[j] = 0;
-
-//             rq->last_load_update_tick = jiffies;
-
-#ifdef CONFIG_SMP
-//             rq->sd = NULL;
-//             rq->rd = NULL;
-////           rq->cpu_power = SCHED_POWER_SCALE;
-//             rq->post_schedule = 0;
-////           rq->active_balance = 0;
-////           rq->next_balance = jiffies;
-//             rq->push_cpu = 0;
-               rq->cpu = i;
-               rq->online = 0;
-////           rq->idle_stamp = 0;
-////           rq->avg_idle = 2*sysctl_sched_migration_cost;
 //
-//             INIT_LIST_HEAD(&rq->cfs_tasks);
+//struct migration_arg {
+//     struct task_struct *task;
+//     int dest_cpu;
+//};
+//
+///*
+// * kernel/sched/core.c:4822
+// * Move (not current) task off this cpu, onto dest cpu. We're doing
+// * this because either it can't run here any more (set_cpus_allowed()
+// * away from this CPU, or CPU going down), or because we're
+// * attempting to rebalance this task on exec (sched_exec).
+// *
+// * So we race with normal scheduler movements, but that's OK, as long
+// * as the task is no longer on this CPU.
+// *
+// * Returns non-zero if task was successfully migrated.
+// */
+//static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+//{
+//     struct rq *rq_dest, *rq_src;
+//     int ret = 0;
+//
+//     if (unlikely(!cpu_active(dest_cpu)))
+//             return ret;
+//
+//     rq_src = cpu_rq(src_cpu);
+//     rq_dest = cpu_rq(dest_cpu);
+//
+//     raw_spin_lock(&p->pi_lock);
+//     double_rq_lock(rq_src, rq_dest);
+//     /* Already moved. */
+//     if (task_cpu(p) != src_cpu)
+//             goto done;
+//     /* Affinity changed (again). */
+//     if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+//             goto fail;
+//
+//     /*
+//      * If we're not on a rq, the next wake-up will ensure we're
+//      * placed properly.
+//      */
+//     if (p->on_rq) {
+////           dequeue_task(rq_src, p, 0);
+//             list_del(&p->rq_tasks);
+//		// TODO: maybe not necessary, since both rq locks are held
+//             p->on_rq = 0;
+//             set_task_cpu(p, dest_cpu);
+////           enqueue_task(rq_dest, p, 0);
+//             list_add(&p->rq_tasks, &rq_dest->rq_list);
+//		// TODO: maybe not necessary, since both rq locks are held
+//             p->on_rq = 1;
+////           check_preempt_curr(rq_dest, p, 0);
+//             if (rq_dest->curr == rq_dest->idle)
+//                     resched_task(rq_dest->curr);
+//     }
+//done:
+//     ret = 1;
+//fail:
+//     double_rq_unlock(rq_src, rq_dest);
+//     raw_spin_unlock(&p->pi_lock);
+//     return ret;
+//}
+//
+///*
+// * kernel/sched/core.c:4865
+// * migration_cpu_stop - this will be executed by a highprio stopper thread
+// * and performs thread migration by bumping thread off CPU then
+// * 'pushing' onto another runqueue.
+// */
+//static int migration_cpu_stop(void *data)
+//{
+//     struct migration_arg *arg = data;
+//
+//     /*
+//      * The original target cpu might have gone down and we might
+//      * be on another cpu but it doesn't matter.
+//      */
+//     local_irq_disable();
+//     __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
+//     local_irq_enable();
+//     return 0;
+//}
+//
+//
+//static void set_rq_online(struct rq *rq)
+//{
+//     if (!rq->online)
+//             rq->online = 1;
+//}
+//
+//static void set_rq_offline(struct rq *rq)
+//{
+//     if (rq->online)
+//             rq->online = 0;
+//}
+//
+///*
+// * migration_call - callback that gets triggered when a CPU is added.
+// * Here we can start up the necessary migration thread for the new CPU.
+// */
+//static int __cpuinit
+//migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
+//{
+//     int cpu = (long)hcpu;
+//     unsigned long flags;
+//     struct rq *rq = cpu_rq(cpu);
+//
+//     switch (action & ~CPU_TASKS_FROZEN) {
+//
+//     case CPU_UP_PREPARE:
+////           rq->calc_load_update = calc_load_update;
+//             break;
+//
+//     case CPU_ONLINE:
+//             /* Update our root-domain */
+//             raw_spin_lock_irqsave(&rq->lock, flags);
+////           if (rq->rd) {
+////                   BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+////
+//                     set_rq_online(rq);
+////           }
+//             raw_spin_unlock_irqrestore(&rq->lock, flags);
+//             break;
 //
-//             rq_attach_root(rq, &def_root_domain);
-//#ifdef CONFIG_NO_HZ
-//             rq->nohz_flags = 0;
+//#ifdef CONFIG_HOTPLUG_CPU
+//     case CPU_DYING:
+//             sched_ttwu_pending();
+//             /* Update our root-domain */
+//             raw_spin_lock_irqsave(&rq->lock, flags);
+////           if (rq->rd) {
+////                   BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+//                     set_rq_offline(rq);
+////           }
+//             migrate_tasks(cpu);
+//             BUG_ON(rq->nr_running != 1); /* the migration thread */
+//             raw_spin_unlock_irqrestore(&rq->lock, flags);
+//             break;
+//
+//     case CPU_DEAD:
+////           calc_load_migrate(rq);
+//             break;
 //#endif
-#endif
-//             init_rq_hrtick(rq);
-               atomic_set(&rq->nr_iowait, 0);
-       }
-
-//     set_load_weight(&init_task);
-
-       /*
-        * The boot idle thread does lazy MMU switching as well:
-        */
-       atomic_inc(&init_mm.mm_count);
-       enter_lazy_tlb(&init_mm, current);
-
-       /*
-        * Make us the idle thread. Technically, schedule() should not be
-        * called from this thread, however somewhere below it might be,
-        * but because we are the idle thread, we just pick up running again
-        * when this runqueue becomes "idle".
-        */
-       init_idle(current, smp_processor_id());
-
-//     calc_load_update = jiffies + LOAD_FREQ;
-
-       /*
-        * During early bootup we pretend to be a normal task:
-        */
-//     current->sched_class = &fair_sched_class;
-
-#ifdef CONFIG_SMP
-       idle_thread_set_boot_cpu();
-#endif
-//     init_sched_fair_class();
-
-       scheduler_running = 1;
-}
-
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-static inline int preempt_count_equals(int preempt_offset)
-{
-       int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
-
-       return (nested == preempt_offset);
-}
-
-void __might_sleep(const char *file, int line, int preempt_offset)
-{
-       static unsigned long prev_jiffy;        /* ratelimiting */
-
-       rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
-       if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
-           system_state != SYSTEM_RUNNING || oops_in_progress)
-               return;
-       if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
-               return;
-       prev_jiffy = jiffies;
-
-       printk(KERN_ERR
-               "BUG: sleeping function called from invalid context at %s:%d\n",
-                       file, line);
-       printk(KERN_ERR
-               "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-                       in_atomic(), irqs_disabled(),
-                       current->pid, current->comm);
-
-       debug_show_held_locks(current);
-       if (irqs_disabled())
-               print_irqtrace_events(current);
-       dump_stack();
-}
-EXPORT_SYMBOL(__might_sleep);
-#endif
-
-/*
- * kernel/sched/core.c:1560
- * Perform scheduler related setup for a newly forked process p.
- * p is forked by current.
- *
- * __sched_fork() is basic setup used by init_idle() too:
- */
-static void __sched_fork(struct task_struct *p)
-{
-       p->on_rq                                        = 0;
-
-       p->se.on_rq                                     = 0;
-       p->se.exec_start                        = 0;
-       p->se.sum_exec_runtime          = 0;
-       p->se.prev_sum_exec_runtime     = 0;
-       p->se.vruntime                          = 0;
-}
-
-/*
- * kernel/sched/core.c:1622
- * fork()/clone()-time setup:
- */
-void sched_fork(struct task_struct *p)
-{
-       unsigned long flags;
-       int cpu = get_cpu();
-
-       __sched_fork(p);
-
-       /*
-        * We mark the process as running here. This guarantees that
-        * nobody will actually run it, and a signal or other external
-        * event cannot wake it up and insert it on the runqueue either.
-        */
-       p->state = TASK_RUNNING;
-
-       /*
-        * Make sure we do not leak PI boosting priority to the child.
-        */
-       p->prio = current->normal_prio;
-
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       set_task_cpu(p, cpu);
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-#if defined(CONFIG_SMP)
-       p->on_cpu = 0;
-#endif
-#ifdef CONFIG_PREEMPT_COUNT
-       /* Want to start with kernel preemption disabled. */
-       task_thread_info(p)->preempt_count = 1;
-#endif
-
-       put_cpu();
-}
-
-/**
- * /kernel/sched/core.c:4674
- * init_idle - set up an idle thread for a given CPU
- * @idle: task in question
- * @cpu: cpu the idle task belongs to
- *
- * NOTE: this function does not set the idle thread's NEED_RESCHED
- * flag, to make booting more robust.
- */
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&rq->lock, flags);
-
-       __sched_fork(idle);
-       idle->state = TASK_RUNNING;
-       idle->se.exec_start = sched_clock();
-
-       do_set_cpus_allowed(idle, cpumask_of(cpu));
-       /*
-        * We're having a chicken and egg problem, even though we are
-        * holding rq->lock, the cpu isn't yet set to this cpu so the
-        * lockdep check in task_group() will fail.
-        *
-        * Similar case to sched_fork(). / Alternatively we could
-        * use task_rq_lock() here and obtain the other rq->lock.
-        *
-        * Silence PROVE_RCU
-        */
-       rcu_read_lock();
-       __set_task_cpu(idle, cpu);
-       rcu_read_unlock();
-
-       rq->curr = rq->idle = idle;
-#if defined(CONFIG_SMP)
-       idle->on_cpu = 1;
-#endif
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-       /* Set the preempt count _outside_ the spinlocks! */
-       task_thread_info(idle)->preempt_count = 0;
-
-#if defined(CONFIG_SMP)
-       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
-#endif
-}
-
-/*
- * /kernel/sched/cputime.c:436
- * Account multiple ticks of idle time.
- * @ticks: number of stolen ticks
- */
-void account_idle_ticks(unsigned long ticks)
-{
-       //printk("\naccount_idle_ticks");
-
-       return;
-}
-
-/*
- * /kernel/sched/cputime.c:397
- * Account a single tick of cpu time.
- * @p: the process that the cpu time gets accounted to
- * @user_tick: indicates if the tick is a user or a system tick
- */
-void account_process_tick(struct task_struct *p, int user_tick)
-{
-       //printk("\naccount_process_tick");
-
-       return;
-}
-
-/*
- * /kernel/sched/core.c:2092
- * get_avenrun - get the load average array
- * @loads:    pointer to dest load array
- * @offset:    offset to add
- * @shift:    shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       //printk("\nget_avenrun");
-
-       return;
-}
-
-/*
- * /kernel/sched/core.c:2363
- * calc_load - update the avenrun load estimates 10 ticks after the
- * CPUs have updated calc_load_tasks.
- */
-void calc_global_load(unsigned long ticks)
-{
-//     printk("\ncalc_global_load");
-
-       return;
-}
-
-/*
- * /kernel/sched/core.c:2197
- * We're going into NOHZ mode, if there's any pending delta, fold it
- * into the pending idle delta.
- */
-/*void calc_load_enter_idle(void)
-{
-       return;
-}*/
-
-/*
- * /kernel/sched/core.c:2213
- * If we're still before the sample window, we're done.
- *
- * We woke inside or after the sample window, this means we're already
- * accounted through the nohz accounting, so skip the entire deal and
- * sync up for the next window.
- */
-/*void calc_load_exit_idle(void)
-{
-       return;
-}*/
-
-/*
- * /kernel/sched/core.c:3668
- * Check if a task can reduce its nice value
- * @p: task
- * @nice: nice value
- */
-int can_nice(const struct task_struct *p, const int nice)
-{
-       //printk("\ncan_nice");
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:3768
- * idle_task - return the idle task for a given cpu.
- * @cpu: the processor in question.
- */
-struct task_struct *idle_task(int cpu)
-{
-       return cpu_rq(cpu)->idle;
-}
-
-/**
- * /kernel/sched/core.c:3742
- * idle_cpu - is a given cpu idle currently?
- * @cpu: the processor in question.
- */
-int idle_cpu(int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-
-       if (rq->curr != rq->idle)
-               return 0;
-
-       if (rq->nr_running)
-               return 0;
-
-#ifdef CONFIG_SMP
-       if (!llist_empty(&rq->wake_list))
-               return 0;
-#endif
-
-       return 1;
-}
-
-/*
- * /kernel/sched/core.c:4669
- * Sets sched_class of idle task, see struct sched_class idle_sched_class;
- */
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
-{
-       //printk("\ninit_idle_bootup_task");
-
-       return;
-}
-
-/*
- * /kernel/sched/core.c:7108
- * Calls private function
- * static void normalize_task(struct rq *rq, struct task_struct *p)
- */
-void normalize_rt_tasks(void)
-{
-       printk("\nnormalize_rt_tasks");
-
-       return;
-}
-
-/*
- * /kernel/sched/core.c:1997
- * nr_running and nr_context_switches:
- *
- * externally visible scheduler statistics:
- *   current number of runnable threads
- *   total number of context switches performed since bootup.
- */
-unsigned long nr_running(void)
-{
-       printk("\nnr_running");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-unsigned long long nr_context_switches(void)
-{
-//     printk("\nnr_context_switches");
-
-       int i;
-       unsigned long long sum = 0;
-
-       for_each_possible_cpu(i)
-               sum += cpu_rq(i)->nr_switches;
-
-       return sum;
-}
-
-/*
- * /kernel/sched/core.c:2008
- * number of threads waiting on IO
- */
-unsigned long nr_iowait(void)
-{
-       printk("\nnr_iowait");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:2018
- */
-unsigned long nr_iowait_cpu(int cpu)
-{
-       printk("\nnr_iowait_cpu");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/*
- * rt_mutex_setprio - set the current priority of a task
- * @p: task
- * @prio: prio value (kernel-internal form)
- *
- * This function changes the 'effective' priority of a task. It does
- * not touch ->normal_prio like __setscheduler().
- *
- * Used by the rt_mutex code to implement priority inheritance logic.
- */
-void rt_mutex_setprio(struct task_struct *p, int prio)
-{
-       printk("\nrt_mutex_setprio");
-
-       return;
-}
-
-/**
- * sched_clock_cpu - returns current time in nanosec units
- * using scheduler clock function.
- * @param: cpu id
- */
-//u64 sched_clock_cpu(int cpu)
+//     }
+//
+////   update_max_interval();
+//
+//     return NOTIFY_OK;
+//}
+//
+///*
+// * Register at high priority so that task migration (migrate_all_tasks)
+// * happens before everything else.  This has to be lower priority than
+// * the notifier in the perf_event subsystem, though.
+// */
+//static struct notifier_block __cpuinitdata migration_notifier = {
+//     .notifier_call = migration_call,
+//     .priority = CPU_PRI_MIGRATION,
+//};
+//
+//static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+//                                   unsigned long action, void *hcpu)
+//{
+//     switch (action & ~CPU_TASKS_FROZEN) {
+//     case CPU_STARTING:
+//     case CPU_DOWN_FAILED:
+//             set_cpu_active((long)hcpu, true);
+//             return NOTIFY_OK;
+//     default:
+//             return NOTIFY_DONE;
+//     }
+//}
+//
+//static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+//                                     unsigned long action, void *hcpu)
+//{
+//     switch (action & ~CPU_TASKS_FROZEN) {
+//     case CPU_DOWN_PREPARE:
+//             set_cpu_active((long)hcpu, false);
+//             return NOTIFY_OK;
+//     default:
+//             return NOTIFY_DONE;
+//     }
+//}
+//
+//static int __init migration_init(void)
 //{
+//     void *cpu = (void *)(long)smp_processor_id();
+//     int err;
+//
+//     /* Initialize migration for the boot CPU */
+//     err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
+//     BUG_ON(err == NOTIFY_BAD);
+//     migration_call(&migration_notifier, CPU_ONLINE, cpu);
+//     register_cpu_notifier(&migration_notifier);
+//
+//     /* Register cpu active notifiers */
+//     cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
+//     cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
+//
 //     return 0;
 //}
-
-/*
- * kernel/sched/clock.c:350
- * Initialize/Start scheduler clock.
- */
-//void sched_clock_init(void)
+//early_initcall(migration_init);
+//
+//
+//
+//void do_set_cpus_allowed(struct task_struct *p,
+//                            const struct cpumask *new_mask)
 //{
-//     return;
+////   if (p->sched_class && p->sched_class->set_cpus_allowed)
+////           p->sched_class->set_cpus_allowed(p, new_mask);
+//
+//     cpumask_copy(&p->cpus_allowed, new_mask);
+//     p->nr_cpus_allowed = cpumask_weight(new_mask);
 //}
-
-/**
- * kernel/sched/core.c:4213
- * This function stores the CPU affinity mask for the process or thread with the ID pid in the cpusetsize
- * bytes long bitmap pointed to by cpuset. If successful, the function always initializes all bits in the
- * cpu_set_t object and returns zero.
- *
- * If pid does not correspond to a process or thread on the system, or if the function fails for some other
- * reason, it returns -1 and errno is set to represent the error condition.
- */
-long sched_getaffinity(pid_t pid, struct cpumask *mask)
-{
-       printk("\nsched_getaffinity");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:7571
- */
-int sched_rr_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
-{
-       //printk("\nsched_rr_handler");
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4111
- * This function installs the cpusetsize bytes long affinity mask pointed to by cpuset for the process or
- * thread with the ID pid. If successful the function returns zero and the scheduler will in future take the
- * affinity information into account.
- */
-long sched_setaffinity(pid_t pid, const struct cpumask *new_mask)
-{
-       //printk("\nsched_setaffinity");
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:3975
- * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * NOTE that the task may be already dead.
- */
-int sched_setscheduler(struct task_struct *p, int policy,
-               const struct sched_param *param)
-{
-       //printk("\nsched_setscheduler");
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:3993
- * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
- * @p: the task in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- *
- * Just like sched_setscheduler, only don't bother checking if the
- * current context has permission.  For example, this is needed in
- * stop_machine(): we create temporary high priority worker threads,
- * but our caller might not have that capability.
- */
-int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-const struct sched_param *param)
-{
-//     //printk("\nsched_setscheduler_nocheck");
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4601
- */
-void sched_show_task(struct task_struct *p)
-{
-       //printk("\nsched_show_task");
-
-       return;
-}
-
-/**
- * kernel/sched/core.c:652
- */
-void resched_task(struct task_struct *p)
-{
-       int cpu;
-
-       assert_raw_spin_locked(&task_rq(p)->lock);
-
-       if (test_tsk_need_resched(p))
-               return;
-
-       set_tsk_need_resched(p);
-
-       cpu = task_cpu(p);
-       if (cpu == smp_processor_id())
-               return;
-
-       /* NEED_RESCHED must be visible before we test polling */
-       smp_mb();
-       if (!tsk_is_polling(p))
-               smp_send_reschedule(cpu);
-}
-
-/**
- * kernel/sched/core.c:1806
- * prepare_task_switch - prepare to switch tasks
- * @rq: the runqueue preparing to switch
- * @prev: the current task that is being switched out
- * @next: the task we are going to switch to.
- *
- * This is called with the rq lock held and interrupts off. It must
- * be paired with a subsequent finish_task_switch after the context
- * switch.
- *
- * prepare_task_switch sets up locking and calls architecture specific
- * hooks.
- */
-static inline void
-prepare_task_switch(struct rq *rq, struct task_struct *prev,
-                   struct task_struct *next)
-{
-//     trace_sched_switch(prev, next);
-//     sched_info_switch(prev, next);
-//     perf_event_task_sched_out(prev, next);
-//     fire_sched_out_preempt_notifiers(prev, next);
-       prepare_lock_switch(rq, next);
-//     prepare_arch_switch(next);
-}
-
-/**
- * kernel/sched/core.c:1826
- * finish_task_switch - clean up after a task-switch
- * @rq: runqueue associated with task-switch
- * @prev: the thread we just switched away from.
- *
- * finish_task_switch must be called after the context switch, paired
- * with a prepare_task_switch call before the context switch.
- * finish_task_switch will reconcile locking set up by prepare_task_switch,
- * and do any other architecture-specific cleanup actions.
- *
- * Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock. (Doing it
- * with the lock held can cause deadlocks; see schedule() for
- * details.)
- */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
-       __releases(rq->lock)
-{
-       struct mm_struct *mm = rq->prev_mm;
-       long prev_state;
-
-       rq->prev_mm = NULL;
-
-       /*
-        * A task struct has one reference for the use as "current".
-        * If a task dies, then it sets TASK_DEAD in tsk->state and calls
-        * schedule one last time. The schedule call will never return, and
-        * the scheduled task must drop that reference.
-        * The test for TASK_DEAD must occur while the runqueue locks are
-        * still held, otherwise prev could be scheduled on another cpu, die
-        * there before we look at prev->state, and then the reference would
-        * be dropped twice.
-        *              Manfred Spraul <manfred@colorfullife.com>
-        */
-       prev_state = prev->state;
-       vtime_task_switch(prev);
-//     finish_arch_switch(prev);
-//     perf_event_task_sched_in(prev, current);
-       finish_lock_switch(rq, prev);
-       finish_arch_post_lock_switch();
-
-//     fire_sched_in_preempt_notifiers(current);
-       if (mm)
-               mmdrop(mm);
-       if (unlikely(prev_state == TASK_DEAD)) {
-               /*
-                * Remove function-return probe instances associated with this
-                * task and put them back on the free list.
-                */
-               kprobe_flush_task(prev);
-               put_task_struct(prev);
-       }
-}
-
-#ifdef CONFIG_SMP
-
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
-}
-
-/* rq->lock is NOT held, but preemption is disabled */
-static inline void post_schedule(struct rq *rq)
-{
-}
-
-#else
-
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void post_schedule(struct rq *rq)
-{
-}
-
-#endif
-
-/**
- * kernel/sched/core.c:1905
- * schedule_tail - first thing a freshly forked thread must call.
- * @prev: the thread we just switched away from.
- */
-asmlinkage void schedule_tail(struct task_struct *prev)
-       __releases(rq->lock)
-{
-       struct rq *rq = this_rq();
-
-       finish_task_switch(rq, prev);
-
-       /*
-        * FIXME: do we need to worry about rq being invalidated by the
-        * task_switch?
-        */
-       // TODO: SMP
-       post_schedule(rq);
-
-       // TODO: replace this irq enable, maybe inside post_schedule
-       arch_local_irq_enable();
-
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-       /* In this case, finish_task_switch does not reenable preemption */
-       preempt_enable();
-#endif
-       if (current->set_child_tid)
-               put_user(task_pid_vnr(current), current->set_child_tid);
-}
-
-
-/**
- * kernel/sched/core.c:769
- */
-static void update_rq_clock_task(struct rq *rq, s64 delta)
-{
-/*
- * In theory, the compiler should just see 0 here, and optimize out the call
- * to sched_rt_avg_update. But I don't trust it...
- */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       s64 steal = 0, irq_delta = 0;
-#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-       irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-
-       /*
-        * Since irq_time is only updated on {soft,}irq_exit, we might run into
-        * this case when a previous update_rq_clock() happened inside a
-        * {soft,}irq region.
-        *
-        * When this happens, we stop ->clock_task and only update the
-        * prev_irq_time stamp to account for the part that fit, so that a next
-        * update will consume the rest. This ensures ->clock_task is
-        * monotonic.
-        *
- * It does however cause some slight mis-attribution of {soft,}irq
-        * time, a more accurate solution would be to update the irq_time using
-        * the current rq->clock timestamp, except that would require using
-        * atomic ops.
-        */
-       if (irq_delta > delta)
-               irq_delta = delta;
-
-       rq->prev_irq_time += irq_delta;
-       delta -= irq_delta;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-       if (static_key_false((&paravirt_steal_rq_enabled))) {
-               u64 st;
-
-               steal = paravirt_steal_clock(cpu_of(rq));
-               steal -= rq->prev_steal_time_rq;
-
-               if (unlikely(steal > delta))
-                       steal = delta;
-
-               st = steal_ticks(steal);
-               steal = st * TICK_NSEC;
-
-               rq->prev_steal_time_rq += steal;
-
-               delta -= steal;
-       }
-#endif
-
-       rq->clock_task += delta;
-
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-       if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
-               sched_rt_avg_update(rq, irq_delta + steal);
-#endif
-}
-
-//static void update_rq_clock_task(struct rq *rq, s64 delta);
-void update_rq_clock(struct rq *rq)
-{
-       s64 delta;
-
-       if (rq->skip_clock_update > 0)
-               return;
-
-       delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-       rq->clock += delta;
-       update_rq_clock_task(rq, delta);
-}
-
-/*
- * kernel/sched/core.c:2684
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- */
-void scheduler_tick(void)
-{
-       int cpu = smp_processor_id();
-       struct rq *rq = cpu_rq(cpu);
-       struct task_struct *curr = rq->curr;
-
-       u64 now = rq->clock_task;
-       unsigned long delta_exec;
-
-       sched_clock_tick();
-
-       raw_spin_lock(&rq->lock);
-       update_rq_clock(rq);
-
-       /*
-        * Update run-time statistics of the 'current'.
-        */
-       if (unlikely(!curr)) {
-               raw_spin_unlock(&rq->lock);
-               return;
-       }
-
-       /*
-        * Get the amount of time the current task was running
-        * since the last time we changed load (this cannot
-        * overflow on 32 bits):
-        */
-       delta_exec = (unsigned long)(now - curr->se.exec_start);
-
-       if (delta_exec > RR_TIMESLICE) {
-               resched_task(curr);
-       }
-
-       raw_spin_unlock(&rq->lock);
-
-       // TODO: SMP for load balancing
-}
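
One caveat worth flagging, as an assumption about intent rather than a statement about this patch: rq->clock_task and se.exec_start are sched_clock()-based nanosecond values, while RR_TIMESLICE is a jiffies count, so the comparison above appears to fire long before a real 100 ms slice has elapsed. A unit-consistent check might look like this sketch (the helper name is invented; RR_TIMESLICE and TICK_NSEC come from headers this file already includes):

#include <linux/jiffies.h>	/* TICK_NSEC */
#include <linux/types.h>

/* hypothetical helper: has a task consumed a full round-robin slice?
 * delta_ns is a sched_clock()-based runtime delta in nanoseconds. */
static inline bool ex_rr_slice_expired(u64 delta_ns)
{
	return delta_ns > (u64)RR_TIMESLICE * TICK_NSEC;	/* ~100 ms with the default slice */
}
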
-
-/*
- * kernel/sched/core.c:2649
- * Lock/unlock the current runqueue - to extract task statistics:
- */
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-       printk("\ntask_delta_exec");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:3727
- * task_prio - return the priority value of a given task.
- * @p: the task in question.
- *
- * This is the priority value as seen by users in /proc.
- * RT tasks are offset by -200. Normal tasks are centered
- * around 0, value goes from -16 to +15.
- */
-int task_prio(const struct task_struct *p)
-{
-       //printk("\ntask_prio");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:2667
- * Return accounted runtime for the task.
- * In case the task is currently running, return the runtime plus current's
- * pending runtime that have not been accounted yet.
- */
-unsigned long long task_sched_runtime(struct task_struct *task)
-{
-       //printk("\ntask_sched_runtime");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:2024
- * this_cpu_load - returns load of the cpu
- */
-unsigned long this_cpu_load(void)
-{
-       //printk("\nthis_cpu_load");
-
-       // TODO: SMP, needed in case of load balancing per CPU
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:2556
- * update_cpu_load_nohz - called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
-void update_cpu_load_nohz(void)
-{
-       //printk("\nupdate_cpu_load_nohz");
-
-       return;
-}
-
-
-/*
- * kernel/sched/core.c:1207
- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
- */
-static inline
-int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
-{
-       int cpu = task_cpu(p);
-//     int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
-
-       /*
-        * In order not to call set_task_cpu() on a blocking task we need
-        * to rely on ttwu() to place the task on a valid ->cpus_allowed
-        * cpu.
-        *
-        * Since this is common to all placement strategies, this lives here.
-        *
-        * [ this allows ->select_task() to simply return task_cpu(p) and
-        *   not worry about this generic constraint ]
-        */
-       if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
-                    !cpu_online(cpu)))
-               cpu = cpumask_first(tsk_cpus_allowed(p)); //select_fallback_rq(task_cpu(p), p);
-
-       return cpu;
-}
-
-/*
- * kernel/sched/core.c:736
- */
-void activate_task(struct rq *rq, struct task_struct *p, int flags)
-{
-       if (task_contributes_to_load(p))
-               rq->nr_uninterruptible--;
-
-//     enqueue_task(rq, p, flags);
-       list_add(&p->rq_tasks, &rq->rq_list);
-}
-
-/*
- * kernel/sched/core.c:744
- */
-void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
-{
-       if (task_contributes_to_load(p))
-               rq->nr_uninterruptible++;
-
-//     dequeue_task(rq, p, flags);
-       list_del(&p->rq_tasks);
-}
-
-/*
- * kernel/sched/core.c:1275
- */
-static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
-{
-       activate_task(rq, p, en_flags);
-       p->on_rq = 1;
-
-       /* if a worker is waking up, notify workqueue */
-       if (p->flags & PF_WQ_WORKER)
-               wq_worker_waking_up(p, cpu_of(rq));
-}
-
-/*
- * kernel/sched/core.c:909
- */
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-//     const struct sched_class *class;
-//
-//     if (p->sched_class == rq->curr->sched_class) {
-//             rq->curr->sched_class->check_preempt_curr(rq, p, flags);
-//     } else {
-//             for_each_class(class) {
-//                     if (class == rq->curr->sched_class)
-//                             break;
-//                     if (class == p->sched_class) {
-//                             resched_task(rq->curr);
-//                             break;
-//                     }
-//             }
+//
+//int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+//{
+//     unsigned long flags;
+//     struct rq *rq;
+//     unsigned int dest_cpu;
+//     int ret = 0;
+//
+//     rq = task_rq_lock(p, &flags);
+//
+//     if (cpumask_equal(&p->cpus_allowed, new_mask))
+//             goto out;
+//
+//     if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+//             ret = -EINVAL;
+//             goto out;
 //     }
-       if (rq->curr == rq->idle)
-                       resched_task(rq->curr);
-
-       /*
-        * A queue event has occurred, and we're going to schedule.  In
-        * this case, we can save a useless back to back clock update.
-        */
-       if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
-               rq->skip_clock_update = 1;
-}
-
-/*
- * kernel/sched/core.c:1289
- * Mark the task runnable and perform wakeup-preemption.
- */
-static void
-ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
-{
-//     trace_sched_wakeup(p, true);
-       check_preempt_curr(rq, p, wake_flags);
-
-       p->state = TASK_RUNNING;
-//#ifdef CONFIG_SMP
-//     if (p->sched_class->task_woken)
-//             p->sched_class->task_woken(rq, p);
-//
-//     if (rq->idle_stamp) {
-//             u64 delta = rq->clock - rq->idle_stamp;
-//             u64 max = 2*sysctl_sched_migration_cost;
-//
-//             if (delta > max)
-//                     rq->avg_idle = max;
-//             else
-//                     update_avg(&rq->avg_idle, delta);
-//             rq->idle_stamp = 0;
+//
+//     if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
+//             ret = -EINVAL;
+//             goto out;
 //     }
-//#endif
-}
-
-/*
- * kernel/sched/core.c:1313
- */
-static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
-{
-#ifdef CONFIG_SMP
-       if (p->sched_contributes_to_load)
-               rq->nr_uninterruptible--;
-#endif
-
-       ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-       ttwu_do_wakeup(rq, p, wake_flags);
-}
-
-#ifdef CONFIG_SMP
-/*
- * kernel/sched/core.c:1394
- */
-static void ttwu_queue_remote(struct task_struct *p, int cpu)
-{
-       if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
-               smp_send_reschedule(cpu);
-}
-#endif
-
-/*
- * kernel/sched/core.c:1406
- */
-static void ttwu_queue(struct task_struct *p, int cpu)
-{
-       struct rq *rq = cpu_rq(cpu);
-
-#if defined(CONFIG_SMP)
-       if (/*sched_feat(TTWU_QUEUE) && */!cpus_share_cache(smp_processor_id(), cpu)) {
-               sched_clock_cpu(cpu); /* sync clocks x-cpu */
-               ttwu_queue_remote(p, cpu);
-               return;
-       }
-#endif
-
-       raw_spin_lock(&rq->lock);
-       ttwu_do_activate(rq, p, 0);
-       raw_spin_unlock(&rq->lock);
-}
-
-/*
- * kernel/sched/core.c:1703
- * wake_up_new_task - wake up a newly created task for the first time.
- *
- * This function will do some initial scheduler statistics housekeeping
- * that must be done for every newly created context, then puts the task
- * on the runqueue and wakes it.
- */
-void wake_up_new_task(struct task_struct *p)
-{
-       unsigned long flags;
-       struct rq *rq;
-//     int cpu = 255;
-
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-
-#ifdef CONFIG_SMP
-       /*
-        * Fork balancing, do it here and not earlier because:
-        *  - cpus_allowed can change in the fork path
-        *  - any previously selected cpu might disappear through hotplug
-        */
-       set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
-//     printk("new thread\n");
-//     for_each_cpu(cpu, &(p->cpus_allowed)){
-//             printk("Asked for CPU #%d\n", cpu);
+//
+//     do_set_cpus_allowed(p, new_mask);
+//
+//     /* Can the task run on the task's current CPU? If so, we're done */
+//     if (cpumask_test_cpu(task_cpu(p), new_mask))
+//             goto out;
+//
+//     dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+//     if (p->on_rq) {
+//             struct migration_arg arg = { p, dest_cpu };
+//             /* Need help from migration thread: drop lock and wait. */
+//             task_rq_unlock(rq, p, &flags);
+//             stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+//             tlb_migrate_finish(p->mm);
+//             return 0;
 //     }
-
-#endif
-
-       rq = __task_rq_lock(p);
-       activate_task(rq, p, 0);
-       p->on_rq = 1;
-//     trace_sched_wakeup_new(p, true);
-       check_preempt_curr(rq, p, WF_FORK);
-//#ifdef CONFIG_SMP
-//     if (p->sched_class->task_woken)
-//             p->sched_class->task_woken(rq, p);
-//#endif
-       task_rq_unlock(rq, p, &flags);
-}
-
-/*
- * kernel/sched/core.c:1330
- * Called in case the task @p isn't fully descheduled from its runqueue;
- * in this case we must do a remote wakeup. It's a 'light' wakeup though,
- * since all we need to do is flip p->state to TASK_RUNNING, since
- * the task is still ->on_rq.
- */
-static int ttwu_remote(struct task_struct *p, int wake_flags)
-{
-       struct rq *rq;
-       int ret = 0;
-
-       rq = __task_rq_lock(p);
-       if (p->on_rq) {
-               ttwu_do_wakeup(rq, p, wake_flags);
-               ret = 1;
-       }
-       __task_rq_unlock(rq);
-
-       return ret;
-}
-
-/**
- * kernel/sched/core.c:1439
- * try_to_wake_up - wake up a thread
- * @p: the thread to be awakened
- * @state: the mask of task states that can be woken
- * @wake_flags: wake modifier flags (WF_*)
- *
- * Put it on the run-queue if it's not already there. The "current"
- * thread is always on the run-queue (except when the actual
- * re-schedule is in progress), and as such you're allowed to do
- * the simpler "current->state = TASK_RUNNING" to mark yourself
- * runnable without the overhead of this.
- *
- * Returns %true if @p was woken up, %false if it was already running
- * or @state didn't match @p's state.
- */
-static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
-{
-       unsigned long flags;
-       int cpu, success = 0;
-
-       smp_wmb();
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       if (!(p->state & state))
-               goto out;
-
-       success = 1; /* we're going to change ->state */
-       cpu = task_cpu(p);
-
-       if (p->on_rq && ttwu_remote(p, wake_flags))
-               goto stat;
-
-#ifdef CONFIG_SMP
-       /*
-        * If the owning (remote) cpu is still in the middle of schedule() with
-        * this task as prev, wait until its done referencing the task.
-        */
-       while (p->on_cpu)
-               cpu_relax();
-       /*
-        * Pairs with the smp_wmb() in finish_lock_switch().
-        */
-       smp_rmb();
-
-//     p->sched_contributes_to_load = !!task_contributes_to_load(p);
-       p->state = TASK_WAKING;
-
-//     if (p->sched_class->task_waking)
-//             p->sched_class->task_waking(p);
-
-       // TODO: simply not using select_task_rq :)
-       cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-       if (task_cpu(p) != cpu) {
-               wake_flags |= WF_MIGRATED;
-               set_task_cpu(p, cpu);
-       }
-#endif /* CONFIG_SMP */
-
-       ttwu_queue(p, cpu);
-stat:
+//out:
+//     task_rq_unlock(rq, p, &flags);
+//
+//     return ret;
+//}
+//EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
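
Typical usage of this interface, as a sketch (the CPU number and wrapper are invented): the caller hands in a cpumask, and the function migrates the task away via the stopper thread if it is currently running on a now-disallowed CPU.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* hypothetical: restrict a task to CPU 2; returns 0 on success or a
 * negative errno (e.g. -EINVAL if CPU 2 is not active). */
static int ex_pin_to_cpu2(struct task_struct *tsk)
{
	return set_cpus_allowed_ptr(tsk, cpumask_of(2));
}
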
+//
+//static void sched_ttwu_pending(void)
+//{
+//     struct rq *rq = this_rq();
+//     struct llist_node *llist = llist_del_all(&rq->wake_list);
+//     struct task_struct *p;
+//
+//     raw_spin_lock(&rq->lock);
+//
+//     while (llist) {
+//             p = llist_entry(llist, struct task_struct, wake_entry);
+//             llist = llist_next(llist);
+//             ttwu_do_activate(rq, p, 0);
+//     }
+//
 //     raw_spin_unlock(&rq->lock);
-out:
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-       return success;
-}
-
-/**
- * kernel/sched/core.c:1497
- * try_to_wake_up_local - try to wake up a local task with rq lock held
- * @p: the thread to be awakened
- *
- * Put @p on the run-queue if it's not already there. The caller must
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
-static void try_to_wake_up_local(struct task_struct *p)
-{
-       struct rq *rq = task_rq(p);
-
-       if (WARN_ON_ONCE(rq != this_rq()) ||
-           WARN_ON_ONCE(p == current))
-               return;
-
-       lockdep_assert_held(&rq->lock);
-
-       if (!raw_spin_trylock(&p->pi_lock)) {
-               raw_spin_unlock(&rq->lock);
-               raw_spin_lock(&p->pi_lock);
-               raw_spin_lock(&rq->lock);
-       }
-
-       if (!(p->state & TASK_NORMAL))
-               goto out;
-
-       if (!p->on_rq)
-               ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
-       ttwu_do_wakeup(rq, p, 0);
-//     ttwu_stat(p, smp_processor_id(), 0);
-out:
-       raw_spin_unlock(&p->pi_lock);
-}
-
-/*
- * kernel/sched/core.c:1931
- * context_switch - switch to the new MM and the new
- * thread's register state.
- */
-static inline void
-context_switch(struct rq *rq, struct task_struct *prev,
-              struct task_struct *next)
-{
-       struct mm_struct *mm, *oldmm;
-
-       prepare_task_switch(rq, prev, next);
-
-       mm = next->mm;
-       oldmm = prev->active_mm;
-       /*
-        * For paravirt, this is coupled with an exit in switch_to to
-        * combine the page table reload and the switch backend into
-        * one hypercall.
-        */
-//     arch_start_context_switch(prev);
-
-       if (!mm) {
-               next->active_mm = oldmm;
-               atomic_inc(&oldmm->mm_count);
-               enter_lazy_tlb(oldmm, next);
-       }
-       else
-               switch_mm(oldmm, mm, next);
-
-       if (!prev->mm) {
-               prev->active_mm = NULL;
-               rq->prev_mm = oldmm;
-       }
-       /*
-        * Since the runqueue lock will be released by the next
-        * task (which is an invalid locking op but in the case
-        * of the scheduler it's an obvious special-case), so we
-        * do an early lockdep release here:
-        */
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-#endif
-
-       context_tracking_task_switch(prev, next);
-       /* Here we just switch the register state and the stack. */
-       switch_to(prev, next, prev);
-
-       barrier();
-       /*
-        * this_rq must be evaluated again because prev may have moved
-        * CPUs since it called schedule(), thus the 'rq' on its stack
-        * frame will be invalid.
-        */
-       finish_task_switch(this_rq(), prev);
-
-}
-
-/*
- * kernel/sched/core.c:2875
- * __schedule() is the main scheduler function.
- *
- * The main means of driving the scheduler and thus entering this function are:
- *
- *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
- *
- *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
- *      paths. For example, see arch/x86/entry_64.S.
- *
- *      To drive preemption between tasks, the scheduler sets the flag in timer
- *      interrupt handler scheduler_tick().
- *
- *   3. Wakeups don't really cause entry into schedule(). They add a
- *      task to the run-queue and that's it.
- *
- *      Now, if the new task added to the run-queue preempts the current
- *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
- *      called on the nearest possible occasion:
- *
- *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
- *
- *         - in syscall or exception context, at the next outermost
- *           preempt_enable(). (this might be as soon as the wake_up()'s
- *           spin_unlock()!)
- *
- *         - in IRQ context, return from interrupt-handler to
- *           preemptible context
- *
- *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
- *         then at the next:
- *
- *          - cond_resched() call
- *          - explicit schedule() call
- *          - return from syscall or exception to user-space
- *          - return from interrupt-handler to user-space
- */
-static void __sched __schedule(void)
-{
-       struct task_struct *prev, *next;
-//     unsigned long *switch_count = 0;
-       struct rq *rq;
-       int cpu;
-
-need_resched:
-       preempt_disable();
-       cpu = smp_processor_id();
-       rq = cpu_rq(cpu);
-       rcu_note_context_switch(cpu);
-       prev = rq->curr;
-
-//     schedule_debug(prev);
-
-//     if (sched_feat(HRTICK))
-//             hrtick_clear(rq);
-
-       raw_spin_lock_irq(&rq->lock);
-
-//     switch_count = &prev->nivcsw;
-       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-               if (unlikely(signal_pending_state(prev->state, prev))) {
-                       prev->state = TASK_RUNNING;
-               } else {
-//                     deactivate_task(rq, prev, DEQUEUE_SLEEP);
-                       prev->on_rq = 0;
-
-                       /*
-                        * If a worker went to sleep, notify and ask workqueue
-                        * whether it wants to wake up a task to maintain
-                        * concurrency.
-                        */
-                       if (prev->flags & PF_WQ_WORKER) {
-                               struct task_struct *to_wakeup;
-
-                               to_wakeup = wq_worker_sleeping(prev, cpu);
-                               if (to_wakeup)
-                                       try_to_wake_up_local(to_wakeup);
-                       }
-               }
-//             switch_count = &prev->nvcsw;
-       }
-
-       pre_schedule(rq, prev);
-
-//     if (unlikely(!rq->nr_running))
-//             idle_balance(cpu, rq);
-
-//     put_prev_task(rq, prev);
-       if ((prev != rq->idle) && prev->on_rq) {
-               list_add_tail(&prev->rq_tasks, &rq->rq_list);
-       }
-
-       /*      In case the only runnable task gets deactivated, we need to schedule
-        *      the idle task.
-        */
-//     next = pick_next_task(rq);
-       if (!list_empty(&rq->rq_list)) {
-               assert_raw_spin_locked(&rq->lock);
-               next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
-               list_del(&next->rq_tasks);
-       }
-       else {
-               next = rq->idle;
-       }
-       next->se.exec_start = rq->clock_task;
-
-
-       clear_tsk_need_resched(prev);
-       rq->skip_clock_update = 0;
-
-       if (likely(prev != next)) {
-               rq->nr_switches++;
-               rq->curr = next;
-//             ++*switch_count;
-
-               context_switch(rq, prev, next); /* unlocks the rq */
-
-               // TODO: remove irq enable
-               arch_local_irq_enable();
-
-               /*
-                * The context switch has flipped the stack from under us
-                * and restored the local variables which were saved when
-                * this task called schedule() in the past. prev == current
-                * is still correct, but it can be moved to another cpu/rq.
-                */
-               cpu = smp_processor_id();
-               rq = cpu_rq(cpu);
-       }
-       else
-               raw_spin_unlock_irq(&rq->lock);
-
-       post_schedule(rq);
-
-       sched_preempt_enable_no_resched();
-       if (need_resched())
-               goto need_resched;
-}
-
-/*
- * kernel/sched/core.c:2966
- */
-asmlinkage void __sched schedule(void)
-{
-//     struct task_struct *tsk = current;
-//
-//     if (!tsk->state || tsk_is_pi_blocked(tsk))
-//             return;
-
-       __schedule();
-}
-EXPORT_SYMBOL(schedule);
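
For illustration only (not part of this patch): a minimal sketch of the "explicit blocking" entry into __schedule() described above. The wait queue `my_waitq` and flag `my_event_flag` are hypothetical names; the caller marks itself non-runnable, re-checks its condition, and only then calls schedule().

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);       /* hypothetical wait queue */
static int my_event_flag;                       /* hypothetical condition  */

static void wait_for_my_event(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                /* mark ourselves sleeping before re-testing the condition */
                prepare_to_wait(&my_waitq, &wait, TASK_INTERRUPTIBLE);
                if (my_event_flag)
                        break;
                schedule();     /* ends up in __schedule() above */
                /* a real caller would also bail out on signal_pending() */
        }
        finish_wait(&my_waitq, &wait);
}
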
-
-/*
- * kernel/sched/core.c:3125
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
- *
- * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
- * zero in this (rare) case, and we handle it by continuing to scan the queue.
- */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, int wake_flags, void *key)
-{
-       wait_queue_t *curr, *next;
-
-       list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
-               unsigned flags = curr->flags;
-
-               if (curr->func(curr, mode, wake_flags, key) &&
-                               (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
-                       break;
-       }
-}
-
-/**
- * kernel/sched/core.c:3149
- * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, void *key)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, 0, key);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(__wake_up);
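
A hedged sketch of the matching wake-up side: the common wake_up() helper expands to __wake_up(q, TASK_NORMAL, 1, NULL), i.e. it wakes all non-exclusive waiters plus at most one exclusive waiter. `my_waitq` and `my_event_flag` are the hypothetical names from the sketch above.

static void signal_my_event(void)
{
        my_event_flag = 1;
        /* wake_up(&my_waitq) == __wake_up(&my_waitq, TASK_NORMAL, 1, NULL) */
        wake_up(&my_waitq);
}
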
-
-/**
- * kernel/sched/core.c:1536
- * wake_up_process - Wake up a specific process
- * @p: The process to be woken up.
- *
- * Attempt to wake up the nominated process and move it to the set of runnable
- * processes.  Returns 1 if the process was woken up, 0 if it was already
- * running.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-int wake_up_process(struct task_struct *p)
-{
-       WARN_ON(task_is_stopped_or_traced(p));
-       return try_to_wake_up(p, TASK_NORMAL, 0);
-}
-EXPORT_SYMBOL(wake_up_process);
-
-static inline long __sched
-do_wait_for_common(struct completion *x,
-                  long (*action)(long), long timeout, int state)
-{
-       if (!x->done) {
-               DECLARE_WAITQUEUE(wait, current);
-
-               __add_wait_queue_tail_exclusive(&x->wait, &wait);
-               do {
-                       if (signal_pending_state(state, current)) {
-                               timeout = -ERESTARTSYS;
-                               break;
-                       }
-                       __set_current_state(state);
-                       spin_unlock_irq(&x->wait.lock);
-                       timeout = action(timeout);
-                       spin_lock_irq(&x->wait.lock);
-               } while (!x->done && timeout);
-               __remove_wait_queue(&x->wait, &wait);
-               if (!x->done)
-                       return timeout;
-       }
-       x->done--;
-       return timeout ?: 1;
-}
-
-static inline long __sched
-__wait_for_common(struct completion *x,
-                 long (*action)(long), long timeout, int state)
-{
-       might_sleep();
-
-       spin_lock_irq(&x->wait.lock);
-       timeout = do_wait_for_common(x, action, timeout, state);
-       spin_unlock_irq(&x->wait.lock);
-       return timeout;
-}
-
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
-{
-       return __wait_for_common(x, schedule_timeout, timeout, state);
-}
-
-/**
- * kernel/sched/core.c:3322
- * wait_for_completion: - waits for completion of a task
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout.
- *
- * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
- * and interrupt capability. Also see complete().
- */
-void __sched wait_for_completion(struct completion *x)
-{
-       wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-/**
- * kernel/sched/core.c:3231
- * complete: - signals a single thread waiting on this completion
- * @x:  holds the state of this particular completion
- *
- * This will wake up a single thread waiting on this completion. Threads will be
- * awakened in the same order in which they were queued.
- *
- * See also complete_all(), wait_for_completion() and related routines.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete(struct completion *x)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done++;
-       __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete);
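
To show how the two halves of the completion API above fit together, here is a hedged usage sketch; `setup_done`, `worker_fn` and the kthread name are hypothetical, and error handling is omitted.

#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(setup_done);          /* hypothetical completion */

static int worker_fn(void *data)
{
        /* ... perform the setup work ... */
        complete(&setup_done);                  /* wakes exactly one waiter */
        return 0;
}

static void start_and_wait(void)
{
        kthread_run(worker_fn, NULL, "setup-worker");
        wait_for_completion(&setup_done);       /* sleeps in TASK_UNINTERRUPTIBLE */
}
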
-
-/**
- * kernel/sched/core.c:2995
- * schedule_preempt_disabled - called with preemption disabled
- *
- * Returns with preemption disabled. Note: preempt_count must be 1
- */
-void __sched schedule_preempt_disabled(void)
-{
-       sched_preempt_enable_no_resched();
-       schedule();
-       preempt_disable();
-}
-
-/*
- * kernel/sched/core.c:6858
- */
-int in_sched_functions(unsigned long addr)
-{
-       printk("\nin_sched_functions");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:4333
- * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
- * call schedule, and on return reacquire the lock.
- *
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
- * operations here to prevent schedule() from being called twice (once via
- * spin_unlock(), once by hand).
- */
-int __cond_resched_lock(spinlock_t *lock)
-{
-       printk("\n__cond_resched_lock");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:4315
- */
-static inline int should_resched(void)
-{
-       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}
-
-static void __cond_resched(void)
-{
-       add_preempt_count(PREEMPT_ACTIVE);
-       __schedule();
-       sub_preempt_count(PREEMPT_ACTIVE);
-}
-
-int __sched _cond_resched(void)
-{
-       if (should_resched()) {
-               __cond_resched();
-               return 1;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(_cond_resched);
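
A hedged sketch of the intended use of cond_resched(): a long-running kernel loop yields voluntarily whenever a reschedule is pending, which is how such loops stay fair on !CONFIG_PREEMPT kernels. The loop body and `nr_items` are placeholders.

static void process_items(unsigned long nr_items)
{
        unsigned long i;

        for (i = 0; i < nr_items; i++) {
                /* ... process item i ... */
                cond_resched(); /* enters __schedule() via __cond_resched() if needed */
        }
}
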
-
-/*
- * kernel/sched/core.c:4333
- */
-int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, void *key)
-{
-       return try_to_wake_up(curr->private, mode, wake_flags);
-}
-EXPORT_SYMBOL(default_wake_function);
-
-/**
- * kernel/sched/core.c:3426
- * wait_for_completion_killable: - waits for completion of a task (killable)
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It can be
- * interrupted by a kill signal.
- *
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
- */
-int __sched wait_for_completion_killable(struct completion *x)
-{
-       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
-       if (t == -ERESTARTSYS)
-               return t;
-       return 0;
-}
-EXPORT_SYMBOL(wait_for_completion_killable);
-
-/**
- * kernel/sched/core.c:3192
- * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: opaque value to be passed to wakeup targets
- *
- * The sync wakeup differs in that the waker knows that it will schedule
- * away soon, so while the target thread will be woken up, it will not
- * be migrated to another CPU - ie. the two threads are 'synchronized'
- * with each other. This can prevent needless bouncing between CPUs.
- *
- * On UP it can prevent extra preemption.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, void *key)
-{
-       unsigned long flags;
-       int wake_flags = WF_SYNC;
-
-       if (unlikely(!q))
-               return;
-
-       if (unlikely(!nr_exclusive))
-               wake_flags = 0;
-
-       spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
-       spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync_key);
-
-/*
- * kernel/sched/core.c:1543
- */
-int wake_up_state(struct task_struct *p, unsigned int state)
-{
-       WARN_ON(task_is_stopped_or_traced(p));
-       return try_to_wake_up(p, state, 0);
-}
-EXPORT_SYMBOL(wake_up_state);
-
-/**
- * kernel/sched/core.c:4389
- * yield - yield the current processor to other threads.
- *
- * Do not ever use this function, there's a 99% chance you're doing it wrong.
- *
- * The scheduler is at all times free to pick the calling task as the most
- * eligible task to run; if removing the yield() call from your code breaks
- * it, it's already broken.
- *
- * Typical broken usage is:
- *
- * while (!event)
- *     yield();
- *
- * where one assumes that yield() will let 'the other' process run that will
- * make event true. If the current task is a SCHED_FIFO task that will never
- * happen. Never use yield() as a progress guarantee!!
- *
- * If you want to use yield() to wait for something, use wait_event().
- * If you want to use yield() to be 'nice' for others, use cond_resched().
- * If you still want to use yield(), do not!
- */
-void __sched yield(void)
-{
-       printk("\nyield");
-
-       // TODO: SMP
-
-       return;
-}
-
-/**
- * kernel/sched/core.c:892
- * task_curr - is this task currently executing on a CPU?
- * @p: the task in question.
- */
-inline int task_curr(const struct task_struct *p)
-{
-       printk("\ntask_curr");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:3736
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- */
-int task_nice(const struct task_struct *p)
-{
-       printk("\ntask_nice");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:3616
- */
-void set_user_nice(struct task_struct *p, long nice)
-{
-//     printk("\nset_user_nice");
-
-       return;
-}
-
-/*
- * kernel/sched/core.c:3169
- */
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
-{
-       printk("\n__wake_up_locked_key");
-
-       return;
-}
-
-/*
- * kernel/sched/core.c:4474
- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
- * that process accounting knows that this is a task in IO wait state.
- */
-void __sched io_schedule(void)
-{
-//     printk("\nio_schedule");
-
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
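
A hedged sketch of the usual calling pattern for io_schedule(): the caller marks itself sleeping on an I/O condition and uses io_schedule() instead of schedule() so the sleep is accounted in rq->nr_iowait as described above; `io_done` and the wait queue are placeholders.

static void wait_for_io(wait_queue_head_t *wq, int *io_done)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                if (*io_done)
                        break;
                io_schedule();  /* counted as iowait, see above */
        }
        finish_wait(wq, &wait);
}
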
-
-/*
- * kernel/sched/core.c:4489
- */
-long __sched io_schedule_timeout(long timeout)
-{
-//     printk("\nio_schedule_timeout");
-       struct rq *rq = raw_rq();
-       long ret;
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-       return ret;
-}
-
-
-/*
- * kernel/sched/core.c:7590
- */
-int sched_rt_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos)
-{
-       //printk("\nsched_rt_handler");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core.c:3213
- * __wake_up_sync - see __wake_up_sync_key()
- */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
-{
-       printk("\n__wake_up_sync");
-
-       return;
-}
-
-/*
- * kernel/sched/core.c:3163
- * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
- */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
-{
-       printk("\n__wake_up_locked");
-
-       return;
-}
-
-/**
- * kernel/sched/core.c:3307
- */
-static long __sched
-wait_for_common_io(struct completion *x, long timeout, int state)
-{
-       return __wait_for_common(x, io_schedule_timeout, timeout, state);
-}
-
-/**
- * kernel/sched/core.c:3355
- * wait_for_completion_io: - waits for completion of a task
- * @x:  holds the state of this particular completion
- *
- * This waits to be signaled for completion of a specific task. It is NOT
- * interruptible and there is no timeout. The caller is accounted as waiting
- * for IO.
- */
-void __sched wait_for_completion_io(struct completion *x)
-{
-       wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_io);
-
-/**
- * kernel/sched/core.c:3416
- * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible. The caller is accounted as waiting for IO.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
-{
-       return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_io_timeout);
-
-/*
- * kernel/sched/core.c:4634
- */
-void show_state_filter(unsigned long state_filter)
-{
-       //printk("\nshow_state_filter");
-
-       return;
-}
-
-/**
- * kernel/sched/core.c:3251
- * complete_all: - signals all threads waiting on this completion
- * @x:  holds the state of this particular completion
- *
- * This will wake up all threads waiting on this particular completion event.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void complete_all(struct completion *x)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&x->wait.lock, flags);
-       x->done += UINT_MAX/2;
-       __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete_all);
-
-/**
- * kernel/sched/core.c:3341
- * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
- * @x:  holds the state of this particular completion
- * @timeout:  timeout value in jiffies
- *
- * This waits for either a completion of a specific task to be signaled or for a
- * specified timeout to expire. The timeout is in jiffies. It is not
- * interruptible.
- *
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
- */
-unsigned long __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
-       return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
-}
-EXPORT_SYMBOL(wait_for_completion_timeout);
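
A hedged usage sketch for the timeout variant: the return value is the number of jiffies left (at least 1) on success and 0 on timeout, so callers usually map 0 to -ETIMEDOUT. `setup_done` is the hypothetical completion from the earlier sketch.

static int wait_for_setup(void)
{
        unsigned long left;

        left = wait_for_completion_timeout(&setup_done,
                                           msecs_to_jiffies(500));
        if (!left)
                return -ETIMEDOUT;      /* timed out, never completed */
        return 0;                       /* completed with 'left' jiffies to spare */
}
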
-
-
-
-/*
- *
- * SMP
- *
- */
-#ifdef CONFIG_SMP
-
-struct migration_arg {
-       struct task_struct *task;
-       int dest_cpu;
-};
-
-/*
- * kernel/sched/core.c:4822
- * Move (not current) task off this cpu, onto dest cpu. We're doing
- * this because either it can't run here any more (set_cpus_allowed()
- * away from this CPU, or CPU going down), or because we're
- * attempting to rebalance this task on exec (sched_exec).
- *
- * So we race with normal scheduler movements, but that's OK, as long
- * as the task is no longer on this CPU.
- *
- * Returns non-zero if task was successfully migrated.
- */
-static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
-{
-       struct rq *rq_dest, *rq_src;
-       int ret = 0;
-
-       if (unlikely(!cpu_active(dest_cpu)))
-               return ret;
-
-       rq_src = cpu_rq(src_cpu);
-       rq_dest = cpu_rq(dest_cpu);
-
-       raw_spin_lock(&p->pi_lock);
-       double_rq_lock(rq_src, rq_dest);
-       /* Already moved. */
-       if (task_cpu(p) != src_cpu)
-               goto done;
-       /* Affinity changed (again). */
-       if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
-               goto fail;
-
-       /*
-        * If we're not on a rq, the next wake-up will ensure we're
-        * placed properly.
-        */
-       if (p->on_rq) {
-//             dequeue_task(rq_src, p, 0);
-               list_del(&p->rq_tasks);
-               // TODO: maybe not necessary since both rq locks are held
-               p->on_rq = 0;
-               set_task_cpu(p, dest_cpu);
-//             enqueue_task(rq_dest, p, 0);
-               list_add(&p->rq_tasks, &rq_dest->rq_list);
-               // TODO: maybe not necessary since both rq locks are held
-               p->on_rq = 1;
-//             check_preempt_curr(rq_dest, p, 0);
-               if (rq_dest->curr == rq_dest->idle)
-                       resched_task(rq_dest->curr);
-       }
-done:
-       ret = 1;
-fail:
-       double_rq_unlock(rq_src, rq_dest);
-       raw_spin_unlock(&p->pi_lock);
-       return ret;
-}
-
-/*
- * kernel/sched/core:4865
- * migration_cpu_stop - this will be executed by a highprio stopper thread
- * and performs thread migration by bumping thread off CPU then
- * 'pushing' onto another runqueue.
- */
-static int migration_cpu_stop(void *data)
-{
-       struct migration_arg *arg = data;
-
-       /*
-        * The original target cpu might have gone down and we might
-        * be on another cpu but it doesn't matter.
-        */
-       local_irq_disable();
-       __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
-       local_irq_enable();
-       return 0;
-}
-
-
-static void set_rq_online(struct rq *rq)
-{
-       if (!rq->online)
-               rq->online = 1;
-}
-
-static void set_rq_offline(struct rq *rq)
-{
-       if (rq->online)
-               rq->online = 0;
-}
-
-/*
- * migration_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int __cpuinit
-migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-       int cpu = (long)hcpu;
-       unsigned long flags;
-       struct rq *rq = cpu_rq(cpu);
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-
-       case CPU_UP_PREPARE:
-//             rq->calc_load_update = calc_load_update;
-               break;
-
-       case CPU_ONLINE:
-               /* Update our root-domain */
-               raw_spin_lock_irqsave(&rq->lock, flags);
-//             if (rq->rd) {
-//                     BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-//
-                       set_rq_online(rq);
+//}
+//
+//void scheduler_ipi(void)
+//{
+//     if (llist_empty(&this_rq()->wake_list)) // && !got_nohz_idle_kick())
+//                     return;
+//
+//     /*
+//      * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+//      * traditionally all their work was done from the interrupt return
+//      * path. Now that we actually do some work, we need to make sure
+//      * we do call them.
+//      *
+//      * Some archs already do call them, luckily irq_enter/exit nest
+//      * properly.
+//      *
+//      * Arguably we should visit all archs and update all handlers,
+//      * however a fair share of IPIs are still resched only so this would
+//      * somewhat pessimize the simple resched case.
+//      */
+//     irq_enter();
+//     sched_ttwu_pending();
+//
+//     /*
+//      * Check if someone kicked us for doing the nohz idle load balance.
+//      */
+//     if (unlikely(/*got_nohz_idle_kick() && */!need_resched())) {
+////           this_rq()->idle_balance = 1;
+//             raise_softirq_irqoff(SCHED_SOFTIRQ);
+//     }
+//     irq_exit();
+//}
+//
+///*
+// * kernel/sched/core.c:1011
+// * wait_task_inactive - wait for a thread to unschedule.
+// *
+// * If @match_state is nonzero, it's the @p->state value just checked and
+// * not expected to change.  If it changes, i.e. @p might have woken up,
+// * then return zero.  When we succeed in waiting for @p to be off its CPU,
+// * we return a positive number (its total switch count).  If a second call
+// * a short while later returns the same number, the caller can be sure that
+// * @p has remained unscheduled the whole time.
+// *
+// * The caller must ensure that the task *will* unschedule sometime soon,
+// * else this function might spin for a *long* time. This function can't
+// * be called with interrupts off, or it may introduce deadlock with
+// * smp_call_function() if an IPI is sent by the same process we are
+// * waiting to become inactive.
+// */
+//unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+//{
+//     unsigned long flags;
+//     int running, on_rq;
+//     unsigned long ncsw;
+//     struct rq *rq;
+//
+//     for (;;) {
+//             /*
+//              * We do the initial early heuristics without holding
+//              * any task-queue locks at all. We'll only try to get
+//              * the runqueue lock when things look like they will
+//              * work out!
+//              */
+//             rq = task_rq(p);
+//
+//             /*
+//              * If the task is actively running on another CPU
+//              * still, just relax and busy-wait without holding
+//              * any locks.
+//              *
+//              * NOTE! Since we don't hold any locks, it's not
+//              * even sure that "rq" stays as the right runqueue!
+//              * But we don't care, since "task_running()" will
+//              * return false if the runqueue has changed and p
+//              * is actually now running somewhere else!
+//              */
+//             while (task_running(rq, p)) {
+//                     if (match_state && unlikely(p->state != match_state))
+//                             return 0;
+//                     cpu_relax();
+//             }
+//
+//             /*
+//              * Ok, time to look more closely! We need the rq
+//              * lock now, to be *sure*. If we're wrong, we'll
+//              * just go back and repeat.
+//              */
+//             rq = task_rq_lock(p, &flags);
+////           trace_sched_wait_task(p);
+//             running = task_running(rq, p);
+//             on_rq = p->on_rq;
+//             ncsw = 0;
+//             if (!match_state || p->state == match_state)
+//                     ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+//             task_rq_unlock(rq, p, &flags);
+//
+//             /*
+//              * If it changed from the expected state, bail out now.
+//              */
+//             if (unlikely(!ncsw))
+//                     break;
+//
+//             /*
+//              * Was it really running after all now that we
+//              * checked with the proper locks actually held?
+//              *
+//              * Oops. Go back and try again..
+//              */
+//             if (unlikely(running)) {
+//                     cpu_relax();
+//                     continue;
 //             }
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
-               break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_DYING:
-               sched_ttwu_pending();
-               /* Update our root-domain */
-               raw_spin_lock_irqsave(&rq->lock, flags);
-//             if (rq->rd) {
-//                     BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-                       set_rq_offline(rq);
+//
+//             /*
+//              * It's not enough that it's not actively running,
+//              * it must be off the runqueue _entirely_, and not
+//              * preempted!
+//              *
+//              * So if it was still runnable (but just not actively
+//              * running right now), it's preempted, and we should
+//              * yield - it could be a while.
+//              */
+//             if (unlikely(on_rq)) {
+//                     ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+//
+//                     set_current_state(TASK_UNINTERRUPTIBLE);
+//                     schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+//                     continue;
 //             }
-               migrate_tasks(cpu);
-               BUG_ON(rq->nr_running != 1); /* the migration thread */
-               raw_spin_unlock_irqrestore(&rq->lock, flags);
-               break;
-
-       case CPU_DEAD:
-//             calc_load_migrate(rq);
-               break;
-#endif
-       }
-
-//     update_max_interval();
-
-       return NOTIFY_OK;
-}
-
-/*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else.  This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
-static struct notifier_block __cpuinitdata migration_notifier = {
-       .notifier_call = migration_call,
-       .priority = CPU_PRI_MIGRATION,
-};
-
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
-                                     unsigned long action, void *hcpu)
-{
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_STARTING:
-       case CPU_DOWN_FAILED:
-               set_cpu_active((long)hcpu, true);
-               return NOTIFY_OK;
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
-                                       unsigned long action, void *hcpu)
-{
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_DOWN_PREPARE:
-               set_cpu_active((long)hcpu, false);
-               return NOTIFY_OK;
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static int __init migration_init(void)
-{
-       void *cpu = (void *)(long)smp_processor_id();
-       int err;
-
-       /* Initialize migration for the boot CPU */
-       err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
-       BUG_ON(err == NOTIFY_BAD);
-       migration_call(&migration_notifier, CPU_ONLINE, cpu);
-       register_cpu_notifier(&migration_notifier);
-
-       /* Register cpu active notifiers */
-       cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
-       cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
-
-       return 0;
-}
-early_initcall(migration_init);
-
-
-
-void do_set_cpus_allowed(struct task_struct *p,
-                              const struct cpumask *new_mask)
-{
-//     if (p->sched_class && p->sched_class->set_cpus_allowed)
-//             p->sched_class->set_cpus_allowed(p, new_mask);
-
-       cpumask_copy(&p->cpus_allowed, new_mask);
-       p->nr_cpus_allowed = cpumask_weight(new_mask);
-}
-
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-{
-       unsigned long flags;
-       struct rq *rq;
-       unsigned int dest_cpu;
-       int ret = 0;
-
-       rq = task_rq_lock(p, &flags);
-
-       if (cpumask_equal(&p->cpus_allowed, new_mask))
-               goto out;
-
-       if (!cpumask_intersects(new_mask, cpu_active_mask)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       do_set_cpus_allowed(p, new_mask);
-
-       /* Can the task run on the task's current CPU? If so, we're done */
-       if (cpumask_test_cpu(task_cpu(p), new_mask))
-               goto out;
-
-       dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-       if (p->on_rq) {
-               struct migration_arg arg = { p, dest_cpu };
-               /* Need help from migration thread: drop lock and wait. */
-               task_rq_unlock(rq, p, &flags);
-               stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-               tlb_migrate_finish(p->mm);
-               return 0;
-       }
-out:
-       task_rq_unlock(rq, p, &flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
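
A hedged sketch of a typical caller: pinning a task to a single CPU. If the task is currently enqueued on a now-forbidden CPU, set_cpus_allowed_ptr() drops the lock and has the stopper thread migrate it via migration_cpu_stop() above; `housekeeper` and `cpu` are placeholders.

#include <linux/cpumask.h>

static int pin_to_cpu(struct task_struct *housekeeper, int cpu)
{
        /* restrict the affinity mask to exactly one CPU */
        return set_cpus_allowed_ptr(housekeeper, cpumask_of(cpu));
}
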
-
-static void sched_ttwu_pending(void)
-{
-       struct rq *rq = this_rq();
-       struct llist_node *llist = llist_del_all(&rq->wake_list);
-       struct task_struct *p;
-
-       raw_spin_lock(&rq->lock);
-
-       while (llist) {
-               p = llist_entry(llist, struct task_struct, wake_entry);
-               llist = llist_next(llist);
-               ttwu_do_activate(rq, p, 0);
-       }
-
-       raw_spin_unlock(&rq->lock);
-}
-
-void scheduler_ipi(void)
-{
-       if (llist_empty(&this_rq()->wake_list)) // && !got_nohz_idle_kick())
-                       return;
-
-       /*
-        * Not all reschedule IPI handlers call irq_enter/irq_exit, since
-        * traditionally all their work was done from the interrupt return
-        * path. Now that we actually do some work, we need to make sure
-        * we do call them.
-        *
-        * Some archs already do call them, luckily irq_enter/exit nest
-        * properly.
-        *
-        * Arguably we should visit all archs and update all handlers,
-        * however a fair share of IPIs are still resched only so this would
-        * somewhat pessimize the simple resched case.
-        */
-       irq_enter();
-       sched_ttwu_pending();
-
-       /*
-        * Check if someone kicked us for doing the nohz idle load balance.
-        */
-       if (unlikely(/*got_nohz_idle_kick() && */!need_resched())) {
-//             this_rq()->idle_balance = 1;
-               raise_softirq_irqoff(SCHED_SOFTIRQ);
-       }
-       irq_exit();
-}
-
-/*
- * kernel/sched/core.c:1011
- * wait_task_inactive - wait for a thread to unschedule.
- *
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change.  If it changes, i.e. @p might have woken up,
- * then return zero.  When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count).  If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
- *
- * The caller must ensure that the task *will* unschedule sometime soon,
- * else this function might spin for a *long* time. This function can't
- * be called with interrupts off, or it may introduce deadlock with
- * smp_call_function() if an IPI is sent by the same process we are
- * waiting to become inactive.
- */
-unsigned long wait_task_inactive(struct task_struct *p, long match_state)
-{
-       unsigned long flags;
-       int running, on_rq;
-       unsigned long ncsw;
-       struct rq *rq;
-
-       for (;;) {
-               /*
-                * We do the initial early heuristics without holding
-                * any task-queue locks at all. We'll only try to get
-                * the runqueue lock when things look like they will
-                * work out!
-                */
-               rq = task_rq(p);
-
-               /*
-                * If the task is actively running on another CPU
-                * still, just relax and busy-wait without holding
-                * any locks.
-                *
-                * NOTE! Since we don't hold any locks, it's not
-                * even sure that "rq" stays as the right runqueue!
-                * But we don't care, since "task_running()" will
-                * return false if the runqueue has changed and p
-                * is actually now running somewhere else!
-                */
-               while (task_running(rq, p)) {
-                       if (match_state && unlikely(p->state != match_state))
-                               return 0;
-                       cpu_relax();
-               }
-
-               /*
-                * Ok, time to look more closely! We need the rq
-                * lock now, to be *sure*. If we're wrong, we'll
-                * just go back and repeat.
-                */
-               rq = task_rq_lock(p, &flags);
-//             trace_sched_wait_task(p);
-               running = task_running(rq, p);
-               on_rq = p->on_rq;
-               ncsw = 0;
-               if (!match_state || p->state == match_state)
-                       ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-               task_rq_unlock(rq, p, &flags);
-
-               /*
-                * If it changed from the expected state, bail out now.
-                */
-               if (unlikely(!ncsw))
-                       break;
-
-               /*
-                * Was it really running after all now that we
-                * checked with the proper locks actually held?
-                *
-                * Oops. Go back and try again..
-                */
-               if (unlikely(running)) {
-                       cpu_relax();
-                       continue;
-               }
-
-               /*
-                * It's not enough that it's not actively running,
-                * it must be off the runqueue _entirely_, and not
-                * preempted!
-                *
-                * So if it was still runnable (but just not actively
-                * running right now), it's preempted, and we should
-                * yield - it could be a while.
-                */
-               if (unlikely(on_rq)) {
-                       ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
-
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_hrtimeout(&to, HRTIMER_MODE_REL);
-                       continue;
-               }
-
-               /*
-                * Ahh, all good. It wasn't running, and it wasn't
-                * runnable, which means that it will never become
-                * running in the future either. We're all done!
-                */
-               break;
-       }
-
-       return ncsw;
-}
-
-/***
- * kernel/sched/core:1116
- * kick_process - kick a running thread to enter/exit the kernel
- * @p: the to-be-kicked thread
- *
- * Cause a process which is running on another CPU to enter
- * kernel-mode, without any delay. (to get signals handled.)
- *
- * NOTE: this function doesn't have to take the runqueue lock,
- * because all it wants to ensure is that the remote task enters
- * the kernel. If the IPI races and the task has been migrated
- * to another CPU then no harm is done and the purpose has been
- * achieved as well.
- */
-void kick_process(struct task_struct *p)
-{
-       int cpu;
-
-       preempt_disable();
-       cpu = task_cpu(p);
-       if ((cpu != smp_processor_id()) && task_curr(p))
-               smp_send_reschedule(cpu);
-       preempt_enable();
-}
-EXPORT_SYMBOL_GPL(kick_process);
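
A hedged sketch of the classic use of kick_process(), mirroring what signal delivery does (this is not code from this patch): after flagging pending signal work for a task that may be running in user space on another CPU, kick it so it re-enters the kernel and notices the flag on its way back out.

static void poke_for_signal(struct task_struct *p)
{
        set_tsk_thread_flag(p, TIF_SIGPENDING); /* mark pending signal work     */
        kick_process(p);                        /* IPI only if running remotely */
}
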
-
-void sched_set_stop_task(int cpu, struct task_struct *stop)
-{
-       printk("\nsched_set_stop_task");
-}
-
-bool completion_done(struct completion *x)
-{
-       printk("\ncompletion_done");
-
-       return 0;
-}
-
-/*
- * kernel/sched/core:2605
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
- */
-void sched_exec(void)
-{
-       struct task_struct *p = current;
-       unsigned long flags;
-       int dest_cpu;
-
-       raw_spin_lock_irqsave(&p->pi_lock, flags);
-       dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
-       if (dest_cpu == smp_processor_id())
-               goto unlock;
-
-       if (likely(cpu_active(dest_cpu))) {
-               struct migration_arg arg = { p, dest_cpu };
-
-               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-               stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-               return;
-       }
-unlock:
-       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-}
-
-void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
-{
-#ifdef CONFIG_SCHED_DEBUG
-       /*
-        * We should never call set_task_cpu() on a blocked task,
-        * ttwu() will sort out the placement.
-        */
-       WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
-                       !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
-
-#ifdef CONFIG_LOCKDEP
-       /*
-        * The caller should hold either p->pi_lock or rq->lock, when changing
-        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
-        *
-        * sched_move_task() holds both and thus holding either pins the cgroup,
-        * see task_group().
-        *
-        * Furthermore, all task_rq users should acquire both locks, see
-        * task_rq_lock().
-        */
-       WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
-                                     lockdep_is_held(&task_rq(p)->lock)));
-#endif
-#endif
-
-       // TODO: SMP, needs to be implemented when using load balancing
-//     trace_sched_migrate_task(p, new_cpu);
-//
-//     if (task_cpu(p) != new_cpu) {
-//             struct task_migration_notifier tmn;
-//
-//             if (p->sched_class->migrate_task_rq)
-//                     p->sched_class->migrate_task_rq(p, new_cpu);
-//             p->se.nr_migrations++;
-//             perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
-//
-//             tmn.task = p;
-//             tmn.from_cpu = task_cpu(p);
-//             tmn.to_cpu = new_cpu;
-//
-//             atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
+//
+//             /*
+//              * Ahh, all good. It wasn't running, and it wasn't
+//              * runnable, which means that it will never become
+//              * running in the future either. We're all done!
+//              */
+//             break;
+//     }
+//
+//     return ncsw;
+//}
+//
+///***
+// * kernel/sched/core:1116
+// * kick_process - kick a running thread to enter/exit the kernel
+// * @p: the to-be-kicked thread
+// *
+// * Cause a process which is running on another CPU to enter
+// * kernel-mode, without any delay. (to get signals handled.)
+// *
+// * NOTE: this function doesn't have to take the runqueue lock,
+// * because all it wants to ensure is that the remote task enters
+// * the kernel. If the IPI races and the task has been migrated
+// * to another CPU then no harm is done and the purpose has been
+// * achieved as well.
+// */
+//void kick_process(struct task_struct *p)
+//{
+//     int cpu;
+//
+//     preempt_disable();
+//     cpu = task_cpu(p);
+//     if ((cpu != smp_processor_id()) && task_curr(p))
+//             smp_send_reschedule(cpu);
+//     preempt_enable();
+//}
+//EXPORT_SYMBOL_GPL(kick_process);
+//
+//void sched_set_stop_task(int cpu, struct task_struct *stop)
+//{
+//     printk("\nsched_set_stop_task");
+//}
+//
+//bool completion_done(struct completion *x)
+//{
+//     printk("\ncompletion_done");
+//
+//     return 0;
+//}
+//
+///*
+// * kernel/sched/core:2605
+// * sched_exec - execve() is a valuable balancing opportunity, because at
+// * this point the task has the smallest effective memory and cache footprint.
+// */
+//void sched_exec(void)
+//{
+//     struct task_struct *p = current;
+//     unsigned long flags;
+//     int dest_cpu;
+//
+//     raw_spin_lock_irqsave(&p->pi_lock, flags);
+//     dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
+//     if (dest_cpu == smp_processor_id())
+//             goto unlock;
+//
+//     if (likely(cpu_active(dest_cpu))) {
+//             struct migration_arg arg = { p, dest_cpu };
+//
+//             raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+//             stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+//             return;
 //     }
-
-       __set_task_cpu(p, new_cpu);
-}
-
-/**
- * kernel/sched/core.c:6820
- */
-void __init sched_init_smp(void)
-{
-       cpumask_var_t non_isolated_cpus;
-
-       alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
-
-//     sched_init_numa();
-
-       get_online_cpus();
-       mutex_lock(&sched_domains_mutex);
-//     init_sched_domains(cpu_active_mask);
-       cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
-       if (cpumask_empty(non_isolated_cpus))
-               cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
-       mutex_unlock(&sched_domains_mutex);
-       put_online_cpus();
-
-//     hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
-//     hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
-//     hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
-
-       /* RT runtime code needs to handle some hotplug events */
-//     hotcpu_notifier(update_runtime, 0);
-
-//     init_hrtick();
-
-       /* Move init over to a non-isolated CPU */
-       if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
-               BUG();
-//     sched_init_granularity();
-       free_cpumask_var(non_isolated_cpus);
-
-//     init_sched_rt_class();
-}
-
-bool cpus_share_cache(int this_cpu, int that_cpu)
-{
-       return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-}
-
-#else
-
-/**
- * kernel/sched/core.c:6856
- */
-void sched_init_smp(void)
-{
-       //printk("\nsched_init_smp");
-
-       return;
-}
-#endif /* CONFIG_SMP */
-
-
-
-/*
- * Syscalls
- *
- * Help:
- * SYSCALL_DEFINEx will be replaced by asmlinkage data_type function_name
- * asmlinkage:         tells the compiler that the arguments of the function are
- *                             not placed in registers but rather are found on the stack
- */
-
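
For orientation, a hedged sketch of what the SYSCALL_DEFINEx wrappers below roughly expand to, ignoring the type-checking and metadata plumbing the real macro adds:

/* SYSCALL_DEFINE1(nice, int, increment) becomes approximately: */
asmlinkage long sys_nice(int increment)
{
        printk("SYSCALL nice\n");
        return 0;
}
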
-/*
- * kernel/sched/core.c:3686
- * sys_nice - change the priority of the current process.
- * @increment: priority increment
- *
- * sys_setpriority is a more generic, but much slower function that
- * does similar things.
- */
-SYSCALL_DEFINE1(nice, int, increment)
-{
-       printk("SYSCALL nice\n");
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4248
- * sys_sched_getaffinity - get the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to hold the current cpu mask
- */
-SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
-               unsigned long __user *, user_mask_ptr)
-{
-       printk("SYSCALL sched_getaffinity\n");
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4197
- * sys_sched_setaffinity - set the cpu affinity of a process
- * @pid: pid of the process
- * @len: length in bytes of the bitmask pointed to by user_mask_ptr
- * @user_mask_ptr: user-space pointer to the new cpu mask
- */
-SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
-               unsigned long __user *, user_mask_ptr)
-{
-       printk("SYSCALL sched_setaffinity\n");
-
-       // TODO: SMP
-
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4562
- * sys_sched_rr_get_interval - return the default timeslice of a process.
- * @pid: pid of the process.
- * @interval: userspace pointer to the timeslice value.
- *
- * this syscall writes the default timeslice value of a given process
- * into the user-space timespec buffer. A value of '0' means infinity.
- */
-SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
-               struct timespec __user *, interval)
-{
-       printk("SYSCALL sched_rr_get_interval\n");
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4282
- * sys_sched_yield - yield the current processor to other threads.
- *
- * This function yields the current CPU to other tasks. If there are no
- * other threads running on this CPU then this function will return.
- */
-SYSCALL_DEFINE0(sched_yield)
-{
-       printk("SYSCALL sched_yield\n");
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4027
- * sys_sched_setscheduler - set/change the scheduler policy and RT priority
- * @pid: the pid in question.
- * @policy: new policy.
- * @param: structure containing the new RT priority.
- */
-SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
-               struct sched_param __user *, param)
-{
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4051
- * sys_sched_getscheduler - get the policy (scheduling class) of a thread
- * @pid: the pid in question.
- */
-SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
-{
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4512
- * sys_sched_get_priority_max - return maximum RT priority.
- * @policy: scheduling class.
- *
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
- */
-SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
-{
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4537
- * sys_sched_get_priority_min - return minimum RT priority.
- * @policy: scheduling class.
- *
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
- */
-SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
-{
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4042
- * sys_sched_setparam - set/change the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the new RT priority.
- */
-SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
-{
-       return 0;
-}
-
-/**
- * kernel/sched/core.c:4077
- * sys_sched_getparam - get the RT priority of a thread
- * @pid: the pid in question.
- * @param: structure containing the RT priority.
- */
-SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
-{
-       return 0;
-}
+//unlock:
+//     raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+//}
+//
+//void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+//{
+//#ifdef CONFIG_SCHED_DEBUG
+//     /*
+//      * We should never call set_task_cpu() on a blocked task,
+//      * ttwu() will sort out the placement.
+//      */
+//     WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+//                     !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
+//
+//#ifdef CONFIG_LOCKDEP
+//     /*
+//      * The caller should hold either p->pi_lock or rq->lock, when changing
+//      * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+//      *
+//      * sched_move_task() holds both and thus holding either pins the cgroup,
+//      * see task_group().
+//      *
+//      * Furthermore, all task_rq users should acquire both locks, see
+//      * task_rq_lock().
+//      */
+//     WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+//                                   lockdep_is_held(&task_rq(p)->lock)));
+//#endif
+//#endif
+//
+//     // TODO: SMP, needs to be implemented when using load balancing
+////   trace_sched_migrate_task(p, new_cpu);
+////
+////   if (task_cpu(p) != new_cpu) {
+////           struct task_migration_notifier tmn;
+////
+////           if (p->sched_class->migrate_task_rq)
+////                   p->sched_class->migrate_task_rq(p, new_cpu);
+////           p->se.nr_migrations++;
+////           perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+////
+////           tmn.task = p;
+////           tmn.from_cpu = task_cpu(p);
+////           tmn.to_cpu = new_cpu;
+////
+////           atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
+////   }
+//
+//     __set_task_cpu(p, new_cpu);
+//}
+//
+///**
+// * kernel/sched/core.c:6820
+// */
+//void __init sched_init_smp(void)
+//{
+//     cpumask_var_t non_isolated_cpus;
+//
+//     alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+//     alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+//
+////   sched_init_numa();
+//
+//     get_online_cpus();
+//     mutex_lock(&sched_domains_mutex);
+////   init_sched_domains(cpu_active_mask);
+//     cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
+//     if (cpumask_empty(non_isolated_cpus))
+//             cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
+//     mutex_unlock(&sched_domains_mutex);
+//     put_online_cpus();
+//
+////   hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
+////   hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
+////   hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+//
+//     /* RT runtime code needs to handle some hotplug events */
+////   hotcpu_notifier(update_runtime, 0);
+//
+////   init_hrtick();
+//
+//     /* Move init over to a non-isolated CPU */
+//     if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
+//             BUG();
+////   sched_init_granularity();
+//     free_cpumask_var(non_isolated_cpus);
+//
+////   init_sched_rt_class();
+//}
+//
+//bool cpus_share_cache(int this_cpu, int that_cpu)
+//{
+//     return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
+//}
+//
+//#else
+//
+///**
+// * kernel/sched/core.c:6856
+// */
+//void sched_init_smp(void)
+//{
+//     //printk("\nsched_init_smp");
+//
+//     return;
+//}
+//#endif /* CONFIG_SMP */
+//
+//
+//
+///*
+// * Syscalls
+// *
+// * Help:
+// * SYSCALL_DEFINEx will be replaced by asmlinkage data_type function_name
+// * asmlinkage:       tells the compiler that the arguments of the function are
+// *                           not placed in registers but rather are found on the stack
+// */
+//
+///*
+// * kernel/sched/core.c:3686
+// * sys_nice - change the priority of the current process.
+// * @increment: priority increment
+// *
+// * sys_setpriority is a more generic, but much slower function that
+// * does similar things.
+// */
+//SYSCALL_DEFINE1(nice, int, increment)
+//{
+//     printk("SYSCALL nice\n");
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4248
+// * sys_sched_getaffinity - get the cpu affinity of a process
+// * @pid: pid of the process
+// * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+// * @user_mask_ptr: user-space pointer to hold the current cpu mask
+// */
+//SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+//             unsigned long __user *, user_mask_ptr)
+//{
+//     printk("SYSCALL sched_getaffinity\n");
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4197
+// * sys_sched_setaffinity - set the cpu affinity of a process
+// * @pid: pid of the process
+// * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+// * @user_mask_ptr: user-space pointer to the new cpu mask
+// */
+//SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+//             unsigned long __user *, user_mask_ptr)
+//{
+//     printk("SYSCALL sched_setaffinity\n");
+//
+//     // TODO: SMP
+//
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4562
+// * sys_sched_rr_get_interval - return the default timeslice of a process.
+// * @pid: pid of the process.
+// * @interval: userspace pointer to the timeslice value.
+// *
+// * this syscall writes the default timeslice value of a given process
+// * into the user-space timespec buffer. A value of '0' means infinity.
+// */
+//SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+//             struct timespec __user *, interval)
+//{
+//     printk("SYSCALL sched_rr_get_interval\n");
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4282
+// * sys_sched_yield - yield the current processor to other threads.
+// *
+// * This function yields the current CPU to other tasks. If there are no
+// * other threads running on this CPU then this function will return.
+// */
+//SYSCALL_DEFINE0(sched_yield)
+//{
+//     printk("SYSCALL sched_yield\n");
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4027
+// * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+// * @pid: the pid in question.
+// * @policy: new policy.
+// * @param: structure containing the new RT priority.
+// */
+//SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+//             struct sched_param __user *, param)
+//{
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4051
+// * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+// * @pid: the pid in question.
+// */
+//SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+//{
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4512
+// * sys_sched_get_priority_max - return maximum RT priority.
+// * @policy: scheduling class.
+// *
+// * this syscall returns the maximum rt_priority that can be used
+// * by a given scheduling class.
+// */
+//SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+//{
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4537
+// * sys_sched_get_priority_min - return minimum RT priority.
+// * @policy: scheduling class.
+// *
+// * this syscall returns the minimum rt_priority that can be used
+// * by a given scheduling class.
+// */
+//SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+//{
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4042
+// * sys_sched_setparam - set/change the RT priority of a thread
+// * @pid: the pid in question.
+// * @param: structure containing the new RT priority.
+// */
+//SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+//{
+//     return 0;
+//}
+//
+///**
+// * kernel/sched/core.c:4077
+// * sys_sched_getparam - get the RT priority of a thread
+// * @pid: the pid in question.
+// * @param: structure containing the RT priority.
+// */
+//SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+//{
+//     return 0;
+//}
index 105adad..be48cfc 100644 (file)
-#include <linux/sched.h>
-#include <linux/sched/sysctl.h>
-#include <linux/sched/rt.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
-
-#include <linux/list.h>
-
-
-struct task_group {
-};
-
-
-struct rq {
-       /* runqueue lock: */
-       raw_spinlock_t lock;
-
-       u64 nr_switches;
-       unsigned int nr_running;
-
-       struct list_head rq_list;
-
-       /*
-        * This is part of a global counter where only the total sum
-        * over all CPUs matters. A task can increase this counter on
-        * one CPU and if it got migrated afterwards it may decrease
-        * it on another CPU. Always updated under the runqueue lock:
-        */
-       unsigned long nr_uninterruptible;
-
-       struct task_struct *curr, *idle, *stop;
-
-       struct mm_struct *prev_mm;
-
-       u64 clock;
-       u64 clock_task;
-
-       atomic_t nr_iowait;
-
-       int skip_clock_update;
-
-#ifdef CONFIG_SMP
-
-       /* cpu of this runqueue: */
-       int cpu;
-       int online;
-
-       struct llist_head wake_list;
-
-#endif
-
-};
-
-static inline int cpu_of(struct rq *rq)
-{
-#ifdef CONFIG_SMP
-       return rq->cpu;
-#else
-       return 0;
-#endif
-}
-
-DECLARE_PER_CPU(struct rq, runqueues);
-
-#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
-#define this_rq()              (&__get_cpu_var(runqueues))
-#define task_rq(p)             cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
-#define raw_rq()               (&__raw_get_cpu_var(runqueues))
-
-
-#ifdef CONFIG_SMP
-
-/*
- * kernel/sched/sched.h:1105
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-       __acquires(rq1->lock)
-       __acquires(rq2->lock)
-{
-       BUG_ON(!irqs_disabled());
-       if (rq1 == rq2) {
-               raw_spin_lock(&rq1->lock);
-               __acquire(rq2->lock);   /* Fake it out ;) */
-       } else {
-               if (rq1 < rq2) {
-                       raw_spin_lock(&rq1->lock);
-                       raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
-               } else {
-                       raw_spin_lock(&rq2->lock);
-                       raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
-               }
-       }
-}
-
-/*
- * kernel/sched/sched.h:1130
- * double_rq_unlock - safely unlock two runqueues
- *
- * Note this does not restore interrupts like task_rq_unlock,
- * you need to do so manually after calling.
- */
-static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
-       __releases(rq1->lock)
-       __releases(rq2->lock)
-{
-       raw_spin_unlock(&rq1->lock);
-       if (rq1 != rq2)
-               raw_spin_unlock(&rq2->lock);
-       else
-               __release(rq2->lock);
-}
-
-#endif
-
-#ifndef prepare_arch_switch
-# define prepare_arch_switch(next)     do { } while (0)
-#endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)      do { } while (0)
-#endif
-#ifndef finish_arch_post_lock_switch
-# define finish_arch_post_lock_switch()        do { } while (0)
-#endif
-
-#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-       /*
-        * We can optimise this out completely for !SMP, because the
-        * SMP rebalancing from interrupt is the only thing that cares
-        * here.
-        */
-       next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-       /*
-        * After ->on_cpu is cleared, the task can be moved to a different CPU.
-        * We must ensure this doesn't happen until the switch is completely
-        * finished.
-        */
-       smp_wmb();
-       prev->on_cpu = 0;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-       /* this is a valid case when another task releases the spinlock */
-       rq->lock.owner = current;
-#endif
-       /*
-        * If we are tracking spinlock dependencies then we have to
-        * fix up the runqueue lock - which gets 'carried over' from
-        * prev into current:
-        */
-       spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
-       raw_spin_unlock_irq(&rq->lock);
-}
-
-#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
-       /*
-        * We can optimise this out completely for !SMP, because the
-        * SMP rebalancing from interrupt is the only thing that cares
-        * here.
-        */
-       next->on_cpu = 1;
-#endif
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
-       /*
-        * After ->on_cpu is cleared, the task can be moved to a different CPU.
-        * We must ensure this doesn't happen until the switch is completely
-        * finished.
-        */
-       smp_wmb();
-       prev->on_cpu = 0;
-#endif
-       local_irq_enable();
-}
-#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
-
-
-void account_idle_ticks(unsigned long ticks);
-
-void account_process_tick(struct task_struct *p, int user_tick);
-
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
-
-void calc_global_load(unsigned long ticks);
-
-//void calc_load_enter_idle(void);
-
-//void calc_load_exit_idle(void);
-
-int can_nice(const struct task_struct *p, const int nice);
-
-int idle_cpu(int cpu);
-
-void __cpuinit init_idle(struct task_struct *idle, int cpu);
-
-void __cpuinit init_idle_bootup_task(struct task_struct *idle);
-
-void normalize_rt_tasks(void);
-
-unsigned long nr_running(void);
-
-unsigned long long nr_context_switches(void);
-
-unsigned long nr_iowait(void);
-
-void rt_mutex_setprio(struct task_struct *p, int prio);
-
-u64 sched_clock_cpu(int cpu);
-
-void sched_clock_init(void);;
-
-void sched_fork(struct task_struct *p);
-
-long sched_getaffinity(pid_t pid, struct cpumask *mask);
-
-void sched_init(void);
-
-void sched_init_smp(void);
-
-int sched_rr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
-
-long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
-
-int sched_setscheduler(struct task_struct *p, int policy, const struct sched_param *param);
-
-void sched_show_task(struct task_struct *p);
-
-void schedule_tail(struct task_struct *prev);
-
-void scheduler_tick(void);
-
-//void sched_clock_init(void);
-
-long sys_nice(int increment);
-
-long sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr);
-
-long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr);
-
-unsigned long long task_delta_exec(struct task_struct *);
-
-int task_prio(const struct task_struct *p);
-
-unsigned long long task_sched_runtime(struct task_struct *task);
-
-unsigned long this_cpu_load(void);
-
-void update_cpu_load_nohz(void);
-
-void wake_up_new_task(struct task_struct *tsk);
-
-
-int __sched _cond_resched(void);
-
-asmlinkage void __sched schedule(void);
-
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key);
-
-int wake_up_process(struct task_struct *p);
-
-void __sched wait_for_completion(struct completion *x);
-
-void complete(struct completion *x);
-
-void __sched schedule_preempt_disabled(void);
-
-int in_sched_functions(unsigned long addr);
-
-void sched_clock_idle_sleep_event(void);
-
-void sched_clock_idle_wakeup_event(u64 delta_ns);
-
-int __cond_resched_lock(spinlock_t *lock);
-
-u64 local_clock(void);
-
-int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
-                         void *key);
-
-int __sched wait_for_completion_killable(struct completion *x);
-
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
-                       int nr_exclusive, void *key);
-
-int wake_up_state(struct task_struct *p, unsigned int state);
-
-void __sched yield(void);
-
-inline int task_curr(const struct task_struct *p);
-
-int task_nice(const struct task_struct *p);
-
-void set_user_nice(struct task_struct *p, long nice);
-
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive);
-
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
-
-static inline int task_current(struct rq *rq, struct task_struct *p)
-{
-       return rq->curr == p;
-}
-
-static inline int task_running(struct rq *rq, struct task_struct *p)
-{
-#ifdef CONFIG_SMP
-       return p->on_cpu;
-#else
-       return task_current(rq, p);
-#endif
-}
-
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-       /*
-        * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-        * successfuly executed on another CPU. We must ensure that updates of
-        * per-task data have been completed by this moment.
-        */
-       smp_wmb();
-       task_thread_info(p)->cpu = cpu;
-#endif
-}
+//#include <linux/sched.h>
+//#include <linux/sched/sysctl.h>
+//#include <linux/sched/rt.h>
+//#include <linux/spinlock.h>
+//#include <linux/stop_machine.h>
+//
+//#include <linux/list.h>
+//
+//
+//struct task_group {
+//};
+//
+//
+//struct rq {
+//     /* runqueue lock: */
+//     raw_spinlock_t lock;
+//
+//     u64 nr_switches;
+//     unsigned int nr_running;
+//
+//     struct list_head rq_list;
+//
+//     /*
+//      * This is part of a global counter where only the total sum
+//      * over all CPUs matters. A task can increase this counter on
+//      * one CPU and if it got migrated afterwards it may decrease
+//      * it on another CPU. Always updated under the runqueue lock:
+//      */
+//     unsigned long nr_uninterruptible;
+//
+//     struct task_struct *curr, *idle, *stop;
+//
+//     struct mm_struct *prev_mm;
+//
+//     u64 clock;
+//     u64 clock_task;
+//
+//     atomic_t nr_iowait;
+//
+//     int skip_clock_update;
+//
+//#ifdef CONFIG_SMP
+//
+//     /* cpu of this runqueue: */
+//     int cpu;
+//     int online;
+//
+//     struct llist_head wake_list;
+//
+//#endif
+//
+//};
+//
+//static inline int cpu_of(struct rq *rq)
+//{
+//#ifdef CONFIG_SMP
+//     return rq->cpu;
+//#else
+//     return 0;
+//#endif
+//}
+//
+//DECLARE_PER_CPU(struct rq, runqueues);
+//
+//#define cpu_rq(cpu)          (&per_cpu(runqueues, (cpu)))
+//#define this_rq()            (&__get_cpu_var(runqueues))
+//#define task_rq(p)           cpu_rq(task_cpu(p))
+//#define cpu_curr(cpu)                (cpu_rq(cpu)->curr)
+//#define raw_rq()             (&__raw_get_cpu_var(runqueues))
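+//
+///*
+// * Illustrative use of the accessors above (sketch; assumes preemption is
+// * disabled so the CPU number stays stable):
+// *
+// *	int cpu = smp_processor_id();
+// *	struct rq *rq = cpu_rq(cpu);		/* runqueue of 'cpu' */
+// *	struct task_struct *t = cpu_curr(cpu);	/* task running on 'cpu' */
+// *
+// * this_rq() and raw_rq() resolve to the runqueue of the executing CPU.
+// */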
+//
+//
+//#ifdef CONFIG_SMP
+//
+///*
+// * kernel/sched/sched.h:1105
+// * double_rq_lock - safely lock two runqueues
+// *
+// * Note this does not disable interrupts like task_rq_lock,
+// * you need to do so manually before calling.
+// */
+//static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
+//     __acquires(rq1->lock)
+//     __acquires(rq2->lock)
+//{
+//     BUG_ON(!irqs_disabled());
+//     if (rq1 == rq2) {
+//             raw_spin_lock(&rq1->lock);
+//             __acquire(rq2->lock);   /* Fake it out ;) */
+//     } else {
+//             if (rq1 < rq2) {
+//                     raw_spin_lock(&rq1->lock);
+//                     raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+//             } else {
+//                     raw_spin_lock(&rq2->lock);
+//                     raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+//             }
+//     }
+//}
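+//
+///*
+// * Typical calling pattern (sketch): as noted above, the caller must
+// * disable interrupts around the double-lock section itself.
+// *
+// *	unsigned long flags;
+// *
+// *	local_irq_save(flags);
+// *	double_rq_lock(rq1, rq2);
+// *	/* ... operate on both runqueues, e.g. move a task ... */
+// *	double_rq_unlock(rq1, rq2);
+// *	local_irq_restore(flags);
+// */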
+//
+///*
+// * kernel/sched/sched.h:1130
+// * double_rq_unlock - safely unlock two runqueues
+// *
+// * Note this does not restore interrupts like task_rq_unlock,
+// * you need to do so manually after calling.
+// */
+//static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+//     __releases(rq1->lock)
+//     __releases(rq2->lock)
+//{
+//     raw_spin_unlock(&rq1->lock);
+//     if (rq1 != rq2)
+//             raw_spin_unlock(&rq2->lock);
+//     else
+//             __release(rq2->lock);
+//}
+//
+//#endif
+//
+//#ifndef prepare_arch_switch
+//# define prepare_arch_switch(next)   do { } while (0)
+//#endif
+//#ifndef finish_arch_switch
+//# define finish_arch_switch(prev)    do { } while (0)
+//#endif
+//#ifndef finish_arch_post_lock_switch
+//# define finish_arch_post_lock_switch()      do { } while (0)
+//#endif
+//
+//#ifndef __ARCH_WANT_UNLOCKED_CTXSW
+//static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+//{
+//#ifdef CONFIG_SMP
+//     /*
+//      * We can optimise this out completely for !SMP, because the
+//      * SMP rebalancing from interrupt is the only thing that cares
+//      * here.
+//      */
+//     next->on_cpu = 1;
+//#endif
+//}
+//
+//static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+//{
+//#ifdef CONFIG_SMP
+//     /*
+//      * After ->on_cpu is cleared, the task can be moved to a different CPU.
+//      * We must ensure this doesn't happen until the switch is completely
+//      * finished.
+//      */
+//     smp_wmb();
+//     prev->on_cpu = 0;
+//#endif
+//#ifdef CONFIG_DEBUG_SPINLOCK
+//     /* this is a valid case when another task releases the spinlock */
+//     rq->lock.owner = current;
+//#endif
+//     /*
+//      * If we are tracking spinlock dependencies then we have to
+//      * fix up the runqueue lock - which gets 'carried over' from
+//      * prev into current:
+//      */
+//     spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+//
+//     raw_spin_unlock_irq(&rq->lock);
+//}
+//
+//#else /* __ARCH_WANT_UNLOCKED_CTXSW */
+//static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
+//{
+//#ifdef CONFIG_SMP
+//     /*
+//      * We can optimise this out completely for !SMP, because the
+//      * SMP rebalancing from interrupt is the only thing that cares
+//      * here.
+//      */
+//     next->on_cpu = 1;
+//#endif
+//     raw_spin_unlock(&rq->lock);
+//}
+//
+//static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+//{
+//#ifdef CONFIG_SMP
+//     /*
+//      * After ->on_cpu is cleared, the task can be moved to a different CPU.
+//      * We must ensure this doesn't happen until the switch is completely
+//      * finished.
+//      */
+//     smp_wmb();
+//     prev->on_cpu = 0;
+//#endif
+//     local_irq_enable();
+//}
+//#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
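+//
+///*
+// * Illustrative ordering of the hooks above during a context switch
+// * (sketch; in the mainline scheduler this sequence is driven by
+// * context_switch() and finish_task_switch() in core.c):
+// *
+// *	prepare_lock_switch(rq, next);	/* mark next->on_cpu = 1 */
+// *	prepare_arch_switch(next);
+// *	switch_to(prev, next, prev);	/* switch stack and registers */
+// *	finish_arch_switch(prev);
+// *	finish_lock_switch(rq, prev);	/* clear prev->on_cpu (see above) */
+// *	finish_arch_post_lock_switch();
+// */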
+//
+//
+//void account_idle_ticks(unsigned long ticks);
+//
+//void account_process_tick(struct task_struct *p, int user_tick);
+//
+//void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+//
+//void calc_global_load(unsigned long ticks);
+//
+////void calc_load_enter_idle(void);
+//
+////void calc_load_exit_idle(void);
+//
+//int can_nice(const struct task_struct *p, const int nice);
+//
+//int idle_cpu(int cpu);
+//
+//void __cpuinit init_idle(struct task_struct *idle, int cpu);
+//
+//void __cpuinit init_idle_bootup_task(struct task_struct *idle);
+//
+//void normalize_rt_tasks(void);
+//
+//unsigned long nr_running(void);
+//
+//unsigned long long nr_context_switches(void);
+//
+//unsigned long nr_iowait(void);
+//
+//void rt_mutex_setprio(struct task_struct *p, int prio);
+//
+//u64 sched_clock_cpu(int cpu);
+//
+//void sched_clock_init(void);
+//
+//void sched_fork(struct task_struct *p);
+//
+//long sched_getaffinity(pid_t pid, struct cpumask *mask);
+//
+//void sched_init(void);
+//
+//void sched_init_smp(void);
+//
+//int sched_rr_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+//
+//long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+//
+//int sched_setscheduler(struct task_struct *p, int policy, const struct sched_param *param);
+//
+//void sched_show_task(struct task_struct *p);
+//
+//void schedule_tail(struct task_struct *prev);
+//
+//void scheduler_tick(void);
+//
+////void sched_clock_init(void);
+//
+//long sys_nice(int increment);
+//
+//long sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr);
+//
+//long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr);
+//
+//unsigned long long task_delta_exec(struct task_struct *);
+//
+//int task_prio(const struct task_struct *p);
+//
+//unsigned long long task_sched_runtime(struct task_struct *task);
+//
+//unsigned long this_cpu_load(void);
+//
+//void update_cpu_load_nohz(void);
+//
+//void wake_up_new_task(struct task_struct *tsk);
+//
+//
+//int __sched _cond_resched(void);
+//
+//asmlinkage void __sched schedule(void);
+//
+//void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, void *key);
+//
+//int wake_up_process(struct task_struct *p);
+//
+//void __sched wait_for_completion(struct completion *x);
+//
+//void complete(struct completion *x);
+//
+//void __sched schedule_preempt_disabled(void);
+//
+//int in_sched_functions(unsigned long addr);
+//
+//void sched_clock_idle_sleep_event(void);
+//
+//void sched_clock_idle_wakeup_event(u64 delta_ns);
+//
+//int __cond_resched_lock(spinlock_t *lock);
+//
+//u64 local_clock(void);
+//
+//int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
+//                       void *key);
+//
+//int __sched wait_for_completion_killable(struct completion *x);
+//
+//void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+//                     int nr_exclusive, void *key);
+//
+//int wake_up_state(struct task_struct *p, unsigned int state);
+//
+//void __sched yield(void);
+//
+//inline int task_curr(const struct task_struct *p);
+//
+//int task_nice(const struct task_struct *p);
+//
+//void set_user_nice(struct task_struct *p, long nice);
+//
+//void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+//
+//void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive);
+//
+//void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
+//
+//static inline int task_current(struct rq *rq, struct task_struct *p)
+//{
+//     return rq->curr == p;
+//}
+//
+//static inline int task_running(struct rq *rq, struct task_struct *p)
+//{
+//#ifdef CONFIG_SMP
+//     return p->on_cpu;
+//#else
+//     return task_current(rq, p);
+//#endif
+//}
+//
+//static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+//{
+//#ifdef CONFIG_SMP
+//     /*
+//      * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+//      * successfully executed on another CPU. We must ensure that updates of
+//      * per-task data have been completed by this moment.
+//      */
+//     smp_wmb();
+//     task_thread_info(p)->cpu = cpu;
+//#endif
+//}