1 ///*
2 // *  kernel/sched/core.c
3 // *
4 // *  Kernel scheduler and related syscalls
5 // *
6 // *  Copyright (C) 1991-2002  Linus Torvalds
7 // *
8 // *
9 // */
10 //
11 //#include <linux/init.h>
12 //#include <asm/mmu_context.h>
13 //#include <linux/completion.h>
14 //#include <linux/kernel_stat.h>
15 //#include <linux/blkdev.h>
16 //#include <linux/syscalls.h>
17 //#include <linux/kprobes.h>
18 //#include <linux/delayacct.h>
19 //#include <linux/export.h>
20 //#include <linux/context_tracking.h>
21 //#include <linux/kthread.h>
22 //#include <linux/init_task.h>
23 //
24 //#include <asm/switch_to.h>
25 //#include <asm/tlb.h>
26 //#include <linux/cgroup.h>
27 //#include "sched.h"
28 //#include "../workqueue_internal.h"
29 //#include "../smpboot.h"
30 //
31 //
32 ////
33 //// Variables
34 ////
35 //
36 ///*
37 // * kernel/sched/rt.c:10
38 // * default timeslice is 100 msecs (used only for SCHED_RR tasks).
39 // * Timeslices get refilled after they expire. RR_TIMESLICE is defined as
40 // * (100 * HZ / 1000) and is assigned to sched_rr_timeslice.
41 // */
42 //int sched_rr_timeslice = RR_TIMESLICE;
43 //
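///*
// * Worked example (derived from the formula above, not part of the original
// * sources): with HZ == 1000, RR_TIMESLICE = 100 * 1000 / 1000 = 100 jiffies,
// * i.e. 100 ms; with HZ == 250 it is 25 jiffies, which is still 100 ms of
// * wall-clock time.
// */
//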
44 ///*
45 // * kernel/sched/fair.c:80
46 // * After fork, child runs first. If set to 0 (default) then
47 // * parent will (try to) run first.
48 // */
49 //unsigned int sysctl_sched_child_runs_first = 0;
50 //
51 ///*
52 // * kernel/sched/core.c:289
53 // * Period over which we measure -rt task cpu usage in us.
54 // * default: 1s (1000000)
55 // */
56 //unsigned int sysctl_sched_rt_period = 1000000;
57 //
58 ///*
59 // * /kernel/sched/core.c:2081
60 // * Variables and functions for calc_load
61 // */
62 //unsigned long avenrun[3];
63 //
64 ///*
65 // * kernel/sched/core.c:297
66 // * part of the period that we allow rt tasks to run in us.
67 // * default: 0.95s (950000)
68 // */
69 //int sysctl_sched_rt_runtime = 950000;
70 //
71 ///*
72 // * /kernel/sched/core.c:6866
73 // *
74 // */
75 //struct task_group root_task_group;
76 //
77 ///*
78 // * /kernel/sched/core.c:6582
79 // * Special case: If a kmalloc of a doms_cur partition (array of
80 // * cpumask) fails, then fallback to a single sched domain,
81 // * as determined by the single cpumask fallback_doms.
82 // */
83 //static cpumask_var_t fallback_doms;
84 //
85 ///*
86 // * /kernel/sched/core.c:5682
87 // * cpus with isolated domains
88 // */
89 //static cpumask_var_t cpu_isolated_map;
90 //
91 ///*
92 // * /kernel/sched/core.c:5323
93 // */
94 //DEFINE_PER_CPU(int, sd_llc_id);
95 //
96 ///*
97 // * /kernel/sched/core.c:2623
98 // * unknown
99 // */
100 //DEFINE_PER_CPU(struct kernel_stat, kstat);
101 //DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
102 //
103 //
104 ///*
105 // * /kernel/sched/core.c:291
106 // */
107 //__read_mostly int scheduler_running;
108 //
109 ///*
110 // * kernel/sched/core.c:113
111 // */
112 //DEFINE_MUTEX(sched_domains_mutex);
113 //DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
114 //
115 //
116 //
117 ///*
118 // * __task_rq_lock - lock the rq @p resides on.
119 // */
120 //static inline struct rq *__task_rq_lock(struct task_struct *p)
121 //      __acquires(rq->lock)
122 //{
123 //      struct rq *rq;
124 //
125 //      lockdep_assert_held(&p->pi_lock);
126 //
127 //      for (;;) {
128 //              rq = task_rq(p);
129 //              raw_spin_lock(&rq->lock);
130 //              if (likely(rq == task_rq(p)))
131 //                      return rq;
132 //              raw_spin_unlock(&rq->lock);
133 //      }
134 //}
135 //
136 //
137 //
138 ///*
139 // * Lock/unlock task from runqueue
140 // */
141 //
142 ///*
143 // * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
144 // */
145 //static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
146 //      __acquires(p->pi_lock)
147 //      __acquires(rq->lock)
148 //{
149 //      struct rq *rq;
150 //
151 //      for (;;) {
152 //              raw_spin_lock_irqsave(&p->pi_lock, *flags);
153 //              rq = task_rq(p);
154 //              raw_spin_lock(&rq->lock);
155 //              if (likely(rq == task_rq(p)))
156 //                      return rq;
157 //              raw_spin_unlock(&rq->lock);
158 //              raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
159 //      }
160 //}
161 //
162 //static void __task_rq_unlock(struct rq *rq)
163 //      __releases(rq->lock)
164 //{
165 //      raw_spin_unlock(&rq->lock);
166 //}
167 //
168 //static inline void
169 //task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
170 //      __releases(rq->lock)
171 //      __releases(p->pi_lock)
172 //{
173 //      raw_spin_unlock(&rq->lock);
174 //      raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
175 //}
176 //
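///*
// * Illustrative sketch (not from the original sources): the helpers above are
// * meant to be used in matched pairs around any access to a task's runqueue
// * state, roughly:
// *
// *      unsigned long flags;
// *      struct rq *rq = task_rq_lock(p, &flags);
// *      ... read or modify p's per-runqueue state under rq->lock ...
// *      task_rq_unlock(rq, p, &flags);
// *
// * __task_rq_lock()/__task_rq_unlock() are the variants for callers that
// * already hold p->pi_lock.
// */
//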
177 /////*
178 //// * this_rq_lock - lock this runqueue and disable interrupts.
179 //// */
180 ////static struct rq *this_rq_lock(void)
181 ////    __acquires(rq->lock)
182 ////{
183 ////    struct rq *rq;
184 ////
185 ////    local_irq_disable();
186 ////    rq = this_rq();
187 ////    raw_spin_lock(&rq->lock);
188 ////
189 ////    return rq;
190 ////}
191 //
192 //
193 //
194 ///*
195 // * Functions
196 // */
197 //
198 ///**
199 // * kernel/sched/core.c:6872
200 // * Initialize the scheduler
201 // */
202 //void sched_init(void)
203 //{
204 //      int i;
205 //      unsigned long alloc_size = 0, ptr;
206 //
207 //#ifdef CONFIG_CPUMASK_OFFSTACK
208 //      alloc_size += num_possible_cpus() * cpumask_size();
209 //#endif
210 //      if (alloc_size) {
211 //              ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
212 //      }
213 //
214 //      // TODO: SMP
215 ////#ifdef CONFIG_SMP
216 ////    init_defrootdomain();
217 ////#endif
218 //
219 ////    init_rt_bandwidth(&def_rt_bandwidth,
220 ////                    global_rt_period(), global_rt_runtime());
221 //
222 //      for_each_possible_cpu(i) {
223 //              struct rq *rq;
224 //
225 //              rq = cpu_rq(i);
226 //              raw_spin_lock_init(&rq->lock);
227 //              rq->nr_running = 0;
228 //              INIT_LIST_HEAD(&rq->rq_list);
229 //
230 ////            rq->calc_load_active = 0;
231 ////            rq->calc_load_update = jiffies + LOAD_FREQ;
232 //
233 ////            init_cfs_rq(&rq->cfs);
234 ////            init_rt_rq(&rq->rt, rq);
235 //
236 ////            rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
237 //
238 ////            for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
239 ////                    rq->cpu_load[j] = 0;
240 //
241 ////            rq->last_load_update_tick = jiffies;
242 //
243 //#ifdef CONFIG_SMP
244 ////            rq->sd = NULL;
245 ////            rq->rd = NULL;
246 //////          rq->cpu_power = SCHED_POWER_SCALE;
247 ////            rq->post_schedule = 0;
248 //////          rq->active_balance = 0;
249 //////          rq->next_balance = jiffies;
250 ////            rq->push_cpu = 0;
251 //              rq->cpu = i;
252 //              rq->online = 0;
253 //////          rq->idle_stamp = 0;
254 //////          rq->avg_idle = 2*sysctl_sched_migration_cost;
255 ////
256 ////            INIT_LIST_HEAD(&rq->cfs_tasks);
257 ////
258 ////            rq_attach_root(rq, &def_root_domain);
259 ////#ifdef CONFIG_NO_HZ
260 ////            rq->nohz_flags = 0;
261 ////#endif
262 //#endif
263 ////            init_rq_hrtick(rq);
264 //              atomic_set(&rq->nr_iowait, 0);
265 //      }
266 //
267 ////    set_load_weight(&init_task);
268 //
269 //      /*
270 //       * The boot idle thread does lazy MMU switching as well:
271 //       */
272 //      atomic_inc(&init_mm.mm_count);
273 //      enter_lazy_tlb(&init_mm, current);
274 //
275 //      /*
276 //       * Make us the idle thread. Technically, schedule() should not be
277 //       * called from this thread, however somewhere below it might be,
278 //       * but because we are the idle thread, we just pick up running again
279 //       * when this runqueue becomes "idle".
280 //       */
281 //      init_idle(current, smp_processor_id());
282 //
283 ////    calc_load_update = jiffies + LOAD_FREQ;
284 //
285 //      /*
286 //       * During early bootup we pretend to be a normal task:
287 //       */
288 ////    current->sched_class = &fair_sched_class;
289 //
290 //#ifdef CONFIG_SMP
291 //      idle_thread_set_boot_cpu();
292 //#endif
293 ////    init_sched_fair_class();
294 //
295 //      scheduler_running = 1;
296 //}
297 //
298 //#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
299 //static inline int preempt_count_equals(int preempt_offset)
300 //{
301 //      int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
302 //
303 //      return (nested == preempt_offset);
304 //}
305 //
306 //void __might_sleep(const char *file, int line, int preempt_offset)
307 //{
308 //      static unsigned long prev_jiffy;        /* ratelimiting */
309 //
310 //      rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
311 //      if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
312 //          system_state != SYSTEM_RUNNING || oops_in_progress)
313 //              return;
314 //      if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
315 //              return;
316 //      prev_jiffy = jiffies;
317 //
318 //      printk(KERN_ERR
319 //              "BUG: sleeping function called from invalid context at %s:%d\n",
320 //                      file, line);
321 //      printk(KERN_ERR
322 //              "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
323 //                      in_atomic(), irqs_disabled(),
324 //                      current->pid, current->comm);
325 //
326 //      debug_show_held_locks(current);
327 //      if (irqs_disabled())
328 //              print_irqtrace_events(current);
329 //      dump_stack();
330 //}
331 //EXPORT_SYMBOL(__might_sleep);
332 //#endif
333 //
334 ///*
335 // * kernel/sched/core.c:1560
336 // * Perform scheduler related setup for a newly forked process p.
337 // * p is forked by current.
338 // *
339 // * __sched_fork() is basic setup used by init_idle() too:
340 // */
341 //static void __sched_fork(struct task_struct *p)
342 //{
343 //      p->on_rq                                        = 0;
344 //
345 //      p->se.on_rq                                     = 0;
346 //      p->se.exec_start                        = 0;
347 //      p->se.sum_exec_runtime          = 0;
348 //      p->se.prev_sum_exec_runtime     = 0;
349 //      p->se.vruntime                          = 0;
350 //}
351 //
352 ///*
353 // * kernel/sched/core.c:1622
354 // * fork()/clone()-time setup:
355 // */
356 //void sched_fork(struct task_struct *p)
357 //{
358 //      unsigned long flags;
359 //      int cpu = get_cpu();
360 //
361 //      __sched_fork(p);
362 //
363 //      /*
364 //       * We mark the process as running here. This guarantees that
365 //       * nobody will actually run it, and a signal or other external
366 //       * event cannot wake it up and insert it on the runqueue either.
367 //       */
368 //      p->state = TASK_RUNNING;
369 //
370 //      /*
371 //       * Make sure we do not leak PI boosting priority to the child.
372 //       */
373 //      p->prio = current->normal_prio;
374 //
375 //      raw_spin_lock_irqsave(&p->pi_lock, flags);
376 //      set_task_cpu(p, cpu);
377 //      raw_spin_unlock_irqrestore(&p->pi_lock, flags);
378 //
379 //#if defined(CONFIG_SMP)
380 //      p->on_cpu = 0;
381 //#endif
382 //#ifdef CONFIG_PREEMPT_COUNT
383 //      /* Want to start with kernel preemption disabled. */
384 //      task_thread_info(p)->preempt_count = 1;
385 //#endif
386 //
387 //      put_cpu();
388 //}
389 //
390 ///**
391 // * /kernel/sched/core.c:4674
392 // * init_idle - set up an idle thread for a given CPU
393 // * @idle: task in question
394 // * @cpu: cpu the idle task belongs to
395 // *
396 // * NOTE: this function does not set the idle thread's NEED_RESCHED
397 // * flag, to make booting more robust.
398 // */
399 //void __cpuinit init_idle(struct task_struct *idle, int cpu)
400 //{
401 //      struct rq *rq = cpu_rq(cpu);
402 //      unsigned long flags;
403 //
404 //      raw_spin_lock_irqsave(&rq->lock, flags);
405 //
406 //      __sched_fork(idle);
407 //      idle->state = TASK_RUNNING;
408 //      idle->se.exec_start = sched_clock();
409 //
410 //      do_set_cpus_allowed(idle, cpumask_of(cpu));
411 //      /*
412 //       * We're having a chicken and egg problem, even though we are
413 //       * holding rq->lock, the cpu isn't yet set to this cpu so the
414 //       * lockdep check in task_group() will fail.
415 //       *
416 //       * Similar case to sched_fork(). / Alternatively we could
417 //       * use task_rq_lock() here and obtain the other rq->lock.
418 //       *
419 //       * Silence PROVE_RCU
420 //       */
421 //      rcu_read_lock();
422 //      __set_task_cpu(idle, cpu);
423 //      rcu_read_unlock();
424 //
425 //      rq->curr = rq->idle = idle;
426 //#if defined(CONFIG_SMP)
427 //      idle->on_cpu = 1;
428 //#endif
429 //      raw_spin_unlock_irqrestore(&rq->lock, flags);
430 //
431 //      /* Set the preempt count _outside_ the spinlocks! */
432 //      task_thread_info(idle)->preempt_count = 0;
433 //
434 //#if defined(CONFIG_SMP)
435 //      sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
436 //#endif
437 //}
438 //
439 ///*
440 // * /kernel/sched/cputime.c:436
441 // * Account multiple ticks of idle time.
442 // * @ticks: number of stolen ticks
443 // */
444 //void account_idle_ticks(unsigned long ticks)
445 //{
446 //      //printk("\naccount_idle_ticks");
447 //
448 //      return;
449 //}
450 //
451 ///*
452 // * /kernel/sched/cputime.c:397
453 // * Account a single tick of cpu time.
454 // * @p: the process that the cpu time gets accounted to
455 // * @user_tick: indicates if the tick is a user or a system tick
456 // */
457 //void account_process_tick(struct task_struct *p, int user_tick)
458 //{
459 //      //printk("\naccount_process_tick");
460 //
461 //      return;
462 //}
463 //
464 ///*
465 // * /kernel/sched/core.c:2092
466 // * get_avenrun - get the load average array
467 // * @loads:    pointer to dest load array
468 // * @offset:    offset to add
469 // * @shift:    shift count to shift the result left
470 // *
471 // * These values are estimates at best, so no need for locking.
472 // */
473 //void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
474 //{
475 //      //printk("\nget_avenrun");
476 //
477 //      return;
478 //}
479 //
480 ///*
481 // * /kernel/sched/core.c:2363
482 // * calc_load - update the avenrun load estimates 10 ticks after the
483 // * CPUs have updated calc_load_tasks.
484 // */
485 //void calc_global_load(unsigned long ticks)
486 //{
487 ////    printk("\ncalc_global_load");
488 //
489 //      return;
490 //}
491 //
492 ///*
493 // * /kernel/sched/core.c:2197
494 // * We're going into NOHZ mode, if there's any pending delta, fold it
495 // * into the pending idle delta.
496 // */
497 ///*void calc_load_enter_idle(void)
498 //{
499 //      return;
500 //}*/
501 //
502 ///*
503 // * /kernel/sched/core.c:2213
504 // * If we're still before the sample window, we're done.
505 // *
506 // * We woke inside or after the sample window, this means we're already
507 // * accounted through the nohz accounting, so skip the entire deal and
508 // * sync up for the next window.
509 // */
510 ///*void calc_load_exit_idle(void)
511 //{
512 //      return;
513 //}*/
514 //
515 ///*
516 // * /kernel/sched/core.c:3668
517 // * Check if a task can reduce its nice value
518 // * @p: task
519 // * @nice: nice value
520 // */
521 //int can_nice(const struct task_struct *p, const int nice)
522 //{
523 //      //printk("\ncan_nice");
524 //
525 //      return 0;
526 //}
527 //
528 ///**
529 // * kernel/sched/core.c:3768
530 // * idle_task - return the idle task for a given cpu.
531 // * @cpu: the processor in question.
532 // */
533 //struct task_struct *idle_task(int cpu)
534 //{
535 //      return cpu_rq(cpu)->idle;
536 //}
537 //
538 ///**
539 // * /kernel/sched/core.c:3742
540 // * idle_cpu - is a given cpu idle currently?
541 // * @cpu: the processor in question.
542 // */
543 //int idle_cpu(int cpu)
544 //{
545 //      struct rq *rq = cpu_rq(cpu);
546 //
547 //      if (rq->curr != rq->idle)
548 //              return 0;
549 //
550 //      if (rq->nr_running)
551 //              return 0;
552 //
553 //#ifdef CONFIG_SMP
554 //      if (!llist_empty(&rq->wake_list))
555 //              return 0;
556 //#endif
557 //
558 //      return 1;
559 //}
560 //
561 ///*
562 // * /kernel/sched/core.c:4669
563 // * Sets sched_class of idle task, see struct sched_class idle_sched_class;
564 // */
565 //void __cpuinit init_idle_bootup_task(struct task_struct *idle)
566 //{
567 //      //printk("\ninit_idle_bootup_task");
568 //
569 //      return;
570 //}
571 //
572 ///*
573 // * /kernel/sched/core.c:7108
574 // * Calls private function
575 // * static void normalize_task(struct rq *rq, struct task_struct *p)
576 // */
577 //void normalize_rt_tasks(void)
578 //{
579 //      printk("\nnormalize_rt_tasks");
580 //
581 //      return;
582 //}
583 //
584 ///*
585 // * /kernel/sched/core.c:1997
586 // * nr_running and nr_context_switches:
587 // *
588 // * externally visible scheduler statistics:
589 // *   current number of runnable threads
590 // *   total number of context switches performed since bootup.
591 // */
592 //unsigned long nr_running(void)
593 //{
594 //      printk("\nnr_running");
595 //
596 //      // TODO: SMP
597 //
598 //      return 0;
599 //}
600 //
601 //unsigned long long nr_context_switches(void)
602 //{
603 ////    printk("\nnr_context_switches");
604 //
605 //      int i;
606 //      unsigned long long sum = 0;
607 //
608 //      for_each_possible_cpu(i)
609 //              sum += cpu_rq(i)->nr_switches;
610 //
611 //      return sum;
612 //}
613 //
614 ///*
615 // * /kernel/sched/core.c:2008
616 // * number of threads waiting on IO
617 // */
618 //unsigned long nr_iowait(void)
619 //{
620 //      printk("\nnr_iowait");
621 //
622 //      // TODO: SMP
623 //
624 //      return 0;
625 //}
626 //
627 ///*
628 // * kernel/sched/core.c:2018
629 // */
630 //unsigned long nr_iowait_cpu(int cpu)
631 //{
632 //      printk("\nnr_iowait_cpu");
633 //
634 //      // TODO: SMP
635 //
636 //      return 0;
637 //}
638 //
639 ///*
640 // * rt_mutex_setprio - set the current priority of a task
641 // * @p: task
642 // * @prio: prio value (kernel-internal form)
643 // *
644 // * This function changes the 'effective' priority of a task. It does
645 // * not touch ->normal_prio like __setscheduler().
646 // *
647 // * Used by the rt_mutex code to implement priority inheritance logic.
648 // */
649 //void rt_mutex_setprio(struct task_struct *p, int prio)
650 //{
651 //      printk("\nrt_mutex_setprio");
652 //
653 //      return;
654 //}
655 //
656 ///**
657 // * sched_clock_cpu - returns current time in nanosec units
658 // * using scheduler clock function.
659 // * @param: cpu id
660 // */
661 ////u64 sched_clock_cpu(int cpu)
662 ////{
663 ////    return 0;
664 ////}
665 //
666 ///*
667 // * kernel/sched/clock.c:350
668 // * Initialize/Start scheduler clock.
669 // */
670 ////void sched_clock_init(void)
671 ////{
672 ////    return;
673 ////}
674 //
675 ///**
676 // * kernel/sched/core.c:4213
677 // * This function stores the CPU affinity mask for the process or thread with the ID pid in the cpusetsize
678 // * bytes long bitmap pointed to by cpuset. If successful, the function always initializes all bits in the
679 // * cpu_set_t object and returns zero.
680 // *
681 // * If pid does not correspond to a process or thread on the system, or the function fails for some other
682 // * reason, it returns -1 and errno is set to represent the error condition.
683 // */
684 //long sched_getaffinity(pid_t pid, struct cpumask *mask)
685 //{
686 //      printk("\nsched_getaffinity");
687 //
688 //      // TODO: SMP
689 //
690 //      return 0;
691 //}
692 //
693 ///**
694 // * kernel/sched/core.c:7571
695 // */
696 //int sched_rr_handler(struct ctl_table *table, int write,
697 //              void __user *buffer, size_t *lenp,
698 //              loff_t *ppos)
699 //{
700 //      //printk("\nsched_rr_handler");
701 //
702 //      return 0;
703 //}
704 //
705 ///**
706 // * kernel/sched/core.c:4111
707 // * This function installs the cpusetsize bytes long affinity mask pointed to by cpuset for the process or
708 // * thread with the ID pid. If successful, the function returns zero and the scheduler will in future take the
709 // * affinity information into account.
710 // */
711 //long sched_setaffinity(pid_t pid, const struct cpumask *new_mask)
712 //{
713 //      //printk("\nsched_setaffinity");
714 //
715 //      return 0;
716 //}
717 //
718 ///**
719 // * kernel/sched/core.c:3975
720 // * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
721 // * @p: the task in question.
722 // * @policy: new policy.
723 // * @param: structure containing the new RT priority.
724 // *
725 // * NOTE that the task may be already dead.
726 // */
727 //int sched_setscheduler(struct task_struct *p, int policy,
728 //              const struct sched_param *param)
729 //{
730 //      //printk("\nsched_setscheduler");
731 //
732 //      return 0;
733 //}
734 //
735 ///**
736 // * kernel/sched/core.c:3993
737 // * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
738 // * @p: the task in question.
739 // * @policy: new policy.
740 // * @param: structure containing the new RT priority.
741 // *
742 // * Just like sched_setscheduler, only don't bother checking if the
743 // * current context has permission.  For example, this is needed in
744 // * stop_machine(): we create temporary high priority worker threads,
745 // * but our caller might not have that capability.
746 // */
747 //int sched_setscheduler_nocheck(struct task_struct *p, int policy,
748 //const struct sched_param *param)
749 //{
750 ////    //printk("\nsched_setscheduler_nocheck");
751 //
752 //      return 0;
753 //}
754 //
755 ///**
756 // * kernel/sched/core.c:4601
757 // */
758 //void sched_show_task(struct task_struct *p)
759 //{
760 //      //printk("\nsched_show_task");
761 //
762 //      return;
763 //}
764 //
765 ///**
766 // * kernel/sched/core.c:652
767 // */
768 //void resched_task(struct task_struct *p)
769 //{
770 //      int cpu;
771 //
772 //      assert_raw_spin_locked(&task_rq(p)->lock);
773 //
774 //      if (test_tsk_need_resched(p))
775 //              return;
776 //
777 //      set_tsk_need_resched(p);
778 //
779 //      cpu = task_cpu(p);
780 //      if (cpu == smp_processor_id())
781 //              return;
782 //
783 //      /* NEED_RESCHED must be visible before we test polling */
784 //      smp_mb();
785 //      if (!tsk_is_polling(p))
786 //              smp_send_reschedule(cpu);
787 //}
788 //
789 ///**
790 // * kernel/sched/core.c:1806
791 // * prepare_task_switch - prepare to switch tasks
792 // * @rq: the runqueue preparing to switch
793 // * @prev: the current task that is being switched out
794 // * @next: the task we are going to switch to.
795 // *
796 // * This is called with the rq lock held and interrupts off. It must
797 // * be paired with a subsequent finish_task_switch after the context
798 // * switch.
799 // *
800 // * prepare_task_switch sets up locking and calls architecture specific
801 // * hooks.
802 // */
803 //static inline void
804 //prepare_task_switch(struct rq *rq, struct task_struct *prev,
805 //                  struct task_struct *next)
806 //{
807 ////    trace_sched_switch(prev, next);
808 ////    sched_info_switch(prev, next);
809 ////    perf_event_task_sched_out(prev, next);
810 ////    fire_sched_out_preempt_notifiers(prev, next);
811 //      prepare_lock_switch(rq, next);
812 ////    prepare_arch_switch(next);
813 //}
814 //
815 ///**
816 // * kernel/sched/core.c:1826
817 // * finish_task_switch - clean up after a task-switch
818 // * @rq: runqueue associated with task-switch
819 // * @prev: the thread we just switched away from.
820 // *
821 // * finish_task_switch must be called after the context switch, paired
822 // * with a prepare_task_switch call before the context switch.
823 // * finish_task_switch will reconcile locking set up by prepare_task_switch,
824 // * and do any other architecture-specific cleanup actions.
825 // *
826 // * Note that we may have delayed dropping an mm in context_switch(). If
827 // * so, we finish that here outside of the runqueue lock. (Doing it
828 // * with the lock held can cause deadlocks; see schedule() for
829 // * details.)
830 // */
831 //static void finish_task_switch(struct rq *rq, struct task_struct *prev)
832 //      __releases(rq->lock)
833 //{
834 //      struct mm_struct *mm = rq->prev_mm;
835 //      long prev_state;
836 //
837 //      rq->prev_mm = NULL;
838 //
839 //      /*
840 //       * A task struct has one reference for the use as "current".
841 //       * If a task dies, then it sets TASK_DEAD in tsk->state and calls
842 //       * schedule one last time. The schedule call will never return, and
843 //       * the scheduled task must drop that reference.
844 //       * The test for TASK_DEAD must occur while the runqueue locks are
845 //       * still held, otherwise prev could be scheduled on another cpu, die
846 //       * there before we look at prev->state, and then the reference would
847 //       * be dropped twice.
848 //       *              Manfred Spraul <manfred@colorfullife.com>
849 //       */
850 //      prev_state = prev->state;
851 //      vtime_task_switch(prev);
852 ////    finish_arch_switch(prev);
853 ////    perf_event_task_sched_in(prev, current);
854 //      finish_lock_switch(rq, prev);
855 //      finish_arch_post_lock_switch();
856 //
857 ////    fire_sched_in_preempt_notifiers(current);
858 //      if (mm)
859 //              mmdrop(mm);
860 //      if (unlikely(prev_state == TASK_DEAD)) {
861 //              /*
862 //               * Remove function-return probe instances associated with this
863 //               * task and put them back on the free list.
864 //               */
865 //              kprobe_flush_task(prev);
866 //              put_task_struct(prev);
867 //      }
868 //}
869 //
870 //#ifdef CONFIG_SMP
871 //
872 ///* assumes rq->lock is held */
873 //static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
874 //{
875 //}
876 //
877 ///* rq->lock is NOT held, but preemption is disabled */
878 //static inline void post_schedule(struct rq *rq)
879 //{
880 //}
881 //
882 //#else
883 //
884 //static inline void pre_schedule(struct rq *rq, struct task_struct *p)
885 //{
886 //}
887 //
888 //static inline void post_schedule(struct rq *rq)
889 //{
890 //}
891 //
892 //#endif
893 //
894 ///**
895 // * kernel/sched/core.c:1905
896 // * schedule_tail - first thing a freshly forked thread must call.
897 // * @prev: the thread we just switched away from.
898 // */
899 //asmlinkage void schedule_tail(struct task_struct *prev)
900 //      __releases(rq->lock)
901 //{
902 //      struct rq *rq = this_rq();
903 //
904 //      finish_task_switch(rq, prev);
905 //
906 //      /*
907 //       * FIXME: do we need to worry about rq being invalidated by the
908 //       * task_switch?
909 //       */
910 //      // TODO: SMP
911 //      post_schedule(rq);
912 //
913 //      // TODO: replace this irq enable, maybe inside post_schedule
914 //      arch_local_irq_enable();
915 //
916 //#ifdef __ARCH_WANT_UNLOCKED_CTXSW
917 //      /* In this case, finish_task_switch does not reenable preemption */
918 //      preempt_enable();
919 //#endif
920 //      if (current->set_child_tid)
921 //              put_user(task_pid_vnr(current), current->set_child_tid);
922 //}
923 //
924 //
925 ///**
926 // * kernel/sched/core.c:769
927 // */
928 //static void update_rq_clock_task(struct rq *rq, s64 delta)
929 //{
930 ///*
931 // * In theory, the compiler should just see 0 here, and optimize out the call
932 // * to sched_rt_avg_update. But I don't trust it...
933 // */
934 //#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
935 //      s64 steal = 0, irq_delta = 0;
936 //#endif
937 //#ifdef CONFIG_IRQ_TIME_ACCOUNTING
938 //      irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
939 //
940 //      /*
941 //       * Since irq_time is only updated on {soft,}irq_exit, we might run into
942 //       * this case when a previous update_rq_clock() happened inside a
943 //       * {soft,}irq region.
944 //       *
945 //       * When this happens, we stop ->clock_task and only update the
946 //       * prev_irq_time stamp to account for the part that fit, so that a next
947 //       * update will consume the rest. This ensures ->clock_task is
948 //       * monotonic.
949 //       *
950 // * It does, however, cause some slight misattribution of {soft,}irq
951 //       * time, a more accurate solution would be to update the irq_time using
952 //       * the current rq->clock timestamp, except that would require using
953 //       * atomic ops.
954 //       */
955 //      if (irq_delta > delta)
956 //              irq_delta = delta;
957 //
958 //      rq->prev_irq_time += irq_delta;
959 //      delta -= irq_delta;
960 //#endif
961 //#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
962 //      if (static_key_false((&paravirt_steal_rq_enabled))) {
963 //              u64 st;
964 //
965 //              steal = paravirt_steal_clock(cpu_of(rq));
966 //              steal -= rq->prev_steal_time_rq;
967 //
968 //              if (unlikely(steal > delta))
969 //                      steal = delta;
970 //
971 //              st = steal_ticks(steal);
972 //              steal = st * TICK_NSEC;
973 //
974 //              rq->prev_steal_time_rq += steal;
975 //
976 //              delta -= steal;
977 //      }
978 //#endif
979 //
980 //      rq->clock_task += delta;
981 //
982 //#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
983 //      if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
984 //              sched_rt_avg_update(rq, irq_delta + steal);
985 //#endif
986 //}
987 //
988 ////static void update_rq_clock_task(struct rq *rq, s64 delta);
989 //void update_rq_clock(struct rq *rq)
990 //{
991 //      s64 delta;
992 //
993 //      if (rq->skip_clock_update > 0)
994 //              return;
995 //
996 //      delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
997 //      rq->clock += delta;
998 //      update_rq_clock_task(rq, delta);
999 //}
1000 //
1001 ///*
1002 // * kernel/sched/core.c:2684
1003 // * This function gets called by the timer code, with HZ frequency.
1004 // * We call it with interrupts disabled.
1005 // */
1006 //void scheduler_tick(void)
1007 //{
1008 //      int cpu = smp_processor_id();
1009 //      struct rq *rq = cpu_rq(cpu);
1010 //      struct task_struct *curr = rq->curr;
1011 //
1012 //      u64 now = rq->clock_task;
1013 //      unsigned long delta_exec;
1014 //
1015 //      sched_clock_tick();
1016 //
1017 //      raw_spin_lock(&rq->lock);
1018 //      update_rq_clock(rq);
1019 //
1020 //      /*
1021 //       * Update run-time statistics of the 'current'.
1022 //       */
1023 //      if (unlikely(!curr)) {
1024 //              raw_spin_unlock(&rq->lock);
1025 //              return;
1026 //      }
1027 //
1028 //      /*
1029 //       * Get the amount of time the current task was running
1030 //       * since the last time we changed load (this cannot
1031 //       * overflow on 32 bits):
1032 //       */
1033 //      delta_exec = (unsigned long)(now - curr->se.exec_start);
1034 //
1035 //      if (delta_exec > RR_TIMESLICE) {
1036 //              resched_task(curr);
1037 //      }
1038 //
1039 //      raw_spin_unlock(&rq->lock);
1040 //
1041 //      // TODO: SMP for load balancing
1042 //}
1043 //
1044 ///*
1045 // * kernel/sched/core.c:2649
1046 // * Lock/unlock the current runqueue - to extract task statistics:
1047 // */
1048 //unsigned long long task_delta_exec(struct task_struct *p)
1049 //{
1050 //      printk("\ntask_delta_exec");
1051 //
1052 //      // TODO: SMP
1053 //
1054 //      return 0;
1055 //}
1056 //
1057 ///**
1058 // * kernel/sched/core.c:3727
1059 // * task_prio - return the priority value of a given task.
1060 // * @p: the task in question.
1061 // *
1062 // * This is the priority value as seen by users in /proc.
1063 // * RT tasks are offset by -200. Normal tasks are centered
1064 // * around 0, value goes from -16 to +15.
1065 // */
1066 //int task_prio(const struct task_struct *p)
1067 //{
1068 //      //printk("\ntask_prio");
1069 //
1070 //      return 0;
1071 //}
1072 //
1073 ///*
1074 // * kernel/sched/core.c:2667
1075 // * Return accounted runtime for the task.
1076 // * In case the task is currently running, return the runtime plus current's
1077 // * pending runtime that have not been accounted yet.
1078 // */
1079 //unsigned long long task_sched_runtime(struct task_struct *task)
1080 //{
1081 //      //printk("\ntask_sched_runtime");
1082 //
1083 //      return 0;
1084 //}
1085 //
1086 ///*
1087 // * kernel/sched/core.c:2024
1088 // * this_cpu_load - returns load of the cpu
1089 // */
1090 //unsigned long this_cpu_load(void)
1091 //{
1092 //      //printk("\nthis_cpu_load");
1093 //
1094 //      // TODO: SMP, needed in case of load balancing per CPU
1095 //
1096 //      return 0;
1097 //}
1098 //
1099 ///*
1100 // * kernel/sched/core.c:2556
1101 // * update_cpu_load_nohz - called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
1102 // */
1103 //void update_cpu_load_nohz(void)
1104 //{
1105 //      //printk("\nupdate_cpu_load_nohz");
1106 //
1107 //      return;
1108 //}
1109 //
1110 //
1111 ///*
1112 // * kernel/sched/core.c:1207
1113 // * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1114 // */
1115 //static inline
1116 //int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
1117 //{
1118 //      int cpu = task_cpu(p);
1119 ////    int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
1120 //
1121 //      /*
1122 //       * In order not to call set_task_cpu() on a blocking task we need
1123 //       * to rely on ttwu() to place the task on a valid ->cpus_allowed
1124 //       * cpu.
1125 //       *
1126 //       * Since this is common to all placement strategies, this lives here.
1127 //       *
1128 //       * [ this allows ->select_task() to simply return task_cpu(p) and
1129 //       *   not worry about this generic constraint ]
1130 //       */
1131 //      if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1132 //                   !cpu_online(cpu)))
1133 //              cpu = cpumask_first(tsk_cpus_allowed(p)); //select_fallback_rq(task_cpu(p), p);
1134 //
1135 //      return cpu;
1136 //}
1137 //
1138 ///*
1139 // * kernel/sched/core.c:736
1140 // */
1141 //void activate_task(struct rq *rq, struct task_struct *p, int flags)
1142 //{
1143 //      if (task_contributes_to_load(p))
1144 //              rq->nr_uninterruptible--;
1145 //
1146 ////    enqueue_task(rq, p, flags);
1147 //      list_add(&p->rq_tasks, &rq->rq_list);
1148 //}
1149 //
1150 ///*
1151 // * kernel/sched/core.c:744
1152 // */
1153 //void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1154 //{
1155 //      if (task_contributes_to_load(p))
1156 //              rq->nr_uninterruptible++;
1157 //
1158 ////    dequeue_task(rq, p, flags);
1159 //      list_del(&p->rq_tasks);
1160 //}
1161 //
1162 ///*
1163 // * kernel/sched/core.c:1275
1164 // */
1165 //static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1166 //{
1167 //      activate_task(rq, p, en_flags);
1168 //      p->on_rq = 1;
1169 //
1170 //      /* if a worker is waking up, notify workqueue */
1171 //      if (p->flags & PF_WQ_WORKER)
1172 //              wq_worker_waking_up(p, cpu_of(rq));
1173 //}
1174 //
1175 ///*
1176 // * kernel/sched/core.c:909
1177 // */
1178 //void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1179 //{
1180 ////    const struct sched_class *class;
1181 ////
1182 ////    if (p->sched_class == rq->curr->sched_class) {
1183 ////            rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1184 ////    } else {
1185 ////            for_each_class(class) {
1186 ////                    if (class == rq->curr->sched_class)
1187 ////                            break;
1188 ////                    if (class == p->sched_class) {
1189 ////                            resched_task(rq->curr);
1190 ////                            break;
1191 ////                    }
1192 ////            }
1193 ////    }
1194 //      if (rq->curr == rq->idle)
1195 //                      resched_task(rq->curr);
1196 //
1197 //      /*
1198 //       * A queue event has occurred, and we're going to schedule.  In
1199 //       * this case, we can save a useless back to back clock update.
1200 //       */
1201 //      if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1202 //              rq->skip_clock_update = 1;
1203 //}
1204 //
1205 ///*
1206 // * kernel/sched/core.c:1289
1207 // * Mark the task runnable and perform wakeup-preemption.
1208 // */
1209 //static void
1210 //ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1211 //{
1212 ////    trace_sched_wakeup(p, true);
1213 //      check_preempt_curr(rq, p, wake_flags);
1214 //
1215 //      p->state = TASK_RUNNING;
1216 ////#ifdef CONFIG_SMP
1217 ////    if (p->sched_class->task_woken)
1218 ////            p->sched_class->task_woken(rq, p);
1219 ////
1220 ////    if (rq->idle_stamp) {
1221 ////            u64 delta = rq->clock - rq->idle_stamp;
1222 ////            u64 max = 2*sysctl_sched_migration_cost;
1223 ////
1224 ////            if (delta > max)
1225 ////                    rq->avg_idle = max;
1226 ////            else
1227 ////                    update_avg(&rq->avg_idle, delta);
1228 ////            rq->idle_stamp = 0;
1229 ////    }
1230 ////#endif
1231 //}
1232 //
1233 ///*
1234 // * kernel/sched/core.c:1313
1235 // */
1236 //static void
1237 //ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1238 //{
1239 //#ifdef CONFIG_SMP
1240 //      if (p->sched_contributes_to_load)
1241 //              rq->nr_uninterruptible--;
1242 //#endif
1243 //
1244 //      ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1245 //      ttwu_do_wakeup(rq, p, wake_flags);
1246 //}
1247 //
1248 //#ifdef CONFIG_SMP
1249 ///*
1250 // * kernel/sched/core.c:1394
1251 // */
1252 //static void ttwu_queue_remote(struct task_struct *p, int cpu)
1253 //{
1254 //      if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1255 //              smp_send_reschedule(cpu);
1256 //}
1257 //#endif
1258 //
1259 ///*
1260 // * kernel/sched/core.c:1406
1261 // */
1262 //static void ttwu_queue(struct task_struct *p, int cpu)
1263 //{
1264 //      struct rq *rq = cpu_rq(cpu);
1265 //
1266 //#if defined(CONFIG_SMP)
1267 //      if (/*sched_feat(TTWU_QUEUE) && */!cpus_share_cache(smp_processor_id(), cpu)) {
1268 //              sched_clock_cpu(cpu); /* sync clocks x-cpu */
1269 //              ttwu_queue_remote(p, cpu);
1270 //              return;
1271 //      }
1272 //#endif
1273 //
1274 //      raw_spin_lock(&rq->lock);
1275 //      ttwu_do_activate(rq, p, 0);
1276 //      raw_spin_unlock(&rq->lock);
1277 //}
1278 //
1279 ///*
1280 // * kernel/sched/core.c:1703
1281 // * wake_up_new_task - wake up a newly created task for the first time.
1282 // *
1283 // * This function will do some initial scheduler statistics housekeeping
1284 // * that must be done for every newly created context, then puts the task
1285 // * on the runqueue and wakes it.
1286 // */
1287 //void wake_up_new_task(struct task_struct *p)
1288 //{
1289 //      unsigned long flags;
1290 //      struct rq *rq;
1291 ////    int cpu = 255;
1292 //
1293 //      raw_spin_lock_irqsave(&p->pi_lock, flags);
1294 //
1295 //#ifdef CONFIG_SMP
1296 //      /*
1297 //       * Fork balancing, do it here and not earlier because:
1298 //       *  - cpus_allowed can change in the fork path
1299 //       *  - any previously selected cpu might disappear through hotplug
1300 //       */
1301 //      set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
1302 ////    printk("new thread\n");
1303 ////    for_each_cpu(cpu, &(p->cpus_allowed)){
1304 ////            printk("Asked for CPU #%d\n", cpu);
1305 ////    }
1306 //
1307 //#endif
1308 //
1309 //      rq = __task_rq_lock(p);
1310 //      activate_task(rq, p, 0);
1311 //      p->on_rq = 1;
1312 ////    trace_sched_wakeup_new(p, true);
1313 //      check_preempt_curr(rq, p, WF_FORK);
1314 ////#ifdef CONFIG_SMP
1315 ////    if (p->sched_class->task_woken)
1316 ////            p->sched_class->task_woken(rq, p);
1317 ////#endif
1318 //      task_rq_unlock(rq, p, &flags);
1319 //}
1320 //
1321 ///*
1322 // * kernel/sched/core.c:1330
1323 // * Called in case the task @p isn't fully descheduled from its runqueue,
1324 // * in this case we must do a remote wakeup. It's a 'light' wakeup though,
1325 // * since all we need to do is flip p->state to TASK_RUNNING, since
1326 // * the task is still ->on_rq.
1327 // */
1328 //static int ttwu_remote(struct task_struct *p, int wake_flags)
1329 //{
1330 //      struct rq *rq;
1331 //      int ret = 0;
1332 //
1333 //      rq = __task_rq_lock(p);
1334 //      if (p->on_rq) {
1335 //              ttwu_do_wakeup(rq, p, wake_flags);
1336 //              ret = 1;
1337 //      }
1338 //      __task_rq_unlock(rq);
1339 //
1340 //      return ret;
1341 //}
1342 //
1343 ///**
1344 // * kernel/sched/core.c:1439
1345 // * try_to_wake_up - wake up a thread
1346 // * @p: the thread to be awakened
1347 // * @state: the mask of task states that can be woken
1348 // * @wake_flags: wake modifier flags (WF_*)
1349 // *
1350 // * Put it on the run-queue if it's not already there. The "current"
1351 // * thread is always on the run-queue (except when the actual
1352 // * re-schedule is in progress), and as such you're allowed to do
1353 // * the simpler "current->state = TASK_RUNNING" to mark yourself
1354 // * runnable without the overhead of this.
1355 // *
1356 // * Returns %true if @p was woken up, %false if it was already running
1357 // * or @state didn't match @p's state.
1358 // */
1359 //static int
1360 //try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1361 //{
1362 //      unsigned long flags;
1363 //      int cpu, success = 0;
1364 //
1365 //      smp_wmb();
1366 //      raw_spin_lock_irqsave(&p->pi_lock, flags);
1367 //      if (!(p->state & state))
1368 //              goto out;
1369 //
1370 //      success = 1; /* we're going to change ->state */
1371 //      cpu = task_cpu(p);
1372 //
1373 //      if (p->on_rq && ttwu_remote(p, wake_flags))
1374 //              goto stat;
1375 //
1376 //#ifdef CONFIG_SMP
1377 //      /*
1378 //       * If the owning (remote) cpu is still in the middle of schedule() with
1379 //       * this task as prev, wait until it's done referencing the task.
1380 //       */
1381 //      while (p->on_cpu)
1382 //              cpu_relax();
1383 //      /*
1384 //       * Pairs with the smp_wmb() in finish_lock_switch().
1385 //       */
1386 //      smp_rmb();
1387 //
1388 ////    p->sched_contributes_to_load = !!task_contributes_to_load(p);
1389 //      p->state = TASK_WAKING;
1390 //
1391 ////    if (p->sched_class->task_waking)
1392 ////            p->sched_class->task_waking(p);
1393 //
1394 //      // TODO: simply not using select_task_rq :)
1395 //      cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
1396 //      if (task_cpu(p) != cpu) {
1397 //              wake_flags |= WF_MIGRATED;
1398 //              set_task_cpu(p, cpu);
1399 //      }
1400 //#endif /* CONFIG_SMP */
1401 //
1402 //      ttwu_queue(p, cpu);
1403 //stat:
1404 ////    raw_spin_unlock(&rq->lock);
1405 //out:
1406 //      raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1407 //
1408 //      return success;
1409 //}
1410 //
1411 ///**
1412 // * kernel/sched/core.c:1497
1413 // * try_to_wake_up_local - try to wake up a local task with rq lock held
1414 // * @p: the thread to be awakened
1415 // *
1416 // * Put @p on the run-queue if it's not already there. The caller must
1417 // * ensure that this_rq() is locked, @p is bound to this_rq() and not
1418 // * the current task.
1419 // */
1420 //static void try_to_wake_up_local(struct task_struct *p)
1421 //{
1422 //      struct rq *rq = task_rq(p);
1423 //
1424 //      if (WARN_ON_ONCE(rq != this_rq()) ||
1425 //          WARN_ON_ONCE(p == current))
1426 //              return;
1427 //
1428 //      lockdep_assert_held(&rq->lock);
1429 //
1430 //      if (!raw_spin_trylock(&p->pi_lock)) {
1431 //              raw_spin_unlock(&rq->lock);
1432 //              raw_spin_lock(&p->pi_lock);
1433 //              raw_spin_lock(&rq->lock);
1434 //      }
1435 //
1436 //      if (!(p->state & TASK_NORMAL))
1437 //              goto out;
1438 //
1439 //      if (!p->on_rq)
1440 //              ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1441 //
1442 //      ttwu_do_wakeup(rq, p, 0);
1443 ////    ttwu_stat(p, smp_processor_id(), 0);
1444 //out:
1445 //      raw_spin_unlock(&p->pi_lock);
1446 //}
1447 //
1448 ///*
1449 // * kernel/sched/core.c:1931
1450 // * context_switch - switch to the new MM and the new
1451 // * thread's register state.
1452 // */
1453 //static inline void
1454 //context_switch(struct rq *rq, struct task_struct *prev,
1455 //             struct task_struct *next)
1456 //{
1457 //      struct mm_struct *mm, *oldmm;
1458 //
1459 //      prepare_task_switch(rq, prev, next);
1460 //
1461 //      mm = next->mm;
1462 //      oldmm = prev->active_mm;
1463 //      /*
1464 //       * For paravirt, this is coupled with an exit in switch_to to
1465 //       * combine the page table reload and the switch backend into
1466 //       * one hypercall.
1467 //       */
1468 ////    arch_start_context_switch(prev);
1469 //
1470 //      if (!mm) {
1471 //              next->active_mm = oldmm;
1472 //              atomic_inc(&oldmm->mm_count);
1473 //              enter_lazy_tlb(oldmm, next);
1474 //      }
1475 //      else
1476 //              switch_mm(oldmm, mm, next);
1477 //
1478 //      if (!prev->mm) {
1479 //              prev->active_mm = NULL;
1480 //              rq->prev_mm = oldmm;
1481 //      }
1482 //      /*
1483 //       * The runqueue lock will be released by the next
1484 //       * task (which is an invalid locking op, but in the case
1485 //       * of the scheduler it's an obvious special case), so we
1486 //       * do an early lockdep release here:
1487 //       */
1488 //#ifndef __ARCH_WANT_UNLOCKED_CTXSW
1489 //      spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1490 //#endif
1491 //
1492 //      context_tracking_task_switch(prev, next);
1493 //      /* Here we just switch the register state and the stack. */
1494 //      switch_to(prev, next, prev);
1495 //
1496 //      barrier();
1497 //      /*
1498 //       * this_rq must be evaluated again because prev may have moved
1499 //       * CPUs since it called schedule(), thus the 'rq' on its stack
1500 //       * frame will be invalid.
1501 //       */
1502 //      finish_task_switch(this_rq(), prev);
1503 //
1504 //}
1505 //
1506 ///*
1507 // * kernel/sched/core.c:2875
1508 // * __schedule() is the main scheduler function.
1509 // *
1510 // * The main means of driving the scheduler and thus entering this function are:
1511 // *
1512 // *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
1513 // *
1514 // *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
1515 // *      paths. For example, see arch/x86/entry_64.S.
1516 // *
1517 // *      To drive preemption between tasks, the scheduler sets the flag in timer
1518 // *      interrupt handler scheduler_tick().
1519 // *
1520 // *   3. Wakeups don't really cause entry into schedule(). They add a
1521 // *      task to the run-queue and that's it.
1522 // *
1523 // *      Now, if the new task added to the run-queue preempts the current
1524 // *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
1525 // *      called on the nearest possible occasion:
1526 // *
1527 // *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
1528 // *
1529 // *         - in syscall or exception context, at the next outmost
1530 // *           preempt_enable(). (this might be as soon as the wake_up()'s
1531 // *           spin_unlock()!)
1532 // *
1533 // *         - in IRQ context, return from interrupt-handler to
1534 // *           preemptible context
1535 // *
1536 // *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
1537 // *         then at the next:
1538 // *
1539 // *          - cond_resched() call
1540 // *          - explicit schedule() call
1541 // *          - return from syscall or exception to user-space
1542 // *          - return from interrupt-handler to user-space
1543 // */
1544 //static void __sched __schedule(void)
1545 //{
1546 //      struct task_struct *prev, *next;
1547 ////    unsigned long *switch_count = 0;
1548 //      struct rq *rq;
1549 //      int cpu;
1550 //
1551 //need_resched:
1552 //      preempt_disable();
1553 //      cpu = smp_processor_id();
1554 //      rq = cpu_rq(cpu);
1555 //      rcu_note_context_switch(cpu);
1556 //      prev = rq->curr;
1557 //
1558 ////    schedule_debug(prev);
1559 //
1560 ////    if (sched_feat(HRTICK))
1561 ////            hrtick_clear(rq);
1562 //
1563 //      raw_spin_lock_irq(&rq->lock);
1564 //
1565 ////    switch_count = &prev->nivcsw;
1566 //      if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
1567 //              if (unlikely(signal_pending_state(prev->state, prev))) {
1568 //                      prev->state = TASK_RUNNING;
1569 //              } else {
1570 ////                    deactivate_task(rq, prev, DEQUEUE_SLEEP);
1571 //                      prev->on_rq = 0;
1572 //
1573 //                      /*
1574 //                       * If a worker went to sleep, notify and ask workqueue
1575 //                       * whether it wants to wake up a task to maintain
1576 //                       * concurrency.
1577 //                       */
1578 //                      if (prev->flags & PF_WQ_WORKER) {
1579 //                              struct task_struct *to_wakeup;
1580 //
1581 //                              to_wakeup = wq_worker_sleeping(prev, cpu);
1582 //                              if (to_wakeup)
1583 //                                      try_to_wake_up_local(to_wakeup);
1584 //                      }
1585 //              }
1586 ////            switch_count = &prev->nvcsw;
1587 //      }
1588 //
1589 //      pre_schedule(rq, prev);
1590 //
1591 ////    if (unlikely(!rq->nr_running))
1592 ////            idle_balance(cpu, rq);
1593 //
1594 ////    put_prev_task(rq, prev);
1595 //      if ((prev != rq->idle) && prev->on_rq) {
1596 //              list_add_tail(&prev->rq_tasks, &rq->rq_list);
1597 //      }
1598 //
1599 //      /*      In case the only runnable task gets deactivated, we need to schedule
1600 //       *      the idle task.
1601 //       */
1602 ////    next = pick_next_task(rq);
1603 //      if (!list_empty(&rq->rq_list)) {
1604 //              assert_raw_spin_locked(&rq->lock);
1605 //              next = list_first_entry(&rq->rq_list, struct task_struct, rq_tasks);
1606 //              list_del(&next->rq_tasks);
1607 //      }
1608 //      else {
1609 //              next = rq->idle;
1610 //      }
1611 //      next->se.exec_start = rq->clock_task;
1612 //
1613 //
1614 //      clear_tsk_need_resched(prev);
1615 //      rq->skip_clock_update = 0;
1616 //
1617 //      if (likely(prev != next)) {
1618 //              rq->nr_switches++;
1619 //              rq->curr = next;
1620 ////            ++*switch_count;
1621 //
1622 //              context_switch(rq, prev, next); /* unlocks the rq */
1623 //
1624 //              // TODO: remove irq enable
1625 //              arch_local_irq_enable();
1626 //
1627 //              /*
1628 //               * The context switch has flipped the stack from under us
1629 //               * and restored the local variables which were saved when
1630 //               * this task called schedule() in the past. prev == current
1631 //               * is still correct, but it can be moved to another cpu/rq.
1632 //               */
1633 //              cpu = smp_processor_id();
1634 //              rq = cpu_rq(cpu);
1635 //      }
1636 //      else
1637 //              raw_spin_unlock_irq(&rq->lock);
1638 //
1639 //      post_schedule(rq);
1640 //
1641 //      sched_preempt_enable_no_resched();
1642 //      if (need_resched())
1643 //              goto need_resched;
1644 //}
1645 //
1646 ///*
1647 // * kernel/sched/core.c:2966
1648 // */
1649 //asmlinkage void __sched schedule(void)
1650 //{
1651 ////    struct task_struct *tsk = current;
1652 ////
1653 ////    if (!tsk->state || tsk_is_pi_blocked(tsk))
1654 ////            return;
1655 //
1656 //      __schedule();
1657 //}
1658 //EXPORT_SYMBOL(schedule);
1659 //
1660 ///*
1661 // * kernel/sched/core.c:3125
1662 // * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
1663 // * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1664 // * number) then we wake all the non-exclusive tasks and one exclusive task.
1665 // *
1666 // * There are circumstances in which we can try to wake a task which has already
1667 // * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1668 // * zero in this (rare) case, and we handle it by continuing to scan the queue.
1669 // */
1670 //static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
1671 //                      int nr_exclusive, int wake_flags, void *key)
1672 //{
1673 //      wait_queue_t *curr, *next;
1674 //
1675 //      list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
1676 //              unsigned flags = curr->flags;
1677 //
1678 //              if (curr->func(curr, mode, wake_flags, key) &&
1679 //                              (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1680 //                      break;
1681 //      }
1682 //}
1683 //
1684 ///**
1685 // * kernel/sched/core.c:3149
1686 // * __wake_up - wake up threads blocked on a waitqueue.
1687 // * @q: the waitqueue
1688 // * @mode: which threads
1689 // * @nr_exclusive: how many wake-one or wake-many threads to wake up
1690 // * @key: is directly passed to the wakeup function
1691 // *
1692 // * It may be assumed that this function implies a write memory barrier before
1693 // * changing the task state if and only if any tasks are woken up.
1694 // */
1695 //void __wake_up(wait_queue_head_t *q, unsigned int mode,
1696 //                      int nr_exclusive, void *key)
1697 //{
1698 //      unsigned long flags;
1699 //
1700 //      spin_lock_irqsave(&q->lock, flags);
1701 //      __wake_up_common(q, mode, nr_exclusive, 0, key);
1702 //      spin_unlock_irqrestore(&q->lock, flags);
1703 //}
1704 //EXPORT_SYMBOL(__wake_up);
1705 //
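///*
// * For reference, a sketch of how the common wrappers in mainline
// * include/linux/wait.h reach this function (paraphrased, not part of this
// * file):
// *
// *      wake_up(x)               -> __wake_up(x, TASK_NORMAL, 1, NULL)
// *      wake_up_all(x)           -> __wake_up(x, TASK_NORMAL, 0, NULL)
// *      wake_up_interruptible(x) -> __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
// *
// * nr_exclusive == 0 therefore means "wake everything", matching the
// * description of __wake_up_common() above.
// */
//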
1706 ///**
1707 // * kernel/sched/core.c:1536
1708 // * wake_up_process - Wake up a specific process
1709 // * @p: The process to be woken up.
1710 // *
1711 // * Attempt to wake up the nominated process and move it to the set of runnable
1712 // * processes.  Returns 1 if the process was woken up, 0 if it was already
1713 // * running.
1714 // *
1715 // * It may be assumed that this function implies a write memory barrier before
1716 // * changing the task state if and only if any tasks are woken up.
1717 // */
1718 //int wake_up_process(struct task_struct *p)
1719 //{
1720 //      WARN_ON(task_is_stopped_or_traced(p));
1721 //      return try_to_wake_up(p, TASK_NORMAL, 0);
1722 //}
1723 //EXPORT_SYMBOL(wake_up_process);
1724 //
1725 //static inline long __sched
1726 //do_wait_for_common(struct completion *x,
1727 //                 long (*action)(long), long timeout, int state)
1728 //{
1729 //      if (!x->done) {
1730 //              DECLARE_WAITQUEUE(wait, current);
1731 //
1732 //              __add_wait_queue_tail_exclusive(&x->wait, &wait);
1733 //              do {
1734 //                      if (signal_pending_state(state, current)) {
1735 //                              timeout = -ERESTARTSYS;
1736 //                              break;
1737 //                      }
1738 //                      __set_current_state(state);
1739 //                      spin_unlock_irq(&x->wait.lock);
1740 //                      timeout = action(timeout);
1741 //                      spin_lock_irq(&x->wait.lock);
1742 //              } while (!x->done && timeout);
1743 //              __remove_wait_queue(&x->wait, &wait);
1744 //              if (!x->done)
1745 //                      return timeout;
1746 //      }
1747 //      x->done--;
1748 //      return timeout ?: 1;
1749 //}
1750 //
1751 //static inline long __sched
1752 //__wait_for_common(struct completion *x,
1753 //                long (*action)(long), long timeout, int state)
1754 //{
1755 //      might_sleep();
1756 //
1757 //      spin_lock_irq(&x->wait.lock);
1758 //      timeout = do_wait_for_common(x, action, timeout, state);
1759 //      spin_unlock_irq(&x->wait.lock);
1760 //      return timeout;
1761 //}
1762 //
1763 //static long __sched
1764 //wait_for_common(struct completion *x, long timeout, int state)
1765 //{
1766 //      return __wait_for_common(x, schedule_timeout, timeout, state);
1767 //}
1768 //
1769 ///**
1770 // * kernel/sched/core.c:3322
1771 // * wait_for_completion: - waits for completion of a task
1772 // * @x:  holds the state of this particular completion
1773 // *
1774 // * This waits to be signaled for completion of a specific task. It is NOT
1775 // * interruptible and there is no timeout.
1776 // *
1777 // * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
1778 // * and interrupt capability. Also see complete().
1779 // */
1780 //void __sched wait_for_completion(struct completion *x)
1781 //{
1782 //      wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1783 //}
1784 //EXPORT_SYMBOL(wait_for_completion);
1785 //
1786 ///**
1787 // * kernel/sched/core.c:3231
1788 // * complete: - signals a single thread waiting on this completion
1789 // * @x:  holds the state of this particular completion
1790 // *
1791 // * This will wake up a single thread waiting on this completion. Threads will be
1792 // * awakened in the same order in which they were queued.
1793 // *
1794 // * See also complete_all(), wait_for_completion() and related routines.
1795 // *
1796 // * It may be assumed that this function implies a write memory barrier before
1797 // * changing the task state if and only if any tasks are woken up.
1798 // */
1799 //void complete(struct completion *x)
1800 //{
1801 //      unsigned long flags;
1802 //
1803 //      spin_lock_irqsave(&x->wait.lock, flags);
1804 //      x->done++;
1805 //      __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1806 //      spin_unlock_irqrestore(&x->wait.lock, flags);
1807 //}
1808 //EXPORT_SYMBOL(complete);
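//
///*
// * Illustrative sketch (not part of this file): the typical pairing of
// * complete() with wait_for_completion() above.  demo_done, demo_worker and
// * demo_waiter are invented names; assumes <linux/completion.h>.
// */
//static DECLARE_COMPLETION(demo_done);
//
//static void demo_worker(void)             /* runs in some other context */
//{
//      /* ... produce the result ... */
//      complete(&demo_done);               /* done++, wake exactly one waiter */
//}
//
//static void demo_waiter(void)
//{
//      wait_for_completion(&demo_done);    /* uninterruptible, no timeout */
//      /* ... the result is ready here ... */
//}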
1809 //
1810 ///**
1811 // * kernel/sched/core.c:2995
1812 // * schedule_preempt_disabled - called with preemption disabled
1813 // *
1814 // * Returns with preemption disabled. Note: preempt_count must be 1
1815 // */
1816 //void __sched schedule_preempt_disabled(void)
1817 //{
1818 //      sched_preempt_enable_no_resched();
1819 //      schedule();
1820 //      preempt_disable();
1821 //}
1822 //
1823 ///*
1824 // * kernel/sched/core.c:6858
1825 // */
1826 //int in_sched_functions(unsigned long addr)
1827 //{
1828 //      printk("\nin_sched_functions");
1829 //
1830 //      return 0;
1831 //}
1832 //
1833 ///*
1834 // * kernel/sched/core.c:4333
1835 // * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1836 // * call schedule, and on return reacquire the lock.
1837 // *
1838 // * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1839 // * operations here to prevent schedule() from being called twice (once via
1840 // * spin_unlock(), once by hand).
1841 // */
1842 //int __cond_resched_lock(spinlock_t *lock)
1843 //{
1844 //      printk("\n__cond_resched_lock");
1845 //
1846 //      return 0;
1847 //}
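//
///*
// * Illustrative sketch (not part of this file): __cond_resched_lock() is
// * normally reached through the cond_resched_lock() wrapper, used as a
// * preemption point inside a long loop that holds a spinlock.  demo_lock,
// * DEMO_NR_ITEMS and the loop body are invented for the example; note that
// * anything protected by the lock may change across the call.
// */
//static DEFINE_SPINLOCK(demo_lock);
//
//static void demo_long_walk(void)
//{
//      int i;
//
//      spin_lock(&demo_lock);
//      for (i = 0; i < DEMO_NR_ITEMS; i++) {
//              /* ... handle item i under demo_lock ... */
//
//              /* drop the lock, schedule if a resched is pending, retake it */
//              cond_resched_lock(&demo_lock);
//      }
//      spin_unlock(&demo_lock);
//}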
1848 //
1849 ///*
1850 // * kernel/sched/core.c:4315
1851 // */
1852 //static inline int should_resched(void)
1853 //{
1854 //      return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
1855 //}
1856 //
1857 //static void __cond_resched(void)
1858 //{
1859 //      add_preempt_count(PREEMPT_ACTIVE);
1860 //      __schedule();
1861 //      sub_preempt_count(PREEMPT_ACTIVE);
1862 //}
1863 //
1864 //int __sched _cond_resched(void)
1865 //{
1866 //      if (should_resched()) {
1867 //              __cond_resched();
1868 //              return 1;
1869 //      }
1870 //      return 0;
1871 //}
1872 //EXPORT_SYMBOL(_cond_resched);
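//
///*
// * Illustrative sketch (not part of this file): _cond_resched() backs the
// * cond_resched() macro, the standard voluntary preemption point in long
// * kernel loops.  demo_process_item() and nr_items are invented names.
// */
//static void demo_process_many(unsigned long nr_items)
//{
//      unsigned long i;
//
//      for (i = 0; i < nr_items; i++) {
//              demo_process_item(i);
//              cond_resched();         /* reschedules here if need_resched() */
//      }
//}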
1873 //
1874 ///*
1875 // * kernel/sched/core.c:4333
1876 // */
1877 //int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, void *key)
1878 //{
1879 //      return try_to_wake_up(curr->private, mode, wake_flags);
1880 //}
1881 //EXPORT_SYMBOL(default_wake_function);
1882 //
1883 ///**
1884 // * kernel/sched/core.c:3426
1885 // * wait_for_completion_killable: - waits for completion of a task (killable)
1886 // * @x:  holds the state of this particular completion
1887 // *
1888 // * This waits to be signaled for completion of a specific task. It can be
1889 // * interrupted by a kill signal.
1890 // *
1891 // * The return value is -ERESTARTSYS if interrupted, 0 if completed.
1892 // */
1893 //int __sched wait_for_completion_killable(struct completion *x)
1894 //{
1895 //      long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
1896 //      if (t == -ERESTARTSYS)
1897 //              return t;
1898 //      return 0;
1899 //}
1900 //EXPORT_SYMBOL(wait_for_completion_killable);
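//
///*
// * Illustrative sketch (not part of this file): handling the return value of
// * the killable variant.  demo_done is the invented completion from the
// * earlier sketch; demo_wait_killable is likewise an invented name.
// */
//static int demo_wait_killable(void)
//{
//      int ret = wait_for_completion_killable(&demo_done);
//
//      if (ret)                /* -ERESTARTSYS: a fatal signal arrived */
//              return ret;
//      return 0;               /* the completion was signalled */
//}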
1901 //
1902 ///**
1903 // * kernel/sched/core.c:3192
1904 // * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1905 // * @q: the waitqueue
1906 // * @mode: which threads
1907 // * @nr_exclusive: how many wake-one or wake-many threads to wake up
1908 // * @key: opaque value to be passed to wakeup targets
1909 // *
1910 // * The sync wakeup differs in that the waker knows that it will schedule
1911 // * away soon, so while the target thread will be woken up, it will not
1912 // * be migrated to another CPU - ie. the two threads are 'synchronized'
1913 // * with each other. This can prevent needless bouncing between CPUs.
1914 // *
1915 // * On UP it can prevent extra preemption.
1916 // *
1917 // * It may be assumed that this function implies a write memory barrier before
1918 // * changing the task state if and only if any tasks are woken up.
1919 // */
1920 //void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
1921 //                      int nr_exclusive, void *key)
1922 //{
1923 //      unsigned long flags;
1924 //      int wake_flags = WF_SYNC;
1925 //
1926 //      if (unlikely(!q))
1927 //              return;
1928 //
1929 //      if (unlikely(!nr_exclusive))
1930 //              wake_flags = 0;
1931 //
1932 //      spin_lock_irqsave(&q->lock, flags);
1933 //      __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1934 //      spin_unlock_irqrestore(&q->lock, flags);
1935 //}
1936 //EXPORT_SYMBOL_GPL(__wake_up_sync_key);
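//
///*
// * Illustrative sketch (not part of this file): the sync hint is normally
// * given through the wake_up_interruptible_sync() wrapper when the waker
// * knows it is about to block itself, e.g. a writer waking a reader just
// * before sleeping on a full buffer.  demo_wq is the invented waitqueue from
// * the __wake_up() sketch further up.
// */
//static void demo_handoff(void)
//{
//      /* -> __wake_up_sync(&demo_wq, TASK_INTERRUPTIBLE, 1) */
//      wake_up_interruptible_sync(&demo_wq);
//      /* ... caller goes to sleep shortly afterwards ... */
//}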
1937 //
1938 ///*
1939 // * kernel/sched/core.c:1543
1940 // */
1941 //int wake_up_state(struct task_struct *p, unsigned int state)
1942 //{
1943 //      WARN_ON(task_is_stopped_or_traced(p));
1944 //      return try_to_wake_up(p, state, 0);
1945 //}
1946 //EXPORT_SYMBOL(wake_up_state);
1947 //
1948 ///**
1949 // * kernel/sched/core.c:4389
1950 // * yield - yield the current processor to other threads.
1951 // *
1952 // * Do not ever use this function, there's a 99% chance you're doing it wrong.
1953 // *
1954 // * The scheduler is at all times free to pick the calling task as the most
1955 // * eligible task to run, if removing the yield() call from your code breaks
1956 // * it, it's already broken.
1957 // *
1958 // * Typical broken usage is:
1959 // *
1960 // * while (!event)
1961 // *    yield();
1962 // *
1963 // * where one assumes that yield() will let 'the other' process run that will
1964 // * make event true. If the current task is a SCHED_FIFO task that will never
1965 // * happen. Never use yield() as a progress guarantee!!
1966 // *
1967 // * If you want to use yield() to wait for something, use wait_event().
1968 // * If you want to use yield() to be 'nice' for others, use cond_resched().
1969 // * If you still want to use yield(), do not!
1970 // */
1971 //void __sched yield(void)
1972 //{
1973 //      printk("\nyield");
1974 //
1975 //      // TODO: SMP
1976 //
1977 //      return;
1978 //}
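//
///*
// * Illustrative sketch (not part of this file): the replacement the comment
// * above asks for.  Instead of
// *
// *      while (!demo_ready)
// *              yield();
// *
// * a waiter should sleep on a waitqueue until the condition holds
// * (demo_wq and demo_ready are the invented names from the __wake_up()
// * sketch):
// */
//static void demo_wait_for_event(void)
//{
//      wait_event(demo_wq, demo_ready);        /* sleeps, no busy yielding */
//}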
1979 //
1980 ///**
1981 // * kernel/sched/core.c:892
1982 // * task_curr - is this task currently executing on a CPU?
1983 // * @p: the task in question.
1984 // */
1985 //inline int task_curr(const struct task_struct *p)
1986 //{
1987 //      printk("\ntask_curr");
1988 //
1989 //      // TODO: SMP
1990 //
1991 //      return 0;
1992 //}
1993 //
1994 ///**
1995 // * kernel/sched/core.c:3736
1996 // * task_nice - return the nice value of a given task.
1997 // * @p: the task in question.
1998 // */
1999 //int task_nice(const struct task_struct *p)
2000 //{
2001 //      printk("\ntask_nice");
2002 //
2003 //      return 0;
2004 //}
2005 //
2006 ///*
2007 // * kernel/sched/core.c:3616
2008 // */
2009 //void set_user_nice(struct task_struct *p, long nice)
2010 //{
2011 ////    printk("\nset_user_nice");
2012 //
2013 //      return;
2014 //}
2015 //
2016 ///*
2017 // * kernel/sched/core.c:3169
2018 // */
2019 //void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
2020 //{
2021 //      printk("\n__wake_up_locked_key");
2022 //
2023 //      return;
2024 //}
2025 //
2026 ///*
2027 // * kernel/sched/core.c:4474
2028 // * This task is about to go to sleep on IO. Increment rq->nr_iowait so
2029 // * that process accounting knows that this is a task in IO wait state.
2030 // */
2031 //void __sched io_schedule(void)
2032 //{
2033 ////    printk("\nio_schedule");
2034 //
2035 //      struct rq *rq = raw_rq();
2036 //
2037 //      delayacct_blkio_start();
2038 //      atomic_inc(&rq->nr_iowait);
2039 //      blk_flush_plug(current);
2040 //      current->in_iowait = 1;
2041 //      schedule();
2042 //      current->in_iowait = 0;
2043 //      atomic_dec(&rq->nr_iowait);
2044 //      delayacct_blkio_end();
2045 //}
2046 //EXPORT_SYMBOL(io_schedule);
2047 //
2048 ///*
2049 // * kernel/sched/core.c:4489
2050 // */
2051 //long __sched io_schedule_timeout(long timeout)
2052 //{
2053 ////    printk("\nio_schedule_timeout");
2054 //      struct rq *rq = raw_rq();
2055 //      long ret;
2056 //
2057 //      delayacct_blkio_start();
2058 //      atomic_inc(&rq->nr_iowait);
2059 //      blk_flush_plug(current);
2060 //      current->in_iowait = 1;
2061 //      ret = schedule_timeout(timeout);
2062 //      current->in_iowait = 0;
2063 //      atomic_dec(&rq->nr_iowait);
2064 //      delayacct_blkio_end();
2065 //      return ret;
2066 //}
2067 //
2068 //
2069 ///*
2070 // * kernel/sched/core.c:7590
2071 // */
2072 //int sched_rt_handler(struct ctl_table *table, int write,
2073 //              void __user *buffer, size_t *lenp,
2074 //              loff_t *ppos)
2075 //{
2076 //      //printk("\nsched_rt_handler");
2077 //
2078 //      return 0;
2079 //}
2080 //
2081 ///*
2082 // * kernel/sched/core.c:3213
2083 // * __wake_up_sync - see __wake_up_sync_key()
2084 // */
2085 //void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2086 //{
2087 //      printk("\n__wake_up_sync");
2088 //
2089 //      return;
2090 //}
2091 //
2092 ///*
2093 // * kernel/sched/core.c:3163
2094 // * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2095 // */
2096 //void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
2097 //{
2098 //      printk("\n__wake_up_locked");
2099 //
2100 //      return;
2101 //}
2102 //
2103 ///**
2104 // * kernel/sched/core.c:3307
2105 // */
2106 //static long __sched
2107 //wait_for_common_io(struct completion *x, long timeout, int state)
2108 //{
2109 //      return __wait_for_common(x, io_schedule_timeout, timeout, state);
2110 //}
2111 //
2112 ///**
2113 // * kernel/sched/core.c:3355
2114 // * wait_for_completion_io: - waits for completion of a task
2115 // * @x:  holds the state of this particular completion
2116 // *
2117 // * This waits to be signaled for completion of a specific task. It is NOT
2118 // * interruptible and there is no timeout. The caller is accounted as waiting
2119 // * for IO.
2120 // */
2121 //void __sched wait_for_completion_io(struct completion *x)
2122 //{
2123 //      wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
2124 //}
2125 //EXPORT_SYMBOL(wait_for_completion_io);
2126 //
2127 ///**
2128 // * kernel/sched/core.c:3416
2129 // * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
2130 // * @x:  holds the state of this particular completion
2131 // * @timeout:  timeout value in jiffies
2132 // *
2133 // * This waits for either a completion of a specific task to be signaled or for a
2134 // * specified timeout to expire. The timeout is in jiffies. It is not
2135 // * interruptible. The caller is accounted as waiting for IO.
2136 // *
2137 // * The return value is 0 if timed out, and positive (at least 1, or number of
2138 // * jiffies left till timeout) if completed.
2139 // */
2140 //unsigned long __sched
2141 //wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
2142 //{
2143 //      return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
2144 //}
2145 //EXPORT_SYMBOL(wait_for_completion_io_timeout);
2146 //
2147 ///*
2148 // * kernel/sched/core.c:4634
2149 // */
2150 //void show_state_filter(unsigned long state_filter)
2151 //{
2152 //      //printk("\nshow_state_filter");
2153 //
2154 //      return;
2155 //}
2156 //
2157 ///**
2158 // * kernel/sched/core.c:3251
2159 // * complete_all: - signals all threads waiting on this completion
2160 // * @x:  holds the state of this particular completion
2161 // *
2162 // * This will wake up all threads waiting on this particular completion event.
2163 // *
2164 // * It may be assumed that this function implies a write memory barrier before
2165 // * changing the task state if and only if any tasks are woken up.
2166 // */
2167 //void complete_all(struct completion *x)
2168 //{
2169 //      unsigned long flags;
2170 //
2171 //      spin_lock_irqsave(&x->wait.lock, flags);
2172 //      x->done += UINT_MAX/2;
2173 //      __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
2174 //      spin_unlock_irqrestore(&x->wait.lock, flags);
2175 //}
2176 //EXPORT_SYMBOL(complete_all);
2177 //
2178 ///**
2179 // * kernel/sched/core.c:3341
2180 // * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
2181 // * @x:  holds the state of this particular completion
2182 // * @timeout:  timeout value in jiffies
2183 // *
2184 // * This waits for either a completion of a specific task to be signaled or for a
2185 // * specified timeout to expire. The timeout is in jiffies. It is not
2186 // * interruptible.
2187 // *
2188 // * The return value is 0 if timed out, and positive (at least 1, or number of
2189 // * jiffies left till timeout) if completed.
2190 // */
2191 //unsigned long __sched
2192 //wait_for_completion_timeout(struct completion *x, unsigned long timeout)
2193 //{
2194 //      return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
2195 //}
2196 //EXPORT_SYMBOL(wait_for_completion_timeout);
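//
///*
// * Illustrative sketch (not part of this file): interpreting the return value
// * of wait_for_completion_timeout() - 0 means the timeout expired, a positive
// * value is the number of jiffies that were left.  demo_done is the invented
// * completion from the earlier sketch; the 100ms value is arbitrary.
// */
//static int demo_wait_with_timeout(void)
//{
//      unsigned long left;
//
//      left = wait_for_completion_timeout(&demo_done, msecs_to_jiffies(100));
//      if (!left)
//              return -ETIMEDOUT;      /* timed out */
//      return 0;                       /* completed, 'left' jiffies to spare */
//}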
2197 //
2198 //
2199 //
2200 ///*
2201 // *
2202 // * SMP
2203 // *
2204 // */
2205 //#ifdef CONFIG_SMP
2206 //
2207 //struct migration_arg {
2208 //      struct task_struct *task;
2209 //      int dest_cpu;
2210 //};
2211 //
2212 ///*
2213 // * kernel/sched/core.c:4822
2214 // * Move (not current) task off this cpu, onto dest cpu. We're doing
2215 // * this because either it can't run here any more (set_cpus_allowed()
2216 // * away from this CPU, or CPU going down), or because we're
2217 // * attempting to rebalance this task on exec (sched_exec).
2218 // *
2219 // * So we race with normal scheduler movements, but that's OK, as long
2220 // * as the task is no longer on this CPU.
2221 // *
2222 // * Returns non-zero if task was successfully migrated.
2223 // */
2224 //static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
2225 //{
2226 //      struct rq *rq_dest, *rq_src;
2227 //      int ret = 0;
2228 //
2229 //      if (unlikely(!cpu_active(dest_cpu)))
2230 //              return ret;
2231 //
2232 //      rq_src = cpu_rq(src_cpu);
2233 //      rq_dest = cpu_rq(dest_cpu);
2234 //
2235 //      raw_spin_lock(&p->pi_lock);
2236 //      double_rq_lock(rq_src, rq_dest);
2237 //      /* Already moved. */
2238 //      if (task_cpu(p) != src_cpu)
2239 //              goto done;
2240 //      /* Affinity changed (again). */
2241 //      if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
2242 //              goto fail;
2243 //
2244 //      /*
2245 //       * If we're not on a rq, the next wake-up will ensure we're
2246 //       * placed properly.
2247 //       */
2248 //      if (p->on_rq) {
2249 ////            dequeue_task(rq_src, p, 0);
2250 //              list_del(&p->rq_tasks);
2251 //              // TODO: maybe not necessary since both rq locks are held
2252 //              p->on_rq = 0;
2253 //              set_task_cpu(p, dest_cpu);
2254 ////            enqueue_task(rq_dest, p, 0);
2255 //              list_add(&p->rq_tasks, &rq_dest->rq_list);
2256 //              // TODO: maybe not necessary since both rq locks are held
2257 //              p->on_rq = 1;
2258 ////            check_preempt_curr(rq_dest, p, 0);
2259 //              if (rq_dest->curr == rq_dest->idle)
2260 //                      resched_task(rq_dest->curr);
2261 //      }
2262 //done:
2263 //      ret = 1;
2264 //fail:
2265 //      double_rq_unlock(rq_src, rq_dest);
2266 //      raw_spin_unlock(&p->pi_lock);
2267 //      return ret;
2268 //}
2269 //
2270 ///*
2271 // * kernel/sched/core.c:4865
2272 // * migration_cpu_stop - this will be executed by a highprio stopper thread
2273 // * and performs thread migration by bumping thread off CPU then
2274 // * 'pushing' onto another runqueue.
2275 // */
2276 //static int migration_cpu_stop(void *data)
2277 //{
2278 //      struct migration_arg *arg = data;
2279 //
2280 //      /*
2281 //       * The original target cpu might have gone down and we might
2282 //       * be on another cpu but it doesn't matter.
2283 //       */
2284 //      local_irq_disable();
2285 //      __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
2286 //      local_irq_enable();
2287 //      return 0;
2288 //}
2289 //
2290 //
2291 //static void set_rq_online(struct rq *rq)
2292 //{
2293 //      if (!rq->online)
2294 //              rq->online = 1;
2295 //}
2296 //
2297 //static void set_rq_offline(struct rq *rq)
2298 //{
2299 //      if (rq->online)
2300 //              rq->online = 0;
2301 //}
2302 //
2303 ///*
2304 // * migration_call - callback that gets triggered when a CPU is added.
2305 // * Here we can start up the necessary migration thread for the new CPU.
2306 // */
2307 //static int __cpuinit
2308 //migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
2309 //{
2310 //      int cpu = (long)hcpu;
2311 //      unsigned long flags;
2312 //      struct rq *rq = cpu_rq(cpu);
2313 //
2314 //      switch (action & ~CPU_TASKS_FROZEN) {
2315 //
2316 //      case CPU_UP_PREPARE:
2317 ////            rq->calc_load_update = calc_load_update;
2318 //              break;
2319 //
2320 //      case CPU_ONLINE:
2321 //              /* Update our root-domain */
2322 //              raw_spin_lock_irqsave(&rq->lock, flags);
2323 ////            if (rq->rd) {
2324 ////                    BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
2325 ////
2326 //                      set_rq_online(rq);
2327 ////            }
2328 //              raw_spin_unlock_irqrestore(&rq->lock, flags);
2329 //              break;
2330 //
2331 //#ifdef CONFIG_HOTPLUG_CPU
2332 //      case CPU_DYING:
2333 //              sched_ttwu_pending();
2334 //              /* Update our root-domain */
2335 //              raw_spin_lock_irqsave(&rq->lock, flags);
2336 ////            if (rq->rd) {
2337 ////                    BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
2338 //                      set_rq_offline(rq);
2339 ////            }
2340 //              migrate_tasks(cpu);
2341 //              BUG_ON(rq->nr_running != 1); /* the migration thread */
2342 //              raw_spin_unlock_irqrestore(&rq->lock, flags);
2343 //              break;
2344 //
2345 //      case CPU_DEAD:
2346 ////            calc_load_migrate(rq);
2347 //              break;
2348 //#endif
2349 //      }
2350 //
2351 ////    update_max_interval();
2352 //
2353 //      return NOTIFY_OK;
2354 //}
2355 //
2356 ///*
2357 // * Register at high priority so that task migration (migrate_all_tasks)
2358 // * happens before everything else.  This has to be lower priority than
2359 // * the notifier in the perf_event subsystem, though.
2360 // */
2361 //static struct notifier_block __cpuinitdata migration_notifier = {
2362 //      .notifier_call = migration_call,
2363 //      .priority = CPU_PRI_MIGRATION,
2364 //};
2365 //
2366 //static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
2367 //                                    unsigned long action, void *hcpu)
2368 //{
2369 //      switch (action & ~CPU_TASKS_FROZEN) {
2370 //      case CPU_STARTING:
2371 //      case CPU_DOWN_FAILED:
2372 //              set_cpu_active((long)hcpu, true);
2373 //              return NOTIFY_OK;
2374 //      default:
2375 //              return NOTIFY_DONE;
2376 //      }
2377 //}
2378 //
2379 //static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
2380 //                                      unsigned long action, void *hcpu)
2381 //{
2382 //      switch (action & ~CPU_TASKS_FROZEN) {
2383 //      case CPU_DOWN_PREPARE:
2384 //              set_cpu_active((long)hcpu, false);
2385 //              return NOTIFY_OK;
2386 //      default:
2387 //              return NOTIFY_DONE;
2388 //      }
2389 //}
2390 //
2391 //static int __init migration_init(void)
2392 //{
2393 //      void *cpu = (void *)(long)smp_processor_id();
2394 //      int err;
2395 //
2396 //      /* Initialize migration for the boot CPU */
2397 //      err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
2398 //      BUG_ON(err == NOTIFY_BAD);
2399 //      migration_call(&migration_notifier, CPU_ONLINE, cpu);
2400 //      register_cpu_notifier(&migration_notifier);
2401 //
2402 //      /* Register cpu active notifiers */
2403 //      cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
2404 //      cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
2405 //
2406 //      return 0;
2407 //}
2408 //early_initcall(migration_init);
2409 //
2410 //
2411 //
2412 //void do_set_cpus_allowed(struct task_struct *p,
2413 //                             const struct cpumask *new_mask)
2414 //{
2415 ////    if (p->sched_class && p->sched_class->set_cpus_allowed)
2416 ////            p->sched_class->set_cpus_allowed(p, new_mask);
2417 //
2418 //      cpumask_copy(&p->cpus_allowed, new_mask);
2419 //      p->nr_cpus_allowed = cpumask_weight(new_mask);
2420 //}
2421 //
2422 //int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
2423 //{
2424 //      unsigned long flags;
2425 //      struct rq *rq;
2426 //      unsigned int dest_cpu;
2427 //      int ret = 0;
2428 //
2429 //      rq = task_rq_lock(p, &flags);
2430 //
2431 //      if (cpumask_equal(&p->cpus_allowed, new_mask))
2432 //              goto out;
2433 //
2434 //      if (!cpumask_intersects(new_mask, cpu_active_mask)) {
2435 //              ret = -EINVAL;
2436 //              goto out;
2437 //      }
2438 //
2439 //      if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
2440 //              ret = -EINVAL;
2441 //              goto out;
2442 //      }
2443 //
2444 //      do_set_cpus_allowed(p, new_mask);
2445 //
2446 //      /* Can the task run on the task's current CPU? If so, we're done */
2447 //      if (cpumask_test_cpu(task_cpu(p), new_mask))
2448 //              goto out;
2449 //
2450 //      dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
2451 //      if (p->on_rq) {
2452 //              struct migration_arg arg = { p, dest_cpu };
2453 //              /* Need help from migration thread: drop lock and wait. */
2454 //              task_rq_unlock(rq, p, &flags);
2455 //              stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
2456 //              tlb_migrate_finish(p->mm);
2457 //              return 0;
2458 //      }
2459 //out:
2460 //      task_rq_unlock(rq, p, &flags);
2461 //
2462 //      return ret;
2463 //}
2464 //EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
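//
///*
// * Illustrative sketch (not part of this file): pinning a kernel thread to a
// * single CPU via set_cpus_allowed_ptr().  demo_thread_fn is the invented
// * thread function from the wake_up_process() sketch; assumes
// * <linux/kthread.h> and <linux/cpumask.h>.  (kthread_bind() is the more
// * common helper for threads that have not run yet.)
// */
//static struct task_struct *demo_create_pinned(int cpu)
//{
//      struct task_struct *t;
//
//      t = kthread_create(demo_thread_fn, NULL, "demo/%d", cpu);
//      if (IS_ERR(t))
//              return t;
//      /* shrink the affinity mask to one CPU; may migrate the task */
//      set_cpus_allowed_ptr(t, cpumask_of(cpu));
//      wake_up_process(t);
//      return t;
//}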
2465 //
2466 //static void sched_ttwu_pending(void)
2467 //{
2468 //      struct rq *rq = this_rq();
2469 //      struct llist_node *llist = llist_del_all(&rq->wake_list);
2470 //      struct task_struct *p;
2471 //
2472 //      raw_spin_lock(&rq->lock);
2473 //
2474 //      while (llist) {
2475 //              p = llist_entry(llist, struct task_struct, wake_entry);
2476 //              llist = llist_next(llist);
2477 //              ttwu_do_activate(rq, p, 0);
2478 //      }
2479 //
2480 //      raw_spin_unlock(&rq->lock);
2481 //}
2482 //
2483 //void scheduler_ipi(void)
2484 //{
2485 //      if (llist_empty(&this_rq()->wake_list)) // && !got_nohz_idle_kick())
2486 //                      return;
2487 //
2488 //      /*
2489 //       * Not all reschedule IPI handlers call irq_enter/irq_exit, since
2490 //       * traditionally all their work was done from the interrupt return
2491 //       * path. Now that we actually do some work, we need to make sure
2492 //       * we do call them.
2493 //       *
2494 //       * Some archs already do call them, luckily irq_enter/exit nest
2495 //       * properly.
2496 //       *
2497 //       * Arguably we should visit all archs and update all handlers,
2498 //       * however a fair share of IPIs are still resched only so this would
2499 //       * somewhat pessimize the simple resched case.
2500 //       */
2501 //      irq_enter();
2502 //      sched_ttwu_pending();
2503 //
2504 //      /*
2505 //       * Check if someone kicked us for doing the nohz idle load balance.
2506 //       */
2507 //      if (unlikely(/*got_nohz_idle_kick() && */!need_resched())) {
2508 ////            this_rq()->idle_balance = 1;
2509 //              raise_softirq_irqoff(SCHED_SOFTIRQ);
2510 //      }
2511 //      irq_exit();
2512 //}
2513 //
2514 ///*
2515 // * kernel/sched/core.c:1011
2516 // * wait_task_inactive - wait for a thread to unschedule.
2517 // *
2518 // * If @match_state is nonzero, it's the @p->state value just checked and
2519 // * not expected to change.  If it changes, i.e. @p might have woken up,
2520 // * then return zero.  When we succeed in waiting for @p to be off its CPU,
2521 // * we return a positive number (its total switch count).  If a second call
2522 // * a short while later returns the same number, the caller can be sure that
2523 // * @p has remained unscheduled the whole time.
2524 // *
2525 // * The caller must ensure that the task *will* unschedule sometime soon,
2526 // * else this function might spin for a *long* time. This function can't
2527 // * be called with interrupts off, or it may introduce deadlock with
2528 // * smp_call_function() if an IPI is sent by the same process we are
2529 // * waiting to become inactive.
2530 // */
2531 //unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2532 //{
2533 //      unsigned long flags;
2534 //      int running, on_rq;
2535 //      unsigned long ncsw;
2536 //      struct rq *rq;
2537 //
2538 //      for (;;) {
2539 //              /*
2540 //               * We do the initial early heuristics without holding
2541 //               * any task-queue locks at all. We'll only try to get
2542 //               * the runqueue lock when things look like they will
2543 //               * work out!
2544 //               */
2545 //              rq = task_rq(p);
2546 //
2547 //              /*
2548 //               * If the task is actively running on another CPU
2549 //               * still, just relax and busy-wait without holding
2550 //               * any locks.
2551 //               *
2552 //               * NOTE! Since we don't hold any locks, it's not
2553 //               * even sure that "rq" stays as the right runqueue!
2554 //               * But we don't care, since "task_running()" will
2555 //               * return false if the runqueue has changed and p
2556 //               * is actually now running somewhere else!
2557 //               */
2558 //              while (task_running(rq, p)) {
2559 //                      if (match_state && unlikely(p->state != match_state))
2560 //                              return 0;
2561 //                      cpu_relax();
2562 //              }
2563 //
2564 //              /*
2565 //               * Ok, time to look more closely! We need the rq
2566 //               * lock now, to be *sure*. If we're wrong, we'll
2567 //               * just go back and repeat.
2568 //               */
2569 //              rq = task_rq_lock(p, &flags);
2570 ////            trace_sched_wait_task(p);
2571 //              running = task_running(rq, p);
2572 //              on_rq = p->on_rq;
2573 //              ncsw = 0;
2574 //              if (!match_state || p->state == match_state)
2575 //                      ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2576 //              task_rq_unlock(rq, p, &flags);
2577 //
2578 //              /*
2579 //               * If it changed from the expected state, bail out now.
2580 //               */
2581 //              if (unlikely(!ncsw))
2582 //                      break;
2583 //
2584 //              /*
2585 //               * Was it really running after all now that we
2586 //               * checked with the proper locks actually held?
2587 //               *
2588 //               * Oops. Go back and try again..
2589 //               */
2590 //              if (unlikely(running)) {
2591 //                      cpu_relax();
2592 //                      continue;
2593 //              }
2594 //
2595 //              /*
2596 //               * It's not enough that it's not actively running,
2597 //               * it must be off the runqueue _entirely_, and not
2598 //               * preempted!
2599 //               *
2600 //               * So if it was still runnable (but just not actively
2601 //               * running right now), it's preempted, and we should
2602 //               * yield - it could be a while.
2603 //               */
2604 //              if (unlikely(on_rq)) {
2605 //                      ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2606 //
2607 //                      set_current_state(TASK_UNINTERRUPTIBLE);
2608 //                      schedule_hrtimeout(&to, HRTIMER_MODE_REL);
2609 //                      continue;
2610 //              }
2611 //
2612 //              /*
2613 //               * Ahh, all good. It wasn't running, and it wasn't
2614 //               * runnable, which means that it will never become
2615 //               * running in the future either. We're all done!
2616 //               */
2617 //              break;
2618 //      }
2619 //
2620 //      return ncsw;
2621 //}
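//
///*
// * Illustrative sketch (not part of this file): the double-call pattern the
// * comment above describes - remember the switch count and compare it later
// * to be sure @p never got back on a CPU in between (ptrace uses this with
// * __TASK_TRACED).  demo_stayed_off_cpu is an invented name.
// */
//static bool demo_stayed_off_cpu(struct task_struct *p, long match_state)
//{
//      unsigned long ncsw = wait_task_inactive(p, match_state);
//
//      if (!ncsw)
//              return false;   /* state changed, it may have run */
//      /* ... some time later ... */
//      return wait_task_inactive(p, match_state) == ncsw;
//}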
2622 //
2623 ///***
2624 // * kernel/sched/core.c:1116
2625 // * kick_process - kick a running thread to enter/exit the kernel
2626 // * @p: the to-be-kicked thread
2627 // *
2628 // * Cause a process which is running on another CPU to enter
2629 // * kernel-mode, without any delay. (to get signals handled.)
2630 // *
2631 // * NOTE: this function doesn't have to take the runqueue lock,
2632 // * because all it wants to ensure is that the remote task enters
2633 // * the kernel. If the IPI races and the task has been migrated
2634 // * to another CPU then no harm is done and the purpose has been
2635 // * achieved as well.
2636 // */
2637 //void kick_process(struct task_struct *p)
2638 //{
2639 //      int cpu;
2640 //
2641 //      preempt_disable();
2642 //      cpu = task_cpu(p);
2643 //      if ((cpu != smp_processor_id()) && task_curr(p))
2644 //              smp_send_reschedule(cpu);
2645 //      preempt_enable();
2646 //}
2647 //EXPORT_SYMBOL_GPL(kick_process);
2648 //
2649 //void sched_set_stop_task(int cpu, struct task_struct *stop)
2650 //{
2651 //      printk("\nsched_set_stop_task");
2652 //}
2653 //
2654 //bool completion_done(struct completion *x)
2655 //{
2656 //      printk("\ncompletion_done");
2657 //
2658 //      return 0;
2659 //}
2660 //
2661 ///*
2662 // * kernel/sched/core.c:2605
2663 // * sched_exec - execve() is a valuable balancing opportunity, because at
2664 // * this point the task has the smallest effective memory and cache footprint.
2665 // */
2666 //void sched_exec(void)
2667 //{
2668 //      struct task_struct *p = current;
2669 //      unsigned long flags;
2670 //      int dest_cpu;
2671 //
2672 //      raw_spin_lock_irqsave(&p->pi_lock, flags);
2673 //      dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
2674 //      if (dest_cpu == smp_processor_id())
2675 //              goto unlock;
2676 //
2677 //      if (likely(cpu_active(dest_cpu))) {
2678 //              struct migration_arg arg = { p, dest_cpu };
2679 //
2680 //              raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2681 //              stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2682 //              return;
2683 //      }
2684 //unlock:
2685 //      raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2686 //}
2687 //
2688 //void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2689 //{
2690 //#ifdef CONFIG_SCHED_DEBUG
2691 //      /*
2692 //       * We should never call set_task_cpu() on a blocked task,
2693 //       * ttwu() will sort out the placement.
2694 //       */
2695 //      WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2696 //                      !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2697 //
2698 //#ifdef CONFIG_LOCKDEP
2699 //      /*
2700 //       * The caller should hold either p->pi_lock or rq->lock, when changing
2701 //       * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2702 //       *
2703 //       * sched_move_task() holds both and thus holding either pins the cgroup,
2704 //       * see task_group().
2705 //       *
2706 //       * Furthermore, all task_rq users should acquire both locks, see
2707 //       * task_rq_lock().
2708 //       */
2709 //      WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2710 //                                    lockdep_is_held(&task_rq(p)->lock)));
2711 //#endif
2712 //#endif
2713 //
2714 //      // TODO: SMP, needs to be implemented when load balancing is used
2715 ////    trace_sched_migrate_task(p, new_cpu);
2716 ////
2717 ////    if (task_cpu(p) != new_cpu) {
2718 ////            struct task_migration_notifier tmn;
2719 ////
2720 ////            if (p->sched_class->migrate_task_rq)
2721 ////                    p->sched_class->migrate_task_rq(p, new_cpu);
2722 ////            p->se.nr_migrations++;
2723 ////            perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
2724 ////
2725 ////            tmn.task = p;
2726 ////            tmn.from_cpu = task_cpu(p);
2727 ////            tmn.to_cpu = new_cpu;
2728 ////
2729 ////            atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
2730 ////    }
2731 //
2732 //      __set_task_cpu(p, new_cpu);
2733 //}
2734 //
2735 ///**
2736 // * kernel/sched/core.c:6820
2737 // */
2738 //void __init sched_init_smp(void)
2739 //{
2740 //      cpumask_var_t non_isolated_cpus;
2741 //
2742 //      alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
2743 //      alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
2744 //
2745 ////    sched_init_numa();
2746 //
2747 //      get_online_cpus();
2748 //      mutex_lock(&sched_domains_mutex);
2749 ////    init_sched_domains(cpu_active_mask);
2750 //      cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
2751 //      if (cpumask_empty(non_isolated_cpus))
2752 //              cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
2753 //      mutex_unlock(&sched_domains_mutex);
2754 //      put_online_cpus();
2755 //
2756 ////    hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE);
2757 ////    hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
2758 ////    hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
2759 //
2760 //      /* RT runtime code needs to handle some hotplug events */
2761 ////    hotcpu_notifier(update_runtime, 0);
2762 //
2763 ////    init_hrtick();
2764 //
2765 //      /* Move init over to a non-isolated CPU */
2766 //      if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
2767 //              BUG();
2768 ////    sched_init_granularity();
2769 //      free_cpumask_var(non_isolated_cpus);
2770 //
2771 ////    init_sched_rt_class();
2772 //}
2773 //
2774 //bool cpus_share_cache(int this_cpu, int that_cpu)
2775 //{
2776 //      return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
2777 //}
2778 //
2779 //#else
2780 //
2781 ///**
2782 // * kernel/sched/core.c:6856
2783 // */
2784 //void sched_init_smp(void)
2785 //{
2786 //      //printk("\nsched_init_smp");
2787 //
2788 //      return;
2789 //}
2790 //#endif /* CONFIG_SMP */
2791 //
2792 //
2793 //
2794 ///*
2795 // * Syscalls
2796 // *
2797 // * Help:
2798 // * SYSCALL_DEFINEx expands to an asmlinkage function definition (data_type function_name(...))
2799 // * asmlinkage:        tells the compiler that the arguments of the function are
2800 // *                            not passed in registers but are found on the stack
2801 // */
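//
///*
// * Illustrative sketch (not part of this file): roughly what the first stub
// * below looks like after macro expansion (the real macro also adds type
// * checks and metadata, so this is only the shape, not the exact output):
// *
// *      asmlinkage long sys_nice(int increment)
// *      {
// *              printk("SYSCALL nice\n");
// *              return 0;
// *      }
// */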
2802 //
2803 ///*
2804 // * kernel/sched/core.c:3686
2805 // * sys_nice - change the priority of the current process.
2806 // * @increment: priority increment
2807 // *
2808 // * sys_setpriority is a more generic, but much slower function that
2809 // * does similar things.
2810 // */
2811 //SYSCALL_DEFINE1(nice, int, increment)
2812 //{
2813 //      printk("SYSCALL nice\n");
2814 //      return 0;
2815 //}
2816 //
2817 ///**
2818 // * kernel/sched/core.c:4248
2819 // * sys_sched_getaffinity - get the cpu affinity of a process
2820 // * @pid: pid of the process
2821 // * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2822 // * @user_mask_ptr: user-space pointer to hold the current cpu mask
2823 // */
2824 //SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
2825 //              unsigned long __user *, user_mask_ptr)
2826 //{
2827 //      printk("SYSCALL sched_getaffinity\n");
2828 //      return 0;
2829 //}
2830 //
2831 ///**
2832 // * kernel/sched/core.c:4197
2833 // * sys_sched_setaffinity - set the cpu affinity of a process
2834 // * @pid: pid of the process
2835 // * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2836 // * @user_mask_ptr: user-space pointer to the new cpu mask
2837 // */
2838 //SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
2839 //              unsigned long __user *, user_mask_ptr)
2840 //{
2841 //      printk("SYSCALL sched_setaffinity\n");
2842 //
2843 //      // TODO: SMP
2844 //
2845 //      return 0;
2846 //}
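//
///*
// * Illustrative sketch (not part of this file): how the two affinity syscalls
// * above are reached from userspace through the glibc wrappers (plain
// * userspace C, not kernel code):
// *
// *      #define _GNU_SOURCE
// *      #include <sched.h>
// *
// *      cpu_set_t set;
// *
// *      CPU_ZERO(&set);
// *      CPU_SET(0, &set);                        /* run only on CPU 0   */
// *      sched_setaffinity(0, sizeof(set), &set); /* 0 == calling thread */
// *      sched_getaffinity(0, sizeof(set), &set); /* read the mask back  */
// */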
2847 //
2848 ///**
2849 // * kernel/sched/core.c:4562
2850 // * sys_sched_rr_get_interval - return the default timeslice of a process.
2851 // * @pid: pid of the process.
2852 // * @interval: userspace pointer to the timeslice value.
2853 // *
2854 // * this syscall writes the default timeslice value of a given process
2855 // * into the user-space timespec buffer. A value of '0' means infinity.
2856 // */
2857 //SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
2858 //              struct timespec __user *, interval)
2859 //{
2860 //      printk("SYSCALL sched_rr_get_interval\n");
2861 //      return 0;
2862 //}
2863 //
2864 ///**
2865 // * kernel/sched/core.c:4282
2866 // * sys_sched_yield - yield the current processor to other threads.
2867 // *
2868 // * This function yields the current CPU to other tasks. If there are no
2869 // * other threads running on this CPU then this function will return.
2870 // */
2871 //SYSCALL_DEFINE0(sched_yield)
2872 //{
2873 //      printk("SYSCALL sched_yield\n");
2874 //      return 0;
2875 //}
2876 //
2877 ///**
2878 // * kernel/sched/core.c:4027
2879 // * sys_sched_setscheduler - set/change the scheduler policy and RT priority
2880 // * @pid: the pid in question.
2881 // * @policy: new policy.
2882 // * @param: structure containing the new RT priority.
2883 // */
2884 //SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
2885 //              struct sched_param __user *, param)
2886 //{
2887 //      return 0;
2888 //}
2889 //
2890 ///**
2891 // * kernel/sched/core.c:4051
2892 // * sys_sched_getscheduler - get the policy (scheduling class) of a thread
2893 // * @pid: the pid in question.
2894 // */
2895 //SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
2896 //{
2897 //      return 0;
2898 //}
2899 //
2900 ///**
2901 // * kernel/sched/core.c:4512
2902 // * sys_sched_get_priority_max - return maximum RT priority.
2903 // * @policy: scheduling class.
2904 // *
2905 // * this syscall returns the maximum rt_priority that can be used
2906 // * by a given scheduling class.
2907 // */
2908 //SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
2909 //{
2910 //      return 0;
2911 //}
2912 //
2913 ///**
2914 // * kernel/sched/core.c:4537
2915 // * sys_sched_get_priority_min - return minimum RT priority.
2916 // * @policy: scheduling class.
2917 // *
2918 // * this syscall returns the minimum rt_priority that can be used
2919 // * by a given scheduling class.
2920 // */
2921 //SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
2922 //{
2923 //      return 0;
2924 //}
2925 //
2926 ///**
2927 // * kernel/sched/core.c:4042
2928 // * sys_sched_setparam - set/change the RT priority of a thread
2929 // * @pid: the pid in question.
2930 // * @param: structure containing the new RT priority.
2931 // */
2932 //SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
2933 //{
2934 //      return 0;
2935 //}
2936 //
2937 ///**
2938 // * kernel/sched/core.c:4077
2939 // * sys_sched_getparam - get the RT priority of a thread
2940 // * @pid: the pid in question.
2941 // * @param: structure containing the RT priority.
2942 // */
2943 //SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
2944 //{
2945 //      return 0;
2946 //}