Merge remote-tracking branch 'origin/clustered' into aks_dev_clus
[projects/modsched/linux.git] / kernel / process_server.c
index 23efc9c..f7fb5d0 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/pcn_perf.h> // performance measurement
 #include <linux/string.h>
 
-#include <linux/popcorn.h>
+#include <linux/popcorn_cpuinfo.h>
 
 #include <asm/pgtable.h>
 #include <asm/atomic.h>
 
 unsigned long get_percpu_old_rsp(void);
 
+#include <linux/futex.h>
+#define NSIG 32
+
+#include <linux/signal.h>
+#include <linux/fcntl.h>
+#include "futex_remote.h"
 /**
  * General purpose configuration
  */
@@ -61,6 +67,7 @@ unsigned long get_percpu_old_rsp(void);
 // migrate in response to a mapping query.
 #define MAX_MAPPINGS 1
 
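+/* sys_topen() is defined elsewhere in the Popcorn tree; the declaration
+ * here avoids an implicit-declaration warning at its call sites. */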
+extern long sys_topen(const char __user *filename, int flags, int mode, int fd);
+
 /**
  * Use the preprocessor to turn off printk.
  */
@@ -241,7 +248,10 @@ static void perf_init(void) {
 #define PERF_INIT() 
 #define PERF_MEASURE_START(x) -1
 #define PERF_MEASURE_STOP(x, y, z)
 #endif /* !CONFIG_POPCORN_PERF */
+
+static DECLARE_WAIT_QUEUE_HEAD(countq);
 
 /**
  * Library
@@ -342,6 +352,13 @@ typedef struct _clone_data {
     unsigned long previous_cpus;
     vma_data_t* vma_list;
     vma_data_t* pending_vma_list;
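+    /* Signal state carried over from the origin task: blocked/pending
+     * signal sets, the alternate signal stack and the handler table.
+     * Applied to the new task in process_server_import_address_space(). */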
+    int origin_pid;            /* mklinux_akshay */
+    sigset_t remote_blocked, remote_real_blocked;
+    sigset_t remote_saved_sigmask;
+    struct sigpending remote_pending;
+    unsigned long sas_ss_sp;
+    size_t sas_ss_size;
+    struct k_sigaction action[_NSIG];
 } clone_data_t;
 
 /**
@@ -458,6 +475,13 @@ typedef struct _clone_request {
     int prio, static_prio, normal_prio; //from sched.c
        unsigned int rt_priority; //from sched.c
        int sched_class; //from sched.c but here we are using SCHED_NORMAL, SCHED_FIFO, etc.
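+    /* Signal state of the migrating task, shipped with the clone request
+     * and copied into clone_data_t on the receiving kernel. */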
+    int origin_pid;            /* mklinux_akshay */
+    sigset_t remote_blocked, remote_real_blocked;
+    sigset_t remote_saved_sigmask;
+    struct sigpending remote_pending;
+    unsigned long sas_ss_sp;
+    size_t sas_ss_size;
+    struct k_sigaction action[_NSIG];
     unsigned long previous_cpus;
 } clone_request_t;
 
@@ -3275,6 +3299,7 @@ void process_group_exit_item(struct work_struct* work) {
     group_exit_work_t* w = (group_exit_work_t*) work;
     struct task_struct *task = NULL;
     struct task_struct *g;
+    unsigned long flags;
 
     //int perf = PERF_MEASURE_START(&perf_process_group_exit_item);
     PSPRINTK("%s: entered\n",__func__);
@@ -3285,11 +3310,20 @@ void process_group_exit_item(struct work_struct* work) {
         if(task->tgroup_home_id == w->tgroup_home_id &&
            task->tgroup_home_cpu == w->tgroup_home_cpu) {
             
-            if(!task->represents_remote) {
-                // active, send sigkill
-                PSPRINTK("Issuing SIGKILL to pid %d\n",task->pid);
-                kill_pid(task_pid(task), SIGKILL, 1);
-            }
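+            /* Active (non-shadow) task: tear down its robust futex list and
+             * queue SIGKILL directly under the sighand lock, in the style of
+             * zap_other_threads(), rather than going through kill_pid(). */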
+            if (!task->represents_remote) {
+                exit_robust_list(task);
+                task->robust_list = NULL;
+
+                // active, send sigkill
+                lock_task_sighand(task, &flags);
+
+                task_clear_jobctl_pending(task, JOBCTL_PENDING_MASK);
+                sigaddset(&task->pending.signal, SIGKILL);
+                signal_wake_up(task, 1);
+                clear_tsk_thread_flag(task, TIF_USER_RETURN_NOTIFY);
+
+                unlock_task_sighand(task, &flags);
+            }
 
             // If it is a shadow task, it will eventually
             // get killed when its corresponding active task
@@ -4437,6 +4471,19 @@ perf_cc = native_read_tsc();
     clone_data->sched_class = request->sched_class;
     clone_data->lock = __SPIN_LOCK_UNLOCKED(&clone_data->lock);
 
+    /* mklinux_akshay: signal state shipped with the clone request */
+    clone_data->origin_pid = request->origin_pid;
+    clone_data->remote_blocked = request->remote_blocked;
+    clone_data->remote_real_blocked = request->remote_real_blocked;
+    clone_data->remote_saved_sigmask = request->remote_saved_sigmask;
+    clone_data->remote_pending = request->remote_pending;
+
+    clone_data->sas_ss_sp = request->sas_ss_sp;
+    clone_data->sas_ss_size = request->sas_ss_size;
+
+    int cnt;
+    for (cnt = 0; cnt < _NSIG; cnt++)
+        clone_data->action[cnt] = request->action[cnt];
+
     /*
      * Pull in vma data
      */
@@ -4736,6 +4783,13 @@ int process_server_import_address_space(unsigned long* ip,
                         PSPRINTK("vma->pte_curr == null\n");
                     }
                     while(pte_curr) {
+                        // MAP it
+                        int domapping = 1;
+                        pte_t *remap_pte = NULL;
+                        pmd_t *remap_pmd = NULL;
+                        pud_t *remap_pud = NULL;
+                        pgd_t *remap_pgd = NULL;
+
                         PS_DOWN_WRITE(&current->mm->mmap_sem);
                         err = remap_pfn_range_remaining(current->mm,
                                                         vma,
@@ -4888,6 +4942,21 @@ int process_server_import_address_space(unsigned long* ip,
     current->thread.usersp = clone_data->thread_usersp;
    
 
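+    /* Apply the migrated signal state to the freshly imported task: merge
+     * the blocked masks, take over pending signals, the alternate signal
+     * stack and the signal handler table from the origin. */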
+    /* mklinux_akshay */
+    current->origin_pid = clone_data->origin_pid;
+    sigorsets(&current->blocked, &current->blocked, &clone_data->remote_blocked);
+    sigorsets(&current->real_blocked, &current->real_blocked, &clone_data->remote_real_blocked);
+    sigorsets(&current->saved_sigmask, &current->saved_sigmask, &clone_data->remote_saved_sigmask);
+    current->pending = clone_data->remote_pending;
+    current->sas_ss_sp = clone_data->sas_ss_sp;
+    current->sas_ss_size = clone_data->sas_ss_size;
+
+    printk(KERN_ALERT "origin pid {%d}-{%d}\n", current->origin_pid, clone_data->origin_pid);
+
+    int cnt;
+    for (cnt = 0; cnt < _NSIG; cnt++)
+        current->sighand->action[cnt] = clone_data->action[cnt];
+
     // Set output variables.
     *sp = clone_data->thread_usersp;
     *ip = clone_data->regs.ip;
@@ -4955,10 +5024,10 @@ int process_server_import_address_space(unsigned long* ip,
 
 
     perf_e = native_read_tsc();
-    printk("%s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu (%d)\n",
+    printk("%s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu (%d) {%d}\n",
             __func__,
             perf_aa, perf_bb, perf_cc, perf_dd, perf_ee,
-            perf_a, perf_b, perf_c, perf_d, perf_e, current->t_home_id);
+            perf_a, perf_b, perf_c, perf_d, perf_e, current->t_home_id, current->pid);
 
     return 0;
 }
@@ -5947,15 +6016,16 @@ int process_server_dup_task(struct task_struct* orig, struct task_struct* task)
         task->tgroup_home_cpu = orig->tgroup_home_cpu;
         task->tgroup_home_id = orig->tgroup_home_id;
         task->tgroup_distributed = 1;
+
+       // task->origin_pid = orig->origin_pid;
     } else {
         task->tgroup_home_cpu = home_kernel;
         task->tgroup_home_id = orig->tgid;
         task->tgroup_distributed = 0;
     }
-
+    //printk(KERN_ALERT "TGID {%d}\n", task->tgid);
     return 1;
 }
-
 /**
  * @brief Migrate the specified task <task> to a CPU on which
  * it has not yet executed.
@@ -5988,15 +6058,40 @@ static int do_migration_to_new_cpu(struct task_struct* task, int cpu) {
     // This will be a placeholder process for the remote
     // process that is subsequently going to be started.
     // Block its execution.
-    __set_task_state(task,TASK_UNINTERRUPTIBLE);
 
+    // set_task_state(task, TASK_UNINTERRUPTIBLE);
+    // mklinux_akshay: the placeholder is now blocked interruptibly at the
+    // end of this function instead of uninterruptibly here.
+
+    int sig;
+    struct task_struct *t = current;
     // Book keeping for previous cpu bitmask.
     set_bit(smp_processor_id(),&task->previous_cpus);
 
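+    /* Debug: dump the signal numbers currently blocked and pending for
+     * the calling task. */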
+    printk("Process/futex: blocked signals\n");
+    for (sig = 1; sig < NSIG; sig++) {
+        if (sigismember(&t->blocked, sig))
+            printk("POP: %d\n", sig);
+    }
+    printk("Process/futex: pending signals\n");
+    for (sig = 1; sig < NSIG; sig++) {
+        if (sigismember(&t->pending.signal, sig))
+            printk("POP: %d\n", sig);
+    }
     // Book keeping for placeholder process.
     task->represents_remote = 1;
     task->t_distributed = 1;
 
+    /* mklinux_akshay: record the origin pid; if the task has no previous
+     * pid it is its own origin, otherwise origin_pid is already set. */
+    if (task->prev_pid == -1)
+        task->origin_pid = task->pid;
+
+    struct task_struct *par = task->parent;
+
     // Book keeping for distributed threads.
     task->tgroup_distributed = 1;
     do_each_thread(g,tgroup_iterator) {
@@ -6095,7 +6190,22 @@ static int do_migration_to_new_cpu(struct task_struct* task, int cpu) {
     request->normal_prio = task->normal_prio;
     request->rt_priority = task->rt_priority;
     request->sched_class = task->policy;
-    
+
+    /* mklinux_akshay: ship the task's signal state with the clone request */
+    if (task->prev_pid == -1)
+        request->origin_pid = task->pid;
+    else
+        request->origin_pid = task->origin_pid;
+    request->remote_blocked = task->blocked;
+    request->remote_real_blocked = task->real_blocked;
+    request->remote_saved_sigmask = task->saved_sigmask;
+    request->remote_pending = task->pending;
+    request->sas_ss_sp = task->sas_ss_sp;
+    request->sas_ss_size = task->sas_ss_size;
+    int cnt;
+    for (cnt = 0; cnt < _NSIG; cnt++)
+        request->action[cnt] = task->sighand->action[cnt];
+
     // struct thread_struct -------------------------------------------------------
     // have a look at: copy_thread() arch/x86/kernel/process_64.c 
     // have a look at: struct thread_struct arch/x86/include/asm/processor.h
@@ -6171,7 +6281,9 @@ else
 
     //dump_task(task,regs,request->stack_ptr);
     
-    PERF_MEASURE_STOP(&perf_process_server_do_migration,"migration to new cpu",perf);
+    /* mklinux_akshay: block the placeholder interruptibly instead of
+     * TASK_UNINTERRUPTIBLE (see the comment near the top of this function). */
+    set_task_state(task, TASK_INTERRUPTIBLE);
+
+    PERF_MEASURE_STOP(&perf_process_server_do_migration, "migration to new cpu", perf);
 
     return PROCESS_SERVER_CLONE_SUCCESS;
 
@@ -6260,6 +6372,7 @@ int process_server_do_migration(struct task_struct* task, int cpu) {
     int ret = 0;
 
 #ifndef SUPPORT_FOR_CLUSTERING
+    printk(KERN_ALERT "%s: normal migration\n", __func__);
     if(test_bit(cpu,&task->previous_cpus)) {
         ret = do_migration_back_to_previous_cpu(task,cpu);
     } else {
@@ -6271,6 +6384,7 @@ int process_server_do_migration(struct task_struct* task, int cpu) {
                       "(cpu: %d present_mask)\n", __func__, task, cpu);
         return -EBUSY;
     }
+    printk(KERN_ALERT "%s: clustering activated\n", __func__);
     // TODO seems like that David is using previous_cpus as a bitmask.. 
     // TODO well this must be upgraded to a cpumask, declared as usigned long in task_struct
     struct list_head *iter;
@@ -6282,6 +6396,7 @@ int process_server_do_migration(struct task_struct* task, int cpu) {
         cpuid = objPtr->_data._processor;
         pcpum = &(objPtr->_data._cpumask);
        if (cpumask_test_cpu(cpu, pcpum)) {
+               printk(KERN_ALERT "%s: cpuid {%d}\n", __func__, cpuid);
                if ( bitmap_intersects(cpumask_bits(pcpum),
                                       &(task->previous_cpus),
                                       (sizeof(unsigned long)*8)) )
@@ -6336,8 +6451,11 @@ static int __init process_server_init(void) {
     /*
      * Cache some local information.
      */
-    _cpu = smp_processor_id();
-
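+    /* With clustering support, the local kernel's cpu id is the first CPU
+     * in the present mask rather than the CPU running this init code. */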
+#ifndef SUPPORT_FOR_CLUSTERING
+    _cpu = smp_processor_id();
+#else
+    _cpu = cpumask_first(cpu_present_mask);
+#endif
+
     /*
      * Init global semaphores
      */