Merge remote-tracking branch 'origin/davek' into aks_dev
author    akshay <akshay87@vt.edu>
          Wed, 28 May 2014 04:27:31 +0000 (00:27 -0400)
committer akshay <akshay87@vt.edu>
          Wed, 28 May 2014 04:27:31 +0000 (00:27 -0400)
Conflicts:
include/linux/kmod.h
include/linux/process_server.h
kernel/process_server.c
kernel/sched.c

arch/x86/kernel/cpu/proc_remote.c
fs/binfmt_elf.c
include/linux/kmod.h
include/linux/pcn_kmsg.h
include/linux/popcorn.h
kernel/kinit.c
kernel/kmod.c
kernel/process_server.c
kernel/sched.c

diff --cc arch/x86/kernel/cpu/proc_remote.c
index cc3a264,0000000..bfe0b23
mode 100644,000000..100644
--- /dev/null
+++ b/arch/x86/kernel/cpu/proc_remote.c
@@@ -1,128 -1,0 +1,128 @@@
- #include <popcorn/cpuinfo.h>
 +/*
 + * This file is for obtaining remote CPU info
 + *
 + * Akshay
 + */
 +
 +#include <linux/kernel.h>
 +#include <linux/kthread.h>
 +#include <linux/smp.h>
 +#include <linux/slab.h>
 +#include <linux/timer.h>
 +#include <linux/pcn_kmsg.h>
 +#include <linux/delay.h>
 +#include <linux/string.h>
 +#include <linux/seq_file.h>
 +
 +
++#include <linux/popcorn_cpuinfo.h>
 +
 +
 +#include <linux/list.h>
 +
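 +/* Set PRINT_MESSAGES to 1 to enable the verbose PRINTK() logging in this file */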
 +#define PRINT_MESSAGES 0
 +#if PRINT_MESSAGES
 +#define PRINTK(...) printk(__VA_ARGS__)
 +#else
 +#define PRINTK(...) do {} while (0)
 +#endif
 +
 +/*
 + *  Variables
 + */
 +extern struct list_head rlist_head;
 +static int _cpu=-1;
 +/*
 + * ******************************* Common Functions **********************************************************
 + */
 +
 +/*
 + * ************************************* Function (hook) to be called from other file ********************
 + */
 +int remote_proc_cpu_info(struct seq_file *m) {
 +
 +	int i;
 +	struct list_head *iter;
 +	_remote_cpu_info_list_t *objPtr;
 +
 +	list_for_each(iter, &rlist_head) {
 +		objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
 +
 +		seq_printf(m, "*********Remote CPU*****\n");
 +
 +		seq_printf(m, "processor\t: %u\n"
 +				"vendor_id\t: %s\n"
 +				"cpu family\t: %d\n"
 +				"model\t\t: %u\n"
 +				"model name\t: %s\n", objPtr->_data._processor,
 +				objPtr->_data._vendor_id, objPtr->_data._cpu_family,
 +				objPtr->_data._model, objPtr->_data._model_name);
 +
 +		if (objPtr->_data._stepping != -1)
 +			seq_printf(m, "stepping\t: %d\n", objPtr->_data._stepping);
 +		else
 +			seq_printf(m, "stepping\t: unknown\n");
 +
 +		seq_printf(m, "microcode\t: 0x%lx\n", objPtr->_data._microcode);
 +
 +		/* _cpu_freq is stored in kHz by fill_cpu_info() */
 +		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
 +				objPtr->_data._cpu_freq / 1000,
 +				objPtr->_data._cpu_freq % 1000);
 +
 +		seq_printf(m, "cache size\t: %d KB\n", objPtr->_data._cache_size);
 +
 +		seq_printf(m, "flags\t\t:");
 +		seq_printf(m, " %s", objPtr->_data._flags);
 +
 +		seq_printf(m, "\nbogomips\t: %lu\n", objPtr->_data._nbogomips);
 +
 +		seq_printf(m, "TLB size\t: %d 4K pages\n", objPtr->_data._TLB_size);
 +
 +		seq_printf(m, "clflush size\t: %u\n", objPtr->_data._clflush_size);
 +		seq_printf(m, "cache_alignment\t: %d\n",
 +				objPtr->_data._cache_alignment);
 +		seq_printf(m,
 +				"address sizes\t: %u bits physical, %u bits virtual\n",
 +				objPtr->_data._bits_physical, objPtr->_data._bits_virtual);
 +
 +		seq_printf(m, "power management:%s\n",
 +				objPtr->_data._power_management);
 +
 +		seq_printf(m, "global cpumask available: \n");
 +		for_each_cpu(i, cpu_global_online_mask) {
 +			seq_printf(m, "%d,\t", i);
 +		}
 +		seq_printf(m, "\n\n");
 +	}
 +
 +	return 0;
 +}
 +
 +static int __init proc_cpu_handler_init(void)
 +{
 +	_cpu = smp_processor_id();
 +	return 0;
 +}
 +/**
 + * Register remote proc cpu info init function as
 + * module initialization function.
 + */
 +late_initcall(proc_cpu_handler_init);
diff --cc fs/binfmt_elf.c
Simple merge
diff --cc include/linux/kmod.h
  #include <linux/compiler.h>
  #include <linux/workqueue.h>
  #include <linux/sysctl.h>
 +
 +/*mklinux_akshay*/
 +#include <linux/signal.h>
+ #include <linux/process_server.h>
  
  #define KMOD_PATH_LEN 256
  
@@@ -78,11 -77,7 +80,12 @@@ struct subprocess_info 
      int   remote_cpu;
      int   clone_request_id;
      struct pt_regs remote_regs;
 +
 +    /*mklinux_akshay*/
 +    pid_t origin_pid;
 +    /*mklinux_akshay*/
 +
+ #endif
  };
  
  /* Allocate a subprocess_info structure */
diff --cc include/linux/pcn_kmsg.h
Simple merge
diff --cc include/linux/popcorn.h
index e160e2c,e160e2c..0000000
deleted file mode 100644,100644
+++ /dev/null
@@@ -1,37 -1,37 +1,0 @@@
--
-- struct _remote_cpu_info_data
-- {
--        unsigned int _processor;
--        char _vendor_id[16];
--        int _cpu_family;
--        unsigned int _model;
--        char _model_name[64];
--        int _stepping;
--        unsigned long _microcode;
--        unsigned _cpu_freq;
--        int _cache_size;
--        char _fpu[3];
--        char _fpu_exception[3];
--        int _cpuid_level;
--        char _wp[3];
--        char _flags[512];
--        unsigned long _nbogomips;
--        int _TLB_size;
--        unsigned int _clflush_size;
--        int _cache_alignment;
--        unsigned int _bits_physical;
--        unsigned int _bits_virtual;
--        char _power_management[64];
--        struct cpumask _cpumask;
-- };
--
--
--typedef struct _remote_cpu_info_data _remote_cpu_info_data_t;
--struct _remote_cpu_info_list
-- {
--         _remote_cpu_info_data_t _data;
--         struct list_head cpu_list_member;
--
--  };
--typedef struct _remote_cpu_info_list _remote_cpu_info_list_t;
--
diff --cc kernel/kinit.c
  #include <linux/pcn_kmsg.h>
  #include <linux/delay.h>
  #include <linux/string.h>
 +#include <linux/jhash.h>
  #include <linux/cpufreq.h>
  
- #include <popcorn/cpuinfo.h>
++#include <linux/popcorn_cpuinfo.h>
  #include <linux/bootmem.h>
 -//#include <linux/multikernel.h>
 +#include <popcorn/remote_pfn.h>
 +
  
  extern unsigned long orig_boot_params;
  #define max_nodes (1 << 8)
@@@ -62,8 -59,8 +62,6 @@@ static int __init popcorn_kernel_init(c
  early_param("kernel_init", popcorn_kernel_init);
  
  
--
--
  /*
   *  Variables
   */
@@@ -177,258 -168,9 +175,264 @@@ _pfn_range_list_t* d_pfn(struct list_he
  }
  
  
 -///////////////////////////////////////////////////////////////////////////////
 +static int handle_remote_pfn_response(
 +              struct pcn_kmsg_message* inc_msg) {
 +      _remote_pfn_response_t* msg = (_remote_pfn_response_t*) inc_msg;
 +
 +      printk("%s: Entered remote pfn response \n", "handle_remote_pfn_response");
 +
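 +	/* publish the payload and wake the requester sleeping in _init_remote_pfn() */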
 +      wait_pfn_list = 1;
 +      if (msg != NULL)
 +              pfn_result = msg;
 +      wake_up_interruptible(&wq_pfn);
 +      printk("%s: response ---- wait_pfn_list{%d} \n", "handle_remote_pfn_response", wait_pfn_list);
 +
 +      pcn_kmsg_free_msg(inc_msg);
 +
 +      return 0;
 +}
 +
 +static int handle_remote_pfn_request(struct pcn_kmsg_message* inc_msg) {
 +
 +	_remote_pfn_request_t* msg = (_remote_pfn_request_t*) inc_msg;
 +	_remote_pfn_response_t response;
 +	_pfn_range_list_t *temp;
 +
 +	printk("%s : %d!!!\n", "handle_remote_pfn_request", _cpu);
 +	printk("%s: Entered remote pfn request \n", "handle_remote_pfn_request");
 +
 +	// Finish constructing response
 +	response.header.type = PCN_KMSG_TYPE_REMOTE_PFN_RESPONSE;
 +	response.header.prio = PCN_KMSG_PRIO_NORMAL;
 +
 +	// Record the requester's pfn range, then answer with our own
 +	add_pfn_node(msg->_data.kernel_number, msg->_data.start_pfn_addr,
 +			msg->_data.end_pfn_addr, &pfn_list_head);
 +
 +	temp = find_pfn(Kernel_Id, &pfn_list_head);
 +	if (temp != NULL) {
 +		response._data.kernel_number = temp->kernel_number;
 +		response._data.start_pfn_addr = temp->start_pfn_addr;
 +		response._data.end_pfn_addr = temp->end_pfn_addr;
 +	}
 +
 +	// Send response
 +	pcn_kmsg_send_long(msg->header.from_cpu,
 +			(struct pcn_kmsg_message*) (&response),
 +			sizeof(_remote_pfn_response_t) - sizeof(struct pcn_kmsg_hdr));
 +
 +	pcn_kmsg_free_msg(inc_msg);
 +
 +	return 0;
 +}
 +
 +int send_pfn_request(int KernelId) {
 +
 +	int res = -1;
 +	_pfn_range_list_t *t;
 +	_remote_pfn_request_t* request = kmalloc(
 +			sizeof(_remote_pfn_request_t),
 +			GFP_KERNEL);
 +
 +	if (!request)
 +		return res;
 +
 +	// Build request
 +	request->header.type = PCN_KMSG_TYPE_REMOTE_PFN_REQUEST;
 +	request->header.prio = PCN_KMSG_PRIO_NORMAL;
 +
 +	t = find_pfn(Kernel_Id, &pfn_list_head);
 +	if (t != NULL) {
 +		request->_data.kernel_number = t->kernel_number;
 +		request->_data.start_pfn_addr = t->start_pfn_addr;
 +		request->_data.end_pfn_addr = t->end_pfn_addr;
 +		// Send the request (payload excludes the message header)
 +		res = pcn_kmsg_send_long(KernelId, (struct pcn_kmsg_message*) (request),
 +				sizeof(_remote_pfn_request_t) - sizeof(struct pcn_kmsg_hdr));
 +	}
 +	return res;
 +}
 +
 +int _init_remote_pfn(void)
 +{
 +	int i = 0;
 +	int result = 0;
 +
 +	for (i = 0; i < NR_CPUS; i++) {
 +
 +		flush_pfn_var();
 +		// Skip the current cpu
 +		if (i == _cpu)
 +			continue;
 +
 +		result = send_pfn_request(i);
 +		if (!result) {
 +			PRINTK("%s : go to sleep!!!!", __func__);
 +			wait_event_interruptible(wq_pfn, wait_pfn_list != -1);
 +			wait_pfn_list = -1;
 +
 +			add_pfn_node(pfn_result->_data.kernel_number,
 +					pfn_result->_data.start_pfn_addr,
 +					pfn_result->_data.end_pfn_addr,
 +					&pfn_list_head);
 +		}
 +	}
 +
 +	return 0;
 +}
 +
 +/*
 + * ************************************* Function (hook) to be called from other file ********************
 + */
 +int _init_local_pfn(void)
 +{
 +	printk("%s : %d!!!\n", "_init_local_pfn: ", _cpu);
 +
 +	printk("POP_INIT:Kernel id is %d\n", Kernel_Id);
 +	printk("POP_INIT: kernel start addr is 0x%lx\n", kernel_start_addr);
 +	printk("POP_INIT:max_low_pfn is 0x%lx\n", PFN_PHYS(max_low_pfn));
 +
 +	add_pfn_node(Kernel_Id, kernel_start_addr, PFN_PHYS(max_low_pfn), &pfn_list_head);
 +
 +	return 0;
 +}
 +
 +int _init_RemotePFN(void)
 +{
 +       _init_local_pfn();
 +       _init_remote_pfn();
 +
 +        d_pfn(&pfn_list_head);
 +        return 0;
 +}
 +
 +void popcorn_init(void)
 +{
 +	int vendor_id = 0;
 +	struct cpuinfo_x86 *c = &boot_cpu_data;
 +
 +	if (bucket_phys_addr != 0) {
 +		int i = 0;
 +
 +		printk("%s: POP_INIT:kernel bucket_phys_addr: 0x%lx\n", "popcorn_init",
 +				(unsigned long) bucket_phys_addr);
 +		printk("%s:POP_INIT:Called popcorn_init boot id--max_nodes :%d!\n",
 +				"popcorn_init", max_nodes);
 +
 +		// The token bucket is one shared page; each kernel claims the
 +		// first free slot to obtain a unique Kernel_Id.
 +		token_bucket = ioremap_cache((resource_size_t) bucket_phys_addr, PAGE_SIZE);
 +
 +		if (!token_bucket) {
 +			// Fall back to mapping the shared page directly
 +			unsigned long pfn = (long) bucket_phys_addr >> PAGE_SHIFT;
 +			struct page *shared_page = pfn_to_page(pfn);
 +
 +			printk("Failed to ioremap token_bucket !\n");
 +			token_bucket = kmap(shared_page);
 +		}
 +
 +		PRINTK("%s: POP_INIT:token_bucket addr: 0x%p\n", __func__, token_bucket);
 +		for (i = 0; i < max_nodes; i++) {
 +			if (token_bucket[i] == 0) {
 +				token_bucket[i] = 1;
 +				Kernel_Id = i + 1;
 +				break;
 +			}
 +		}
 +
 +		PRINTK("%s: POP_INIT:token_bucket Initial values; \n", __func__);
 +		for (i = 0; i < max_nodes; i++) {
 +			printk("%d\t", token_bucket[i]);
 +		}
 +
 +		printk("POP_INIT:Virt add : 0x%p --- shm kernel id address: 0x%lx\n",
 +				token_bucket, bucket_phys_addr);
 +	}
 +
 +	printk("POP_INIT:first_online_node{%d} cpumask_first{%d} \n",
 +			first_online_node, cpumask_first(cpu_present_mask));
 +
 +	if (!strcmp((const char *) c->x86_vendor_id, "AuthenticAMD"))
 +		vendor_id = AuthenticAMD;
 +	else if (!strcmp((const char *) c->x86_vendor_id, "GenuineIntel"))
 +		vendor_id = GenuineIntel;
 +
 +	printk("POP_INIT:vendor{%s} cpufam{%d} model{%u} vendorid{%d} jhash{%u}\n",
 +			c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
 +			c->x86, c->x86_model, vendor_id,
 +			(jhash_2words((u32)vendor_id, cpumask_first(cpu_present_mask),
 +					JHASH_INITVAL) & ((1<<8)-1)));
 +      
 +      
-       Kernel_Id=smp_processor_id();;
++      Kernel_Id=cpumask_first(cpu_present_mask);
 +
-       printk("POP_INIT:Kernel id is %d\n",Kernel_Id);
++    printk("POP_INIT:Kernel id is %d\n",Kernel_Id);
 +    printk("POP_INIT: kernel start addr is 0x%lx\n", kernel_start_addr);
 +    printk("POP_INIT:max_low_pfn is 0x%lx\n", PFN_PHYS(max_low_pfn));
 +    printk("POP_INIT:min_low_pfn is 0x%lx\n", PFN_PHYS(min_low_pfn));
 +
 +}
 +
 +
  
 +/*
 + * ****************************** Message structures for obtaining PID status ********************************
 + */
  
- void add_node(_remote_cpu_info_data_t *arg, struct list_head *head)
- {
-    _remote_cpu_info_list_t *Ptr = (_remote_cpu_info_list_t *)kmalloc(sizeof(struct _remote_cpu_info_list),GFP_KERNEL);
-    // assert(Ptr != NULL);
-     Ptr->_data = *arg;
-     INIT_LIST_HEAD(&Ptr->cpu_list_member);
-     list_add(&Ptr->cpu_list_member, head);
- }
- int find_and_delete(int cpuno, struct list_head *head)
- {
-     struct list_head *iter;
-     _remote_cpu_info_list_t *objPtr;
-     list_for_each(iter, head) {
-         objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
-         if(objPtr->_data._processor == cpuno) {
-             list_del(&objPtr->cpu_list_member);
-             kfree(objPtr);
-             return 1;
-         }
-     }
- }
- void display(struct list_head *head)
- {
-     struct list_head *iter;
-     _remote_cpu_info_list_t *objPtr;
-     list_for_each(iter, head) {
-         objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
-         printk("%d \t", objPtr->_data._processor);
-         printk("%d \t", objPtr->_data._cpu_family);
-     }
-     printk("\n");
- }
++ void add_node(_remote_cpu_info_data_t *arg, struct list_head *head)
++ {
++   _remote_cpu_info_list_t *Ptr =
++         (_remote_cpu_info_list_t *)kmalloc(sizeof(_remote_cpu_info_list_t), GFP_KERNEL);
++   if (!Ptr) {
++     printk(KERN_ALERT "%s: cannot allocate memory for kernel node descriptor\n", __func__);
++     return;
++   }
++   printk("%s: _remote_cpu_info_list_t %zu, _remote_cpu_info_data_t %zu\n",
++         __func__, sizeof(_remote_cpu_info_list_t), sizeof(_remote_cpu_info_data_t));
++
++   INIT_LIST_HEAD(&(Ptr->cpu_list_member));
++   memcpy(&(Ptr->_data), arg, sizeof(_remote_cpu_info_data_t)); //Ptr->_data = *arg;
++   list_add(&Ptr->cpu_list_member, head);
++ }
++
++ int find_and_delete(int cpuno, struct list_head *head)
++ {
++     struct list_head *iter;
++     _remote_cpu_info_list_t *objPtr;
++
++     list_for_each(iter, head) {
++         objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
++         if(objPtr->_data._processor == cpuno) {
++             list_del(&objPtr->cpu_list_member);
++             kfree(objPtr);
++             return 1;
++         }
++     }
++     return 0;
++ }
++
++ #define DISPLAY_BUFFER 128
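++ /* Log each remote kernel's cpu id, family, and cpumask for debugging */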
++ static void display(struct list_head *head)
++ {
++     struct list_head *iter;
++     _remote_cpu_info_list_t *objPtr;
++     char buffer[DISPLAY_BUFFER];
++
++     list_for_each(iter, head) {
++         objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
++
++         memset(buffer, 0, DISPLAY_BUFFER);
++         cpumask_scnprintf(buffer, (DISPLAY_BUFFER -1), &(objPtr->_data._cpumask));
++         printk("%s: cpu:%d fam:%d %s\n", __func__,
++                 objPtr->_data._processor, objPtr->_data._cpu_family,
++                 buffer);
++     }
++ }
++
++ ///////////////////////////////////////////////////////////////////////////////
  struct _remote_cpu_info_request {
        struct pcn_kmsg_hdr header;
        _remote_cpu_info_data_t _data;
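        /* the request carries the sender's own cpuinfo, so each side learns the other */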
@@@ -457,179 -199,86 +461,175 @@@ int flush_cpu_info_var() 
        wait_cpu_list = -1;
        return 0;
  }
 -struct cpumask cpu_global_online_mask;
 -#define for_each_global_online_cpu(cpu)   for_each_cpu((cpu), cpu_global_online_mask)
 -static int handle_remote_proc_cpu_info_response(struct pcn_kmsg_message* inc_msg)
 -{
 -  _remote_cpu_info_response_t* msg = (_remote_cpu_info_response_t*) inc_msg;
 -  printk("%s: Entered remote cpu info response \n", "handle_remote_proc_cpu_info_response");
  
 -  wait_cpu_list = 1;
 -  if (msg != NULL)
 -    cpu_result = msg;
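 +/* mirrors c_start() in arch/x86/kernel/cpu/proc.c: walk cpu_online_mask */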
 +static void *remote_c_start(loff_t *pos) {
 +      if (*pos == 0) /* just in case, cpu 0 is not the first */
 +              *pos = cpumask_first(cpu_online_mask);
 +      else
 +              *pos = cpumask_next(*pos - 1, cpu_online_mask);
 +      if ((*pos) < nr_cpu_ids)
 +              return &cpu_data(*pos);
 +      return NULL;
 +}
  
 -  wake_up_interruptible(&wq_cpu);
 -  printk("%s: response ---- wait_cpu_list{%d} \n", "handle_remote_proc_cpu_info_response", wait_cpu_list);
 +int fill_cpu_info(_remote_cpu_info_data_t *res) {
  
 -  pcn_kmsg_free_msg(inc_msg);
 -  return 0;
 -}
 +	void *p;
 +	loff_t pos = 0;
 +
 +	/* zero the result: it is copied over the wire and must not leak stack garbage */
 +	memset(res, 0, sizeof(*res));
 +	p = remote_c_start(&pos);
  
 -static int handle_remote_proc_cpu_info_request(struct pcn_kmsg_message* inc_msg)
 -{
 -  int i;
 -  
 -  printk("%s : cpus online in kernel %d!!!", "handle_remote_proc_cpu_info_request",_cpu);
 -  for_each_online_cpu(i) {
 -    printk("%d ", i);
 -  }
 -  printk("\n");
 -  
 -  _remote_cpu_info_request_t* msg = (_remote_cpu_info_request_t*) inc_msg;
 -  _remote_cpu_info_response_t response;
 -
 -  printk("%s: Entered remote  cpu info request \n", "handle_remote_proc_cpu_info_request");
 -
 -  // Finish constructing response
 -  response.header.type = PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_RESPONSE;
 -  response.header.prio = PCN_KMSG_PRIO_NORMAL;
 -//  response._data._cpumask = kmalloc( sizeof(struct cpumask), GFP_KERNEL); //this is an error, how you can pass a pointer to another kernel?!
 -  memcpy(&(response._data._cpumask), cpu_present_mask, sizeof(cpu_present_mask));
 -extern int my_cpu;
 -  response._data._processor = my_cpu;
 -
 -/*  cpumask_or(&cpu_global_online_mask,&cpu_global_online_mask,(const struct cpumask *)(msg->_data._cpumask));
 -*/  add_node(&msg->_data,&rlist_head);
 -
 -  display(&rlist_head);
 -
 -  printk("%s : global cpus online in kernel %d!!!", "handle_remote_proc_cpu_info_request",_cpu);
 -  /*for_each_global_online_cpu(i) {
 -    printk("%d %t", i);
 -  }
 -  printk("\n");
 -*/
 -  // Send response
 -  pcn_kmsg_send_long(msg->header.from_cpu,
 -              (struct pcn_kmsg_message*) (&response),
 -              sizeof(_remote_cpu_info_response_t) - sizeof(struct pcn_kmsg_hdr));
 -  
 -  pcn_kmsg_free_msg(inc_msg);
 -  return 0;
 +	struct cpuinfo_x86 *c = p;
 +	unsigned int cpu = 0;
 +	int i;
 +
 +	if (!c)
 +		return -1;
 +
 +#ifdef CONFIG_SMP
 +      cpu = c->cpu_index;
 +#endif
 +
 +
 +      res->_processor = cpu;
 +      strcpy(res->_vendor_id, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown");
 +      res->_cpu_family = c->x86;
 +      res->_model = c->x86_model;
 +      strcpy(res->_model_name, c->x86_model_id[0] ? c->x86_model_id : "unknown");
 +
 +      if (c->x86_mask || c->cpuid_level >= 0)
 +              res->_stepping = c->x86_mask;
 +      else
 +              res->_stepping = -1;
 +
 +      if (c->microcode)
 +              res->_microcode = c->microcode;
 +
 +      if (cpu_has(c, X86_FEATURE_TSC)) {
 +              unsigned int freq = cpufreq_quick_get(cpu);
 +
 +              if (!freq)
 +                      freq = cpu_khz;
 +		res->_cpu_freq = freq; /* keep kHz; consumers print MHz as freq/1000.freq%1000 */
 +      }
 +
 +      /* Cache size */
 +      if (c->x86_cache_size >= 0)
 +              res->_cache_size = c->x86_cache_size;
 +
 +      strcpy(res->_fpu, "yes");
 +      strcpy(res->_fpu_exception, "yes");
 +      res->_cpuid_level = c->cpuid_level;
 +      strcpy(res->_wp, "yes");
 +
 +	strcpy(res->_flags, "");
 +	for (i = 0; i < 32 * NCAPINTS; i++) {
 +		// Space-separate the feature names, as /proc/cpuinfo does
 +		if (cpu_has(c, i) && x86_cap_flags[i] != NULL) {
 +			strcat(res->_flags, x86_cap_flags[i]);
 +			strcat(res->_flags, " ");
 +		}
 +	}
 +
 +      res->_nbogomips = c->loops_per_jiffy / (500000 / HZ);
 +      //(c->loops_per_jiffy/(5000/HZ)) % 100);
 +
 +#ifdef CONFIG_X86_64
 +	if (c->x86_tlbsize > 0)
 +		res->_TLB_size = c->x86_tlbsize;
 +#endif
 +      res->_clflush_size = c->x86_clflush_size;
 +      res->_cache_alignment = c->x86_cache_alignment;
 +      res->_bits_physical = c->x86_phys_bits;
 +      res->_bits_virtual = c->x86_virt_bits;
 +
 +	strcpy(res->_power_management, "");
 +	for (i = 0; i < 32; i++) {
 +		if (c->x86_power & (1 << i)) {
 +			// Append the power-management flag name, not the cpu flags
 +			if (i < ARRAY_SIZE(x86_power_flags) && x86_power_flags[i]) {
 +				strcat(res->_power_management,
 +						x86_power_flags[i][0] ? " " : "");
 +				strcat(res->_power_management, x86_power_flags[i]);
 +			}
 +		}
 +	}
-       res->_cpumask = kmalloc(
-                               sizeof(struct cpumask),
-                               GFP_KERNEL);
-       res->_cpumask =cpu_present_mask;
 +
 +      return 0;
  }
  
 -int send_cpu_info_request(int KernelId)
 -{
 +static int handle_remote_proc_cpu_info_response(
 +              struct pcn_kmsg_message* inc_msg) {
 +      _remote_cpu_info_response_t* msg = (_remote_cpu_info_response_t*) inc_msg;
  
 -      int res = 0;
 -      _remote_cpu_info_request_t* request = kmalloc(
 -                      sizeof(_remote_cpu_info_request_t),
 -                      GFP_KERNEL);
 -      // Build request
 -      request->header.type = PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_REQUEST;
 -      request->header.prio = PCN_KMSG_PRIO_NORMAL;
 -//    request->_data._cpumask = kmalloc( sizeof(struct cpumask), GFP_KERNEL);
 -      memcpy(&(request->_data._cpumask), cpu_present_mask, sizeof(cpu_present_mask));
 +      printk("%s: Entered remote cpu info response \n", "handle_remote_proc_cpu_info_response");
 +
 +      wait_cpu_list = 1;
 +      if (msg != NULL)
 +              cpu_result = msg;
 +      wake_up_interruptible(&wq_cpu);
 +      printk("%s: response ---- wait_cpu_list{%d} \n", "handle_remote_proc_cpu_info_response", wait_cpu_list);
 +
 +      pcn_kmsg_free_msg(inc_msg);
 +
 +      return 0;
 +}
+ extern int my_cpu;
 -request->_data._processor = my_cpu;
 +static int handle_remote_proc_cpu_info_request(struct pcn_kmsg_message* inc_msg) {
 +
-       printk("%s : cpus online in kernel %d!!!", "handle_remote_proc_cpu_info_request",_cpu);
 +      int i;
-                for_each_online_cpu(i) {
-                        printk("%d %t", i);
-                }
-                printk("\n");
++
 +      _remote_cpu_info_request_t* msg = (_remote_cpu_info_request_t*) inc_msg;
 +      _remote_cpu_info_response_t response;
 +
 +	printk("%s: Entered remote cpu info request \n", "handle_remote_proc_cpu_info_request");
 +
 +      // Finish constructing response
 +      response.header.type = PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_RESPONSE;
 +      response.header.prio = PCN_KMSG_PRIO_NORMAL;
 +
-       cpumask_or(cpu_global_online_mask,cpu_global_online_mask,(const struct cpumask *)(msg->_data._cpumask));
++      fill_cpu_info(&response._data);
 +
-       add_node(&msg->_data,&rlist_head);
++      memcpy(&(response._data._cpumask), cpu_present_mask, sizeof(struct cpumask));
++      response._data._processor = my_cpu;
 +
-       fill_cpu_info(&response._data);
++      add_node(&msg->_data,&rlist_head);
 +
 +      display(&rlist_head);
 +
 +	printk("%s : global cpus online in kernel %d!!!\n", "handle_remote_proc_cpu_info_request", _cpu);
 +
 +	for_each_global_online_cpu(i) {
 +		printk("%d\t", i);
 +	}
 +	printk("\n");
  
        // Send response
 -      res = pcn_kmsg_send_long(KernelId, (struct pcn_kmsg_message*) (request),
 -                      sizeof(_remote_cpu_info_request_t) - sizeof(struct pcn_kmsg_hdr));
 -      return res;
 +      pcn_kmsg_send_long(msg->header.from_cpu,
 +                      (struct pcn_kmsg_message*) (&response),
 +                      sizeof(_remote_cpu_info_response_t) - sizeof(struct pcn_kmsg_hdr));
 +
 +      pcn_kmsg_free_msg(inc_msg);
 +
 +      return 0;
 +}
 +
 +int send_cpu_info_request(int KernelId) {
 +
-       int res = 0;
-       _remote_cpu_info_request_t* request = kmalloc(
-                       sizeof(_remote_cpu_info_request_t),
-                       GFP_KERNEL);
-       // Build request
-       request->header.type = PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_REQUEST;
-       request->header.prio = PCN_KMSG_PRIO_NORMAL;
-       request->_data._cpumask = kmalloc(
-                       sizeof(struct cpumask),
-                       GFP_KERNEL);
-       //request->_data._cpumask =cpu_present_mask;
++       int res = 0;
++       _remote_cpu_info_request_t* request = kmalloc(
++                         sizeof(_remote_cpu_info_request_t),
++                               GFP_KERNEL);
++       if (!request)
++               return -ENOMEM;
++
++       // Build request
++       request->header.type = PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_REQUEST;
++       request->header.prio = PCN_KMSG_PRIO_NORMAL;
 +
++       fill_cpu_info(&request->_data);
 +
-       fill_cpu_info(&request->_data);
-       // Send response
-       res = pcn_kmsg_send_long(KernelId, (struct pcn_kmsg_message*) (request),
-                       sizeof(_remote_cpu_info_request_t) - sizeof(struct pcn_kmsg_hdr));
-       return res;
++       // sizeof(cpu_present_mask) is the size of a pointer; copy the whole mask
++       memcpy(&(request->_data._cpumask), cpu_present_mask, sizeof(struct cpumask));
++       request->_data._processor = my_cpu;
++
++
++        // Send request
++        res = pcn_kmsg_send_long(KernelId, (struct pcn_kmsg_message*) (request),
++                               sizeof(_remote_cpu_info_request_t) - sizeof(struct pcn_kmsg_hdr));
++
++        //kfree(request);
++
++        return res;
  }
  
  /*
   */
  int _init_RemoteCPUMask(void)
  {
 -  unsigned int i;
 -  printk("%s : cpus online in kernel %d!!!", "_init_RemoteCPUMask",_cpu);
  
 -  flush_cpu_info_var();
 -  int res = 0;
  
 -  int result = 0;
 -  int retval;
 +      unsigned int i;
 +      printk("%s : cpus online in kernel %d!!!", "_init_RemoteCPUMask",_cpu);
  
 -//should we add self?!
  
 -  for (i = 0; i < NR_CPUS; i++) { 
 -    flush_cpu_info_var();
 -    
 -    // Skip the current cpu
 -    //if (i == _cpu)
 -    if (cpumask_test_cpu(i, cpu_present_mask)) {
 -printk("%s: cpu already known %i continue.\n", __func__,  i);
 -      continue;
 -}
 -    printk("%s: checking cpu %d.\n", __func__, i);
 -    result = send_cpu_info_request(i);
 -    if (!result) {
 -      PRINTK("%s : go to sleep!!!!", __func__);
 -      wait_event_interruptible(wq_cpu, wait_cpu_list != -1);
 -      wait_cpu_list = -1;
 -
 -// TODO      
 -//      cpumask_or(cpu_global_online_mask,cpu_global_online_mask,(const struct cpumask *)(cpu_result->_data._cpumask));
 -
 -      add_node(&cpu_result->_data,&rlist_head);
 -      display(&rlist_head);
 -    }
 -  }
 -
 -  printk("%s : global cpus online in kernel %d!!!", "_init_RemoteCPUMask",_cpu);
 -/*  for_each_cpu(i,cpu_global_online_mask) {
 -    printk("------%d %t", i);
 -  }
 -  
 -  printk("\n");
 -*/  return 0;
 +      flush_cpu_info_var();
-               int res = 0;
-               int result = 0;
-               int retval;
-               for (i = 0; i < NR_CPUS; i++) {
-                       flush_cpu_info_var();
-                       // Skip the current cpu
-                       if (i == _cpu)
-                               continue;
-                       result = send_cpu_info_request(i);
-                       if (!result) {
-                               PRINTK("%s : go to sleep!!!!", __func__);
-                                                       wait_event_interruptible(wq_cpu, wait_cpu_list != -1);
-                                                       wait_cpu_list = -1;
-                                                       cpumask_or(cpu_global_online_mask,cpu_global_online_mask,(const struct cpumask *)(cpu_result->_data._cpumask));
-                                                       add_node(&cpu_result->_data,&rlist_head);
-                                                       display(&rlist_head);
-                       }
-               }
++      int result = 0;
++
++      for (i = 0; i < NR_CPUS; i++) {
++              flush_cpu_info_var();
++
++              // Skip cpus we already know about (including the current one)
++              if (cpumask_test_cpu(i, cpu_present_mask)) {
++                      printk("%s: cpu already known %i continue.\n", __func__, i);
++                      continue;
++              }
++              printk("%s: checking cpu %d.\n", __func__, i);
++
++              result = send_cpu_info_request(i);
++              if (!result) {
++                      PRINTK("%s : go to sleep!!!!", __func__);
++                      wait_event_interruptible(wq_cpu, wait_cpu_list != -1);
++                      wait_cpu_list = -1;
++
++                      // TODO
++                      //      cpumask_or(cpu_global_online_mask,cpu_global_online_mask,(const struct cpumask *)(cpu_result->_data._cpumask));
++
++                      add_node(&cpu_result->_data, &rlist_head);
++                      display(&rlist_head);
++              }
++      }
 +
 +	printk("%s : global cpus online in kernel %d!!!\n", "_init_RemoteCPUMask", _cpu);
 +
-               for_each_cpu(i,cpu_global_online_mask) {
-                       printk("------%d %t", i);
-                        }
-               printk("\n");
 +      return 0;
  }
  
  
  static int __init cpu_info_handler_init(void)
  {
 -    _cpu = smp_processor_id();
 +
-     _cpu = smp_processor_id();
++#ifndef SUPPORT_FOR_CLUSTERING
++   _cpu = smp_processor_id();
++#else
++   _cpu = my_cpu;
++#endif
  
      INIT_LIST_HEAD(&rlist_head);
  
-   /*  ptrlist = &rlist;
-     ptrlist->_data=NULL;
-     INIT_LIST_HEAD(&ptrlist->cpu_list);*/
 +    INIT_LIST_HEAD(&pfn_list_head);
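 +    /* rlist_head tracks remote kernels; pfn_list_head tracks each kernel's pfn range */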
 +
 +
        pcn_kmsg_register_callback(PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_REQUEST,
                                                handle_remote_proc_cpu_info_request);
        pcn_kmsg_register_callback(PCN_KMSG_TYPE_REMOTE_PROC_CPUINFO_RESPONSE,
diff --cc kernel/kmod.c
Simple merge
diff --cc kernel/process_server.c
  #include <linux/pcn_kmsg.h> // Messaging
  #include <linux/pcn_perf.h> // performance measurement
  #include <linux/string.h>
- #include <linux/popcorn.h>
+ #include <linux/unistd.h>
+ #include <linux/tsacct_kern.h>
 -#include <linux/popcorn.h>
++#include <linux/popcorn_cpuinfo.h>
+ #include <linux/syscalls.h>
+ #include <linux/kernel.h>
+ #include <linux/proc_fs.h>
  
  #include <asm/pgtable.h>
  #include <asm/atomic.h>
@@@ -67,7 -65,11 +71,12 @@@ unsigned long get_percpu_old_rsp(void)
  // migrate in response to a mapping query.
  #define MAX_MAPPINGS 1
  
 +extern long sys_topen(const char __user * filename, int flags, int mode, int fd);
+ // Whether or not to expose a proc entry that we can publish
+ // information to.
+ //#undef PROCESS_SERVER_HOST_PROC_ENTRY
+ #define PROCESS_SERVER_HOST_PROC_ENTRY
  /**
   * Use the preprocessor to turn off printk.
   */
@@@ -252,14 -256,15 +263,18 @@@ static void perf_init(void) 
  #define PERF_MEASURE_STOP(x, y, z)
  #endif
  
 +
 +static DECLARE_WAIT_QUEUE_HEAD( countq);
 +
  /**
-  * Constants
+  * Enums
   */
- #define RETURN_DISPOSITION_EXIT 0
- #define RETURN_DISPOSITION_MIGRATE 1
+ typedef enum _lamport_barrier_state {
+     LAMPORT_ENTRY_OWNED,
+     LAMPORT_ENTRY_OFF_LIMITS,
+     LAMPORT_ENTRY_CONTENDED
+ } lamport_barrier_state_t;
  
  /**
   * Library
@@@ -1358,55 -1734,164 +1758,164 @@@ static int is_vaddr_mapped(struct mm_st
  }
  
  /**
-  *  @brief Find the bounds of a physically consecutive mapped region.
-  *  The region must be contained within the specified VMA.
-  *
-  *  Hypothetical page table mappings for a given VMA:
-  *
-  *  *********************************
-  *  *    Vaddr      *   Paddr       *
-  *  *********************************
-  *  * 0x10000000    * 0x12341000    *
-  *  *********************************
-  *  * 0x10001000    * 0x12342000    *
-  *  *********************************
-  *  * 0x10002000    * 0x12343000    *
-  *  *********************************
-  *  * 0x10003000    * 0x43214000    *
-  *  *********************************
-  *  
-  *  This function, given a vaddr of 12342xxx will return:
-  *  *vaddr_mapping_start = 0x10000000
-  *  *paddr_mapping_start = 0x12341000
-  *  *paddr_mapping_sz    = 0x3000
-  *
-  *  Notice 0x10003000 and above is not included in the returned region, as
-  *  its paddr is not consecutive with the previous mappings.
-  *
-  */
- int find_consecutive_physically_mapped_region(struct mm_struct* mm,
-                                               struct vm_area_struct* vma,
-                                               unsigned long vaddr,
-                                               unsigned long* vaddr_mapping_start,
-                                               unsigned long* paddr_mapping_start,
-                                               size_t* paddr_mapping_sz) {
-     unsigned long paddr_curr = NULL;
-     unsigned long vaddr_curr = vaddr;
-     unsigned long vaddr_next = vaddr;
-     unsigned long paddr_next = NULL;
-     unsigned long paddr_start = NULL;
-     size_t sz = 0;
 +  * @brief Determine if the specified vma can have cow mappings.
+  * @return 1 = yes, 0 = no.
 - */
 +
-     
-     // Initializes paddr_curr
-     if(get_physical_address(mm,vaddr_curr,&paddr_curr) < 0) {
-         return -1;
+ static int is_maybe_cow(struct vm_area_struct* vma) {
+     if((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) {
+         // Not a cow vma
+         return 0;
      }
-     paddr_start = paddr_curr;
-     *vaddr_mapping_start = vaddr_curr;
-     *paddr_mapping_start = paddr_curr;
-     
-     sz = PAGE_SIZE;
+     if(!(vma->vm_flags & VM_WRITE)) {
+         return 0;
+     }
+     return 1;
 -}
++}*/
+ /**
+  * @brief Break the COW page that contains "address", iff that page
+  * is a COW page.
+  * @return 1 = handled, 0 = not handled.
+  * @prerequisite Caller must grab mm->mmap_sem
+  */
+ static int break_cow(struct mm_struct *mm, struct vm_area_struct* vma, unsigned long address) {
+     pgd_t *pgd = NULL;
+     pud_t *pud = NULL;
+     pmd_t *pmd = NULL;
+     pte_t *ptep = NULL;
+     pte_t pte;
+     spinlock_t* ptl;
+ #ifdef PROCESS_SERVER_HOST_PROC_ENTRY
+     unsigned long long end_time = 0;
+     unsigned long long total_time = 0;
+     unsigned long long start_time = native_read_tsc();
+ #endif
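+     // TSC timestamps bracket the COW break; the delta feeds PS_PROC_DATA_BREAK_COW_TIME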
+     //PSPRINTK("%s: entered\n",__func__);
+     // if it's not a cow mapping, return.
+     if((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) {
+         goto not_handled;
+     }
+     // if it's not writable in vm_flags, return.
+     if(!(vma->vm_flags & VM_WRITE)) {
+         goto not_handled;
+     }
+     pgd = pgd_offset(mm, address);
+     if(!pgd_present(*pgd)) {
+         goto not_handled_unlock;
+     }
+     pud = pud_offset(pgd,address);
+     if(!pud_present(*pud)) {
+         goto not_handled_unlock;
+     }
+     pmd = pmd_offset(pud,address);
+     if(!pmd_present(*pmd)) {
+         goto not_handled_unlock;
+     }
+     ptep = pte_offset_map(pmd,address);
+     if(!ptep || !pte_present(*ptep) || pte_none(*ptep)) {
+         pte_unmap(ptep);
+         goto not_handled_unlock;
+     }
+     pte = *ptep;
+     if(pte_write(pte)) {
+         goto not_handled_unlock;
+     }
+     
+     // break the cow!
+     ptl = pte_lockptr(mm,pmd);
+     PS_SPIN_LOCK(ptl);
+    
+     PSPRINTK("%s: proceeding on address %lx\n",__func__,address);
+     do_wp_page(mm,vma,address,ptep,pmd,ptl,pte);
+     // NOTE:
+     // Do not call pte_unmap_unlock(ptep,ptl), since do_wp_page does that!
+     
+     goto handled;
+ not_handled_unlock:
+ not_handled:
+ #ifdef PROCESS_SERVER_HOST_PROC_ENTRY
+     end_time = native_read_tsc();
+     total_time = end_time - start_time;
+     PS_PROC_DATA_TRACK(PS_PROC_DATA_BREAK_COW_TIME,total_time);
+ #endif
+     return 0;
+ handled:
+ #ifdef PROCESS_SERVER_HOST_PROC_ENTRY
+     end_time = native_read_tsc();
+     total_time = end_time - start_time;
+     PS_PROC_DATA_TRACK(PS_PROC_DATA_BREAK_COW_TIME,total_time);
+ #endif
+     return 1;
+ }
+ /**
+  *  @brief Find the bounds of a physically consecutive mapped region.
+  *  The region must be contained within the specified VMA.
+  *
+  *  Hypothetical page table mappings for a given VMA:
+  *
+  *  *********************************
+  *  *    Vaddr      *   Paddr       *
+  *  *********************************
+  *  * 0x10000000    * 0x12341000    *
+  *  *********************************
+  *  * 0x10001000    * 0x12342000    *
+  *  *********************************
+  *  * 0x10002000    * 0x12343000    *
+  *  *********************************
+  *  * 0x10003000    * 0x43214000    *
+  *  *********************************
+  *  
+  *  This function, given a vaddr of 12342xxx will return:
+  *  *vaddr_mapping_start = 0x10000000
+  *  *paddr_mapping_start = 0x12341000
+  *  *paddr_mapping_sz    = 0x3000
+  *
+  *  Notice 0x10003000 and above is not included in the returned region, as
+  *  its paddr is not consecutive with the previous mappings.
+  *
+  */
+ int find_consecutive_physically_mapped_region(struct mm_struct* mm,
+                                               struct vm_area_struct* vma,
+                                               unsigned long vaddr,
+                                               unsigned long* vaddr_mapping_start,
+                                               unsigned long* paddr_mapping_start,
+                                               size_t* paddr_mapping_sz,
+                                               int br_cow) {
+     unsigned long paddr_curr = NULL;
+     unsigned long vaddr_curr = vaddr;
+     unsigned long vaddr_next = vaddr;
+     unsigned long paddr_next = NULL;
+     unsigned long paddr_start = NULL;
+     size_t sz = 0;
+     
+     // Initializes paddr_curr
+     if(br_cow) {
+         break_cow(mm,vma,vaddr_curr);
+     }
+     if(get_physical_address(mm,vaddr_curr,&paddr_curr) < 0) {
+         return -1;
+     }
+     paddr_start = paddr_curr;
+     *vaddr_mapping_start = vaddr_curr;
+     *paddr_mapping_start = paddr_curr;
+     
+     sz = PAGE_SIZE;
  
      // seek up in memory
      // This stretches (sz) only while leaving
@@@ -2689,10 -3332,10 +3356,10 @@@ static int count_remote_thread_members(
      // the list does not include the current processor group descriptor (TODO)
      struct list_head *iter;
      _remote_cpu_info_list_t *objPtr;
- extern struct list_head rlist_head;
    extern struct list_head rlist_head;
      list_for_each(iter, &rlist_head) {
          objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
 -        i = objPtr->_data._processor;
 +        i = objPtr->_data._processor;*/
  #endif
          // Send the request to this cpu.
          s = pcn_kmsg_send(i,(struct pcn_kmsg_message*)(&request));
@@@ -2853,107 -3507,18 +3531,34 @@@ found
      PERF_MEASURE_STOP(&perf_process_tgroup_closed_item," ",perf);
  }
  
 +/**
 + * @brief Determine if the specified vma can have cow mappings.
 + * @return 1 = yes, 0 = no.
 + */
 +static int is_maybe_cow(struct vm_area_struct* vma) {
 +    if((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) {
 +        // Not a cow vma
 +        return 0;
 +    }
 +
 +    if(!(vma->vm_flags & VM_WRITE)) {
 +        return 0;
 +    }
 +
 +    return 1;
 +}
  
  /**
-  * @brief Break the COW page that contains "address", iff that page
-  * is a COW page.
-  * @return 1 = handled, 0 = not handled.
-  * @prerequisite Caller must grab mm->mmap_sem
-  */
- static int break_cow(struct mm_struct *mm, struct vm_area_struct* vma, unsigned long address) {
-     pgd_t *pgd = NULL;
-     pud_t *pud = NULL;
-     pmd_t *pmd = NULL;
-     pte_t *ptep = NULL;
-     pte_t pte;
-     spinlock_t* ptl;
-     //PSPRINTK("%s: entered\n",__func__);
-     // if it's not a cow mapping, return.
-     if((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE) {
-         goto not_handled;
-     }
-     // if it's not writable in vm_flags, return.
-     if(!(vma->vm_flags & VM_WRITE)) {
-         goto not_handled;
-     }
-     pgd = pgd_offset(mm, address);
-     if(!pgd_present(*pgd)) {
-         goto not_handled_unlock;
-     }
-     pud = pud_offset(pgd,address);
-     if(!pud_present(*pud)) {
-         goto not_handled_unlock;
-     }
-     pmd = pmd_offset(pud,address);
-     if(!pmd_present(*pmd)) {
-         goto not_handled_unlock;
-     }
-     ptep = pte_offset_map(pmd,address);
-     if(!ptep || !pte_present(*ptep) || pte_none(*ptep)) {
-         pte_unmap(ptep);
-         goto not_handled_unlock;
-     }
-     pte = *ptep;
-     if(pte_write(pte)) {
-         goto not_handled_unlock;
-     }
-     
-     // break the cow!
-     ptl = pte_lockptr(mm,pmd);
-     PS_SPIN_LOCK(ptl);
-    
-     PSPRINTK("%s: proceeding\n",__func__);
-     do_wp_page(mm,vma,address,ptep,pmd,ptl,pte);
-     // NOTE:
-     // Do not call pte_unmap_unlock(ptep,ptl), since do_wp_page does that!
-     
-     goto handled;
- not_handled_unlock:
- not_handled:
-     return 0;
- handled:
-     return 1;
- }
- /**
-  * @brief Process a request made by a remote CPU for a mapping.  This function
-  * will search for mm's for the specified distributed thread group, and if found,
-  * will search that mm for entries that contain the address that was asked for.
-  * Prefetch is implemented in this function, so not only will the page that
-  * is asked for be communicated, but the entire contiguous range of virtual to
-  * physical addresses that the specified address lives in will be communicated.
-  * Other contiguous regions may also be communicated if they exist.  This is
-  * prefetch.
-  *
-  * <MEASURED perf_process_mapping_request>
+  * @brief Process a request made by a remote CPU for a mapping.  This function
+  * will search for mm's for the specified distributed thread group, and if found,
+  * will search that mm for entries that contain the address that was asked for.
+  * Prefetch is implemented in this function, so not only will the page that
+  * is asked for be communicated, but the entire contiguous range of virtual to
+  * physical addresses that the specified address lives in will be communicated.
+  * Other contiguous regions may also be communicated if they exist.  This is
+  * prefetch.
+  *
+  * <MEASURED perf_process_mapping_request>
   */
  void process_mapping_request(struct work_struct* work) {
      mapping_request_work_t* w = (mapping_request_work_t*) work;
@@@ -3302,7 -3925,11 +3965,12 @@@ void process_group_exit_item(struct wor
      group_exit_work_t* w = (group_exit_work_t*) work;
      struct task_struct *task = NULL;
      struct task_struct *g;
 +    unsigned long flags;
+ #ifdef PROCESS_SERVER_HOST_PROC_ENTRY
+     unsigned long long end_time;
+     unsigned long long total_time;
+     unsigned long long start_time = native_read_tsc();
+ #endif
  
      //int perf = PERF_MEASURE_START(&perf_process_group_exit_item);
      PSPRINTK("%s: entered\n",__func__);
@@@ -3480,29 -4124,18 +4174,33 @@@ void process_mprotect_item(struct work_
          if (task->tgroup_home_cpu == tgroup_home_cpu &&
              task->tgroup_home_id  == tgroup_home_id &&
              !(task->flags & PF_EXITING)) {
 -
 -            // Take note of the fact that an mm exists on the remote kernel
 +           /* 
 +            if (task->mm)
 +                // do_mprotect
 +                do_mprotect(task, start, len, prot,0);
 +//            task_unlock(task); //TODO consider to use this
 +          else
 +              printk("%s: task->mm task:%p mm:%p\n",
 +                      __func__, task, task->mm);
 +            */
 +            // doing mprotect here causes errors, I do not know why
 +            // for now I will unmap the region instead.
 +            //do_mprotect(task,start,len,prot,0);
 +            
 +           if (task && task->mm ) {
 +                   mm_to_munmap = task->mm;
 +           }
 +          // Take note of the fact that an mm exists on the remote kernel
              set_cpu_has_known_tgroup_mm(task,w->from_cpu);
  
-             // then quit
+             if(task->mm) {
+                 mm_to_munmap = task->mm;
+             }
+             else
+                 printk("%s: pirla\n",__func__);
+             
              goto done;
          }
- //    task_unlock(task); // TODO consider to use this
      } while_each_thread(g,task);
  done:
      read_unlock(&tasklist_lock);
@@@ -4954,22 -6081,10 +6166,26 @@@ int process_server_import_address_space
      current->thread.es = clone_data->thread_es;
      current->thread.ds = clone_data->thread_ds;
      current->thread.usersp = clone_data->thread_usersp;
+     current->thread.fsindex = clone_data->thread_fsindex;
+     current->thread.fs = clone_data->thread_fs;
+     current->thread.gs = clone_data->thread_gs;    
+     current->thread.gsindex = clone_data->thread_gsindex;
 +   
 +
 +    //mklinux_akshay
 +    current->origin_pid = clone_data->origin_pid;
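 +    /* merge the signal state carried over from the home kernel into this task */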
 +    sigorsets(&current->blocked,&current->blocked,&clone_data->remote_blocked) ;
 +    sigorsets(&current->real_blocked,&current->real_blocked,&clone_data->remote_real_blocked);
 +    sigorsets(&current->saved_sigmask,&current->saved_sigmask,&clone_data->remote_saved_sigmask);
 +    current->pending = clone_data->remote_pending;
 +    current->sas_ss_sp = clone_data->sas_ss_sp;
 +    current->sas_ss_size = clone_data->sas_ss_size;
 +
 +    printk(KERN_ALERT "origin pid {%d}-{%d} \n",current->origin_pid,clone_data->origin_pid);
 +
 +    int cnt=0;
 +     for(cnt=0;cnt<_NSIG;cnt++)
 +       current->sighand->action[cnt] = clone_data->action[cnt];
  
      // Set output variables.
      *sp = clone_data->thread_usersp;
@@@ -5078,10 -6268,10 +6369,10 @@@ int process_server_do_group_exit(void) 
      // the list does not include the current processor group descriptor (TODO)
      struct list_head *iter;
      _remote_cpu_info_list_t *objPtr;
- extern struct list_head rlist_head;
    extern struct list_head rlist_head;
      list_for_each(iter, &rlist_head) {
          objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
 -        i = objPtr->_data._processor;
 +        i = objPtr->_data._processor;*/
  #endif
        // Send
          pcn_kmsg_send(i,(struct pcn_kmsg_message*)(&msg));
@@@ -5210,15 -6417,15 +6518,15 @@@ finished_membership_search
          // the list does not include the current processor group descriptor (TODO)
          struct list_head *iter;
          _remote_cpu_info_list_t *objPtr;
-       struct cpumask *pcpum =0;
- extern struct list_head rlist_head;
+         struct cpumask *pcpum =0;
        extern struct list_head rlist_head;
          list_for_each(iter, &rlist_head) {
-           objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
-           i = objPtr->_data._processor;
-           pcpum  = &(objPtr->_data._cpumask);
-         if ( bitmap_intersects(cpumask_bits(pcpum),  
+         objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
+         i = objPtr->_data._processor;
+         pcpum  = &(objPtr->_data._cpumask);
+         if ( bitmap_intersects(cpumask_bits(pcpum),  
                                &(current->previous_cpus),
 -                              (sizeof(unsigned long) *8)) )
 +                              (sizeof(unsigned long) *8)) )*/
  #endif
              pcn_kmsg_send(i, (struct pcn_kmsg_message*)&msg);
          }
@@@ -5723,16 -7021,19 +7122,19 @@@ int process_server_pull_remote_mappings
      for(i = 0; i < NR_CPUS; i++) {
          // Skip the current cpu
          if(i == _cpu) continue;
/*#else
-     // the list does not include the current processor group descirptor (TODO)
+ #else
+         // the list does not include the current processor group descriptor (TODO)
      struct list_head *iter;
      _remote_cpu_info_list_t *objPtr;
- extern struct list_head rlist_head;
    extern struct list_head rlist_head;
      list_for_each(iter, &rlist_head) { 
          objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
 -        i = objPtr->_data._processor;
 +        i = objPtr->_data._processor;*/
  #endif
          // Send the request to this cpu.
+ #ifdef PROCESS_SERVER_HOST_PROC_ENTRY
+         request.send_time = native_read_tsc();
+ #endif
          s = pcn_kmsg_send(i,(struct pcn_kmsg_message*)(&request));
          if(!s) {
              // A successful send operation, increase the number
@@@ -6111,17 -7473,10 +7591,19 @@@ static int do_migration_to_new_cpu(stru
      task->represents_remote = 1;
      task->t_distributed = 1;
  
 +    /*mklinux_akshay*/
 +    if (task->prev_pid == -1)
 +      task->origin_pid = task->pid;
 +    /* else origin_pid already carries the pid assigned on the home kernel */
 +
 +   struct task_struct *par = task->parent;
 +
 +
      // Book keeping for distributed threads.
      task->tgroup_distributed = 1;
+     read_lock(&tasklist_lock);
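+     // tasklist_lock protects the do_each_thread() walk below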
      do_each_thread(g,tgroup_iterator) {
          if(tgroup_iterator != task) {
              if(tgroup_iterator->tgid == task->tgid) {
      request->normal_prio = task->normal_prio;
      request->rt_priority = task->rt_priority;
      request->sched_class = task->policy;
+     request->personality = task->personality;
+     
 +
 +    /*mklinux_akshay*/
 +    if (task->prev_pid == -1)
 +      request->origin_pid = task->pid;
 +    else
 +      request->origin_pid = task->origin_pid;
 +    request->remote_blocked = task->blocked;
 +    request->remote_real_blocked = task->real_blocked;
 +    request->remote_saved_sigmask = task->saved_sigmask;
 +    request->remote_pending = task->pending;
 +    request->sas_ss_sp = task->sas_ss_sp;
 +    request->sas_ss_size = task->sas_ss_size;
 +    int cnt = 0;
 +    for (cnt = 0; cnt < _NSIG; cnt++)
 +      request->action[cnt] = task->sighand->action[cnt];
 +
      // struct thread_struct -------------------------------------------------------
      // have a look at: copy_thread() arch/x86/kernel/process_64.c 
      // have a look at: struct thread_struct arch/x86/include/asm/processor.h
diff --cc kernel/sched.c
@@@ -74,7 -74,7 +74,7 @@@
  #include <linux/init_task.h>
  #include <linux/process_server.h>
  #ifdef SUPPORT_FOR_CLUSTERING
- //#include <linux/popcorn.h>
 -#include <linux/popcorn.h>
++#include <linux/popcorn_cpuinfo.h>
  #endif
  
  #include <asm/tlb.h>
@@@ -5616,25 -5606,25 +5616,25 @@@ if ( !cpumask_intersects(in_mask, cpu_p
          objPtr = list_entry(iter, _remote_cpu_info_list_t, cpu_list_member);
          i = objPtr->_data._processor;
          pcpum = &(objPtr->_data._cpumask);
-         if ( cpumask_intersects(in_mask, pcpum) ) {*/
+         if ( cpumask_intersects(in_mask, pcpum) ) {
  #endif
-         // TODO ask the global scheduler if there are multiple affinities    
-       // do the migration
+             // TODO ask the global scheduler if there are multiple affinities    
+               // do the migration
              get_task_struct(p);
              rcu_read_unlock();
 -            process_server_do_migration(p,i);
 +            ret =process_server_do_migration(p,i);
              put_task_struct(p);
              put_online_cpus();
 -
 -            do {
 -                spin = 0;
 -                schedule(); // this will save us from death
 -                if(current->return_disposition == RETURN_DISPOSITION_NONE) {
 -                    __set_task_state(current,TASK_UNINTERRUPTIBLE);
 -                    spin = 1;
 -                }
 -            } while (spin);
 -
 +            printk(KERN_ALERT"sched_setaffinity tsk{%d} state{%d} on run q{%d} RET{%d} current{%s} \n",p->pid,p->state,p->on_rq,ret,current->comm);
 +            schedule(); // this will save us from death
 +      /*      do {
 +                   spin = 0;
 +                   schedule(); // this will save us from death
 +                   if(current->return_disposition == RETURN_DISPOSITION_NONE) {
 +                        __set_task_state(current,TASK_UNINTERRUPTIBLE);
 +                         spin = 1;
 +                     }
 +                } while (spin);*/
              // We are here because of either the task is exiting,
              // or because the task is migrating back.  Let's handle
              // that now.  If we're migrating back, this function