/*
 * kernel/sched/modsched/framework/modules/round_robin.c
 *
 * Round-robin scheduling module; selectable via make menuconfig under
 * "Kernel hacking".
 */

#include "fw.h"
#include "os.h"
#include "fw_comm.h"
#include "fw_modules.h"

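/*
 * Round-robin policy on top of the modular scheduler framework:
 * admitted tasks are appended to a single ready list (rr_processes),
 * rr_schedule() dispatches the head of that list and re-enqueues a
 * still RUNNING task at the tail, rr_task_wait() removes a blocking
 * task from the list, and rr_task_event() appends it again once it
 * becomes READY.  All list manipulation is protected by rr_lock.
 */
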
void rr_print_ready_list(void);
void rr_schedule(void);
void rr_task_admit(fw_task_t *task);
void rr_task_release(fw_task_t *task);
void rr_task_wait(fw_task_t *task);
void rr_task_event(fw_task_t *task);
void rr_init(void);

int module_uid;

int idle = 0;

FW_LIST_HEAD(rr_processes);
FW_SPINLOCK_CREATE(rr_lock);

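/* One ready-list entry: links a framework task into rr_processes. */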
struct rr_proc_list_elem {
	fw_list_head_t list;
	fw_task_t *task;
};

struct rr_proc_list_elem *rr_current_task = NULL;

struct prpc *current_process;

char rr_buf[120];

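/*
 * Debug helper: print a one-line description of every task on the
 * ready list.  Uses the shared rr_buf scratch buffer.
 */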
void rr_print_ready_list(void)
{
	struct rr_proc_list_elem *tmp;

	fw_printf("Task List:\n");

	fw_list_for_each_entry(tmp, &rr_processes, list) {
		fw_task_info(tmp->task, rr_buf, sizeof(rr_buf));
		fw_printf("\t%s\n", rr_buf);
	}
}

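/*
 * Core scheduling decision: re-enqueue the current task if it is still
 * RUNNING, wait until the ready list is non-empty, then pop the head of
 * the list and dispatch it.
 */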
void rr_schedule(void)
{
	struct rr_proc_list_elem *next_task;
	fw_task_t *task;

	fw_printf("invoked schedule\n");
	rr_print_ready_list();
	//printf("invoked schedule! current task state %d\n", fw_current_task(0)->state);

	/*
	 * A task that is still RUNNING has only used up its slot: wrap it
	 * in a fresh list element (as rr_task_admit() does, so the ready
	 * list only ever holds rr_proc_list_elem entries) and re-enqueue
	 * it at the tail.
	 */
	if (fw_current_task(0)->state == RUNNING) {
		struct rr_proc_list_elem *curr = fw_malloc((fw_size_t) sizeof(struct rr_proc_list_elem));

		curr->task = fw_current_task(0);

		os_spinlock(&rr_lock);
		fw_list_add_tail(&curr->list, &rr_processes);
		os_spinunlock(&rr_lock);
	}

	if (fw_list_empty(&rr_processes))
		fw_printf("start waiting\n");

	/* Busy-wait until at least one task becomes ready. */
	while (fw_list_empty(&rr_processes)) {
//		IRQ_ENABLE;
//		CPU_IDLE;
//		MEMORY_BARRIER;
//		IRQ_DISABLE;
	}

	fw_printf("finished waiting\n");

	os_spinlock(&rr_lock);

	fw_printf("ready tasks: %d\n", fw_list_length(&rr_processes));

	/* Round robin: always dispatch the head of the ready list. */
	next_task = fw_list_pop(&rr_processes, struct rr_proc_list_elem, list);
	task = next_task->task;

	current_process = task->real_task;

	os_spinunlock(&rr_lock);

	/* The list element is no longer needed once the task is dequeued. */
	fw_free(next_task);

	fw_printf("dispatching %s\n", os_task_info(task));

	fw_dispatch(task);
}

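/*
 * A new task enters the scheduler: wrap it in a list element and append
 * it to the tail of the ready list.
 */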
void rr_task_admit(fw_task_t *task)
{
	struct rr_proc_list_elem *new_task = fw_malloc((fw_size_t) sizeof(struct rr_proc_list_elem));

	fw_printf("RR registered creation of %s (%p)\n", os_task_info(task), task);

	new_task->task = task;

	os_spinlock(&rr_lock);

	fw_list_add_tail(&new_task->list, &rr_processes);
	rr_current_task = new_task;

	os_spinunlock(&rr_lock);
}

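/*
 * A task terminates: drop its entry from the ready list, if there is
 * one, and free the wrapper element.
 */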
void rr_task_release(fw_task_t *task)
{
	struct rr_proc_list_elem *tmp;

	fw_printf("RR registered deletion of %s (%p)\n", os_task_info(task), task);

	os_spinlock(&rr_lock);

	fw_list_for_each_entry(tmp, &rr_processes, list) {
		if (tmp->task == task) {
			fw_list_del(&tmp->list);
			fw_free(tmp);
			break;
		}
	}

	fw_printf("RR removed %s (%p)\n", os_task_info(task), task);

	os_spinunlock(&rr_lock);
}

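/*
 * A task blocks (starts waiting for an event): mark it BLOCKED and take
 * it off the ready list so rr_schedule() no longer considers it.
 */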
void rr_task_wait(fw_task_t *task)
{
	struct rr_proc_list_elem *tmp;

	fw_printf("RR registered suspension of %s (%p)\n", os_task_info(task), task);

	os_spinlock(&rr_lock);

	fw_printf("%d tasks available\n", fw_list_length(&rr_processes));

	task->state = BLOCKED;

//	current_process = NULL;
//	os_set_current_task(NULL);

	fw_list_for_each_entry(tmp, &rr_processes, list) {
		//fw_printf("considering %s (%p) for removal\n", os_task_info(tmp->task), tmp->task);

		if (tmp->task == task) {
			fw_list_del(&tmp->list);
			fw_free(tmp);
			break;
		}
	}

	fw_printf("%d tasks remaining\n", fw_list_length(&rr_processes));

	os_spinunlock(&rr_lock);
}

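/*
 * A blocked task is woken by an event: mark it READY and append it to
 * the tail of the ready list again.
 */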
void rr_task_event(fw_task_t *task)
{
	struct rr_proc_list_elem *new_task = fw_malloc((fw_size_t) sizeof(struct rr_proc_list_elem));

	new_task->task = task;

	os_spinlock(&rr_lock);

	task->state = READY;
	fw_list_add_tail(&new_task->list, &rr_processes);

	os_spinunlock(&rr_lock);

	fw_printf("task submitted\n");
}

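/*
 * Module entry point: register with the framework and subscribe the
 * callbacks above to the scheduling events they handle.  The framework
 * is assumed to call rr_init() once while the scheduler is brought up,
 * when this module has been selected in menuconfig.
 */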
void rr_init(void)
{
	module_uid = fw_register_module();

	fw_subscribe(ADMIT, &rr_task_admit);
	fw_subscribe(RELEASE, &rr_task_release);
	fw_subscribe(EVENT, &rr_task_event);
	fw_subscribe(EVENT_WAIT, &rr_task_wait);
	fw_subscribe(SCHEDULE, &rr_schedule);
}