#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>
#include <nucleus/sched-sporadic.h>

#ifndef CONFIG_XENO_OPT_DEBUG_NUCLEUS
#define CONFIG_XENO_OPT_DEBUG_NUCLEUS 0
#endif

/* Scheduler status flags. */
#define XNKCOUT   0x80000000  /* Sched callout context */
#define XNHTICK   0x40000000  /* Host tick pending */
#define XNRPICK   0x20000000  /* Check RPI state */
#define XNINTCK   0x10000000  /* In master tick handler context */
#define XNINIRQ   0x08000000  /* In IRQ handling context */
#define XNSWLOCK  0x04000000  /* In context switch */
#define XNRESCHED 0x02000000  /* Needs rescheduling */
#define XNHDEFER  0x01000000  /* Host tick deferred */

struct xnsched_rt {
        xnsched_queue_t runnable;       /* Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
        xnsched_queue_t relaxed;        /* Relaxed thread queue. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */
};
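
/*
 * Illustrative sketch, not part of the original header: the status
 * bits above are meant for the nucleus bitmask helpers (setbits() is
 * used further down in this file; testbits() is assumed to be its
 * read-side counterpart).
 */
#if 0 /* example only, not compiled */
static inline int example_in_irq_context(struct xnsched *sched)
{
        /* Non-zero whenever this scheduler runs IRQ handling code. */
        return !!testbits(sched->status, XNINIRQ);
}
#endif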

/* Scheduling information structure (one instance per CPU). */
typedef struct xnsched {

        xnflags_t status;               /* Scheduler-specific status bitmask. */
        int cpu;                        /* CPU this scheduler slot serves. */
        struct xnthread *curr;          /* Current thread. */
        xnarch_cpumask_t resched;       /* Mask of CPUs needing rescheduling. */

        struct xnsched_rt rt;           /* Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
        struct xnsched_tp tp;           /* Context of temporal partitioning class. */
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        struct xnsched_sporadic pss;    /* Context of sporadic scheduling class. */
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */

        xntimerq_t timerqueue;          /* Timer queue. */
        volatile unsigned inesting;     /* Interrupt nesting level. */
        struct xntimer htimer;          /* Host timer. */
        struct xnthread *zombie;        /* Thread pending finalization. */
        struct xnthread rootcb;         /* Root thread control block. */
#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
        struct xnthread *last;          /* Last thread switched out, unlocked switch. */
#endif /* CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_HW_FPU
        struct xnthread *fpuholder;     /* Thread owning the current FPU context. */
#endif /* CONFIG_XENO_HW_FPU */

#ifdef CONFIG_XENO_OPT_WATCHDOG
        struct xntimer wdtimer;         /* Watchdog timer object. */
        int wdcount;                    /* Watchdog tick count. */
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#ifdef CONFIG_XENO_OPT_STATS
        xnticks_t last_account_switch;  /* Last account switch date. */
        xnstat_exectime_t *current_account;     /* Currently active account entry. */
#endif /* CONFIG_XENO_OPT_STATS */

#ifdef CONFIG_XENO_OPT_PRIOCPL
        DECLARE_XNLOCK(rpilock);        /* RPI lock. */
#endif /* CONFIG_XENO_OPT_PRIOCPL */

#ifdef CONFIG_XENO_OPT_PERVASIVE
        struct task_struct *gatekeeper; /* Gatekeeper kernel thread. */
        wait_queue_head_t gkwaitq;      /* Gatekeeper wait queue. */
        struct linux_semaphore gksync;  /* Gatekeeper sync semaphore. */
        struct xnthread *gktarget;      /* Thread awaiting migration to primary mode. */
#endif /* CONFIG_XENO_OPT_PERVASIVE */

} xnsched_t;

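/*
 * Illustrative sketch, not from the original header: one xnsched
 * instance exists per CPU, and nucleus code typically operates on the
 * local one. xnpod_current_sched() and xnsched_cpu() are both
 * referenced later in this file; their pairing here is an assumed
 * usage pattern.
 */
#if 0 /* example only, not compiled */
static inline int example_local_cpu(void)
{
        struct xnsched *sched = xnpod_current_sched();

        return xnsched_cpu(sched);      /* Always 0 on UP builds. */
}
#endif
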
union xnsched_policy_param;

/*
 * Scheduling class descriptor: the set of policy handlers plus
 * class-wide bookkeeping (thread count, priority weight, linkage).
 */
struct xnsched_class {

        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnthread *curr);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        void (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
        struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
                                           struct xnthread *thread);
        void (*sched_pop_rpi)(struct xnthread *thread);
        struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
        void (*sched_suspend_rpi)(struct xnthread *thread);
        void (*sched_resume_rpi)(struct xnthread *thread);
#endif /* CONFIG_XENO_OPT_PRIOCPL */
#ifdef CONFIG_PROC_FS
        void (*sched_init_proc)(struct proc_dir_entry *root);
        void (*sched_cleanup_proc)(struct proc_dir_entry *root);
        struct proc_dir_entry *proc;
#endif /* CONFIG_PROC_FS */
        int nthreads;                   /* Number of threads attached to this class. */
        struct xnsched_class *next;     /* Link in the class list. */
        int weight;                     /* Priority weight (see XNSCHED_CLASS_WEIGHT). */
        const char *name;
};
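
/*
 * Sketch of a class descriptor definition (illustrative; the
 * example_* handlers and the class itself are assumptions, not code
 * from the tree). Registration is assumed to go through
 * xnsched_register_classes(), declared below.
 */
#if 0 /* example only, not compiled */
static struct xnsched_class xnsched_class_example = {
        .sched_init     = example_init,
        .sched_enqueue  = example_enqueue,
        .sched_dequeue  = example_dequeue,
        .sched_requeue  = example_requeue,
        .sched_pick     = example_pick,
        .weight         = XNSCHED_CLASS_WEIGHT(1),
        .name           = "example",
};
#endif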

#define XNSCHED_CLASS_MAX_THREADS       32768
#define XNSCHED_CLASS_WEIGHT(n)         ((n) * XNSCHED_CLASS_MAX_THREADS)

/* Placeholder for current thread priority. */
#define XNSCHED_RUNPRIO                 0x80000000

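/*
 * Worked example: with XNSCHED_CLASS_MAX_THREADS at 32768,
 * XNSCHED_CLASS_WEIGHT(2) evaluates to 65536. Provided per-class
 * priorities stay below 32768, the weighted priority bands of two
 * classes can never overlap, so class rank always dominates thread
 * priority (see xnsched_weighted_cprio() below).
 */
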
#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)  ((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)  ({ (void)(__sched__); 0; })
#endif /* CONFIG_SMP */

/* Test whether any CPU in the given scheduler's mask needs rescheduling. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return !xnarch_cpus_empty(sched->resched);
}

/* Test whether the given scheduler has a self-resched request pending. */
static inline int xnsched_self_resched_p(struct xnsched *sched)
{
        return xnarch_cpu_isset(xnsched_cpu(sched), sched->resched);
}

/* Set self resched flag of the given scheduler. */
#define xnsched_set_self_resched(__sched__) do {                        \
        xnarch_cpu_set(xnsched_cpu(__sched__), (__sched__)->resched);   \
        setbits((__sched__)->status, XNRESCHED);                        \
} while (0)

/* Set resched flag for the given scheduler into the local mask. */
#define xnsched_set_resched(__sched__) do {                             \
        xnsched_t *current_sched = xnpod_current_sched();               \
        xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched); \
        setbits(current_sched->status, XNRESCHED);                      \
} while (0)
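
/*
 * Usage sketch (assumed pattern, not from this header): a wakeup path
 * that readies a thread on another scheduler flags that scheduler for
 * rescheduling, then folds pending requests; xnpod_schedule() is the
 * rescheduling entry point referenced below, and thread->sched is
 * assumed to point at the thread's current scheduler.
 */
#if 0 /* example only, not compiled */
static inline void example_ready_remote(struct xnthread *thread)
{
        xnsched_set_resched(thread->sched);
        xnpod_schedule();
}
#endif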

void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
        if (sched->zombie)
                __xnsched_finalize_zombie(sched);
}

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnpod_schedule()

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
           xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)       \
        ({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());    \
           (__sched__); })
#endif /* CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch() do { } while (0)

#endif /* CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
        sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

void xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);
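
/*
 * Usage sketch: switching a thread to the built-in RT class. The
 * .rt.prio member layout of union xnsched_policy_param is assumed
 * from the class parameter structures; a non-zero return is assumed
 * to propagate a veto from the class (e.g. via sched_declare).
 */
#if 0 /* example only, not compiled */
static inline int example_make_rt(struct xnthread *thread, int prio)
{
        union xnsched_policy_param param;

        param.rt.prio = prio;
        return xnsched_set_policy(thread, &xnsched_class_rt, &param);
}
#endif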

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/*
 * Ask the given scheduling class to rotate its runnable queue on this
 * scheduler, moving the current leader of the priority group selected
 * by sched_param to the end of that group (round-robin).
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}
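
/*
 * Sketch (assumed usage): a manual yield could rotate the RT ready
 * queue at a given priority level. The .rt.prio layout of union
 * xnsched_policy_param is assumed from the class parameter
 * structures; xnsched_class_rt comes from <nucleus/sched-rt.h>.
 */
#if 0 /* example only, not compiled */
static inline void example_rotate_rt(struct xnsched *sched, int prio)
{
        union xnsched_policy_param param;

        param.rt.prio = prio;
        xnsched_rotate(sched, &xnsched_class_rt, &param);
}
#endif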

static inline int xnsched_init_tcb(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_tcb(thread);
        xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_tcb(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
        return ret;
}
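
/*
 * Assumed caller pattern: thread creation should propagate failures
 * from the per-class TCB setup above, since the TP and sporadic hooks
 * may fail (e.g. on allocation); a sketch:
 */
#if 0 /* example only, not compiled */
        ret = xnsched_init_tcb(thread);
        if (ret)
                return ret;     /* Abort thread creation. */
#endif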

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread consumes its round-robin time slice only when it
         * runs on its own time base and within its base scheduling
         * class (i.e. no temporary boost), is not blocked and does
         * not hold the scheduler lock: the state test below requires
         * XNRRB to be set with all blocking bits and XNLOCK clear.
         */
        if (xnthread_time_base(curr) == tbase &&
            sched_class != &xnsched_class_idle &&
            sched_class == curr->base_class &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
                sched_class->sched_tick(curr);
}

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio + thread->sched_class->weight;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio + thread->sched_class->weight;
}
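
/*
 * Worked example: under the XNSCHED_CLASS_WEIGHT() spacing, a cprio-1
 * thread in a weight-65536 class has weighted cprio 65537, beating a
 * cprio-99 thread in a weight-32768 class (weighted cprio 32867).
 * Class rank dominates; per-class priority only orders threads within
 * one class.
 */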

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        thread->sched_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        thread->sched_class->sched_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_suspend_rpi)
                sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class->sched_resume_rpi)
                sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline the common helpers for dealing with them.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
        return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
        return thread->cprio;
}

static inline void xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_setparam(thread, p);
        else
                __xnsched_idle_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_getparam(thread, p);
        else
                __xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_trackprio(thread, p);
        else
                __xnsched_idle_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
                                                struct xnthread *thread)
{
        return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
        __xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
        __xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
        __xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

void xnsched_renice_root(struct xnsched *sched,
                         struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* __KERNEL__ || __XENO_SIM__ */

#endif /* !_XENO_NUCLEUS_SCHED_H */