#ifndef _XENO_NUCLEUS_POD_H
#define _XENO_NUCLEUS_POD_H

#include <nucleus/sched.h>

/* Pod status flags. */
#define XNFATAL 0x00000001	/* Fatal error in progress */
#define XNPEXEC 0x00000002	/* Pod is up and executable */

/* These bits are reserved for use by the real-time interfaces (skins). */
#define XNPOD_SPARE0 0x01000000
#define XNPOD_SPARE1 0x02000000
#define XNPOD_SPARE2 0x04000000
#define XNPOD_SPARE3 0x08000000
#define XNPOD_SPARE4 0x10000000
#define XNPOD_SPARE5 0x20000000
#define XNPOD_SPARE6 0x40000000
#define XNPOD_SPARE7 0x80000000

/* Exit codes passed to xnpod_shutdown(). */
#define XNPOD_NORMAL_EXIT 0x0
#define XNPOD_FATAL_EXIT  0x1

#define XNPOD_ALL_CPUS XNARCH_CPU_MASK_ALL

#define XNPOD_FATAL_BUFSZ 16384		/* Size of the fatal message buffer. */

#define nkpod (&nkpod_struct)		/* Shorthand for the unique pod instance. */

struct xnsynch;

/*
 * The pod descriptor: aggregates the per-CPU schedulers and the
 * global thread-related state of the nucleus.
 */
struct xnpod {

	xnflags_t status;		/* Status bitmask (XNFATAL, XNPEXEC...). */

	xnsched_t sched[XNARCH_NR_CPUS]; /* Per-CPU scheduler slots. */

	xnqueue_t threadq;		/* All existing threads. */
	int threadq_rev;		/* Modification counter of threadq. */

	xnqueue_t tstartq,		/* Thread start hook queue. */
		tswitchq,		/* Thread switch hook queue. */
		tdeleteq;		/* Thread delete hook queue. */

	atomic_counter_t timerlck;	/* Timer lock depth. */

	xntimer_t tslicer;		/* Round-robin time-slicing timer. */
	int tsliced;			/* Number of threads using the slicer. */

	int refcnt;			/* Reference count (attached skins). */

#ifdef __XENO_SIM__
	void (*schedhook) (xnthread_t *thread, xnflags_t mask); /* Simulator scheduling hook. */
#endif /* __XENO_SIM__ */
};

typedef struct xnpod xnpod_t;

DECLARE_EXTERN_XNLOCK(nklock);		/* The big nucleus lock. */

extern u_long nklatency;		/* Scheduling latency correction. */

extern u_long nktimerlat;		/* Timer programming latency. */

extern char *nkmsgbuf;			/* Buffer for fatal error messages. */

extern xnarch_cpumask_t nkaffinity;	/* Default thread affinity mask. */

extern xnpod_t nkpod_struct;		/* The unique pod instance. */

#ifdef CONFIG_PROC_FS
void xnpod_init_proc(void);
void xnpod_cleanup_proc(void);
#else
static inline void xnpod_init_proc(void) {}
static inline void xnpod_cleanup_proc(void) {}
#endif /* CONFIG_PROC_FS */

static inline void xnpod_mount(void)
{
	xnsched_register_classes();
	xnpod_init_proc();
}

static inline void xnpod_umount(void)
{
	xnpod_cleanup_proc();
}

#ifdef __cplusplus
extern "C" {
#endif

int __xnpod_set_thread_schedparam(struct xnthread *thread,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param,
				  int propagate);

void __xnpod_reset_thread(struct xnthread *thread);

#ifdef CONFIG_XENO_HW_FPU
void xnpod_switch_fpu(xnsched_t *sched);
#endif /* CONFIG_XENO_HW_FPU */

void __xnpod_schedule(struct xnsched *sched);

/* Per-CPU scheduler slot accessors. */

#define xnpod_sched_slot(cpu) \
    (&nkpod->sched[cpu])

#define xnpod_current_sched() \
    xnpod_sched_slot(xnarch_current_cpu())

/* Pod and calling-context predicates. */

#define xnpod_active_p() \
    testbits(nkpod->status, XNPEXEC)

#define xnpod_fatal_p() \
    testbits(nkpod->status, XNFATAL)

#define xnpod_interrupt_p() \
    testbits(xnpod_current_sched()->status, XNINIRQ)

#define xnpod_callout_p() \
    testbits(xnpod_current_sched()->status, XNKCOUT)

#define xnpod_asynch_p() \
    testbits(xnpod_current_sched()->status, XNKCOUT|XNINIRQ)

#define xnpod_current_thread() \
    (xnpod_current_sched()->curr)

#define xnpod_current_root() \
    (&xnpod_current_sched()->rootcb)

#ifdef CONFIG_XENO_OPT_PERVASIVE
#define xnpod_current_p(thread)						\
    ({ int __shadow_p = xnthread_test_state(thread, XNSHADOW);		\
       int __curr_p = __shadow_p ? xnshadow_thread(current) == thread	\
	       : thread == xnpod_current_thread();			\
       __curr_p; })
#else /* !CONFIG_XENO_OPT_PERVASIVE */
#define xnpod_current_p(thread) \
    (xnpod_current_thread() == (thread))
#endif /* !CONFIG_XENO_OPT_PERVASIVE */

#define xnpod_locked_p() \
    xnthread_test_state(xnpod_current_thread(), XNLOCK)

#define xnpod_unblockable_p() \
    (xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

#define xnpod_root_p() \
    xnthread_test_state(xnpod_current_thread(), XNROOT)

#define xnpod_shadow_p() \
    xnthread_test_state(xnpod_current_thread(), XNSHADOW)

#define xnpod_userspace_p() \
    xnthread_test_state(xnpod_current_thread(), XNROOT|XNSHADOW)

#define xnpod_primary_p() \
    (!(xnpod_asynch_p() || xnpod_root_p()))

#define xnpod_secondary_p()	xnpod_root_p()

#define xnpod_idle_p()		xnpod_root_p()

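/*
 * Illustrative sketch (not part of the original header): a typical
 * guard a skin service might place before attempting to block the
 * caller.  Blocking is invalid from interrupt/callout context and for
 * the root thread; xnpod_unblockable_p() covers both cases.  The
 * function name and the -EPERM convention are just placeholders.
 */
#if 0
static int my_skin_wait_service(void)
{
	if (xnpod_unblockable_p())
		return -EPERM;	/* Cannot block from this context. */

	/* ...safe to call a blocking nucleus service here... */
	return 0;
}
#endif
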
int xnpod_init(void);

int xnpod_enable_timesource(void);

void xnpod_disable_timesource(void);

void xnpod_shutdown(int xtype);

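/*
 * Illustrative sketch (not part of the original header): a typical
 * bring-up/tear-down sequence a skin module could follow, using only
 * the services declared above.  The module entry points are
 * placeholders, and disabling the time source explicitly before
 * shutdown is shown for symmetry, not as a requirement.
 */
#if 0
static int my_skin_init(void)
{
	int err;

	err = xnpod_init();	/* Attach to (or bring up) the pod. */
	if (err)
		return err;

	err = xnpod_enable_timesource();
	if (err) {
		xnpod_shutdown(XNPOD_NORMAL_EXIT);
		return err;
	}

	return 0;
}

static void my_skin_exit(void)
{
	xnpod_disable_timesource();
	xnpod_shutdown(XNPOD_NORMAL_EXIT);
}
#endif
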
int xnpod_init_thread(struct xnthread *thread,
		      const struct xnthread_init_attr *attr,
		      struct xnsched_class *sched_class,
		      const union xnsched_policy_param *sched_param);

int xnpod_start_thread(xnthread_t *thread,
		       const struct xnthread_start_attr *attr);

void xnpod_stop_thread(xnthread_t *thread);

void xnpod_restart_thread(xnthread_t *thread);

void xnpod_delete_thread(xnthread_t *thread);

void xnpod_abort_thread(xnthread_t *thread);

xnflags_t xnpod_set_thread_mode(xnthread_t *thread,
				xnflags_t clrmask,
				xnflags_t setmask);

void xnpod_suspend_thread(xnthread_t *thread,
			  xnflags_t mask,
			  xnticks_t timeout,
			  xntmode_t timeout_mode,
			  struct xnsynch *wchan);

void xnpod_resume_thread(xnthread_t *thread,
			 xnflags_t mask);

int xnpod_unblock_thread(xnthread_t *thread);

int xnpod_set_thread_schedparam(struct xnthread *thread,
				struct xnsched_class *sched_class,
				const union xnsched_policy_param *sched_param);

int xnpod_migrate_thread(int cpu);

void xnpod_dispatch_signals(void);

static inline void xnpod_schedule(void)
{
	struct xnsched *sched;

	sched = xnpod_current_sched();
	/*
	 * No immediate rescheduling is possible from a scheduler
	 * callout or IRQ context, or while a context switch is in
	 * progress (XNSWLOCK).
	 */
#ifdef CONFIG_XENO_OPT_DEBUG_NUCLEUS
	if (testbits(sched->status, XNKCOUT|XNINIRQ|XNSWLOCK))
		return;
#else /* !CONFIG_XENO_OPT_DEBUG_NUCLEUS */
	/*
	 * The non-debug variant additionally requires a reschedule to
	 * be pending (XNRESCHED) before taking the slow path.
	 */
	if (testbits(sched->status,
		     XNKCOUT|XNINIRQ|XNSWLOCK|XNRESCHED) != XNRESCHED)
		return;
#endif /* !CONFIG_XENO_OPT_DEBUG_NUCLEUS */

	__xnpod_schedule(sched);
}
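/*
 * Illustrative sketch (not part of the original header): the common
 * "update thread state, then reschedule" pattern.  spl_t and the
 * xnlock_get_irqsave/xnlock_put_irqrestore helpers come from other
 * nucleus headers and are assumed here; XNDELAY is the blocking
 * condition being cleared in this particular example.
 */
#if 0
static void wake_up_sleeper(xnthread_t *sleeper)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);
	xnpod_resume_thread(sleeper, XNDELAY);	/* Clear the delay condition. */
	xnlock_put_irqrestore(&nklock, s);

	xnpod_schedule();	/* Apply the change if a switch is needed. */
}
#endif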

void xnpod_lock_sched(void);

void xnpod_unlock_sched(void);

void xnpod_fire_callouts(xnqueue_t *hookq,
			 xnthread_t *thread);

static inline void xnpod_run_hooks(struct xnqueue *q,
				   struct xnthread *thread, const char *type)
{
	if (!emptyq_p(q) && !xnthread_test_state(thread, XNROOT)) {
		trace_mark(xn_nucleus, thread_callout,
			   "thread %p thread_name %s hook %s",
			   thread, xnthread_name(thread), type);
		xnpod_fire_callouts(q, thread);
	}
}

int xnpod_set_thread_periodic(xnthread_t *thread,
			      xnticks_t idate,
			      xnticks_t period);

int xnpod_wait_thread_period(unsigned long *overruns_r);

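/*
 * Illustrative sketch (not part of the original header): a periodic
 * loop built on the two services above, run from a nucleus thread.
 * The period value is a placeholder, and the error handling shown is
 * not exhaustive.
 */
#if 0
static void periodic_loop(void)
{
	const xnticks_t period = 1000000; /* Placeholder: 1 ms in a ns time base. */
	unsigned long overruns;
	int err;

	/* Arm the periodic release train on the current thread;
	   XN_INFINITE as the start date means "start from now". */
	err = xnpod_set_thread_periodic(xnpod_current_thread(),
					XN_INFINITE, period);
	if (err)
		return;

	for (;;) {
		err = xnpod_wait_thread_period(&overruns);
		if (err == -ETIMEDOUT)
			;	/* Late: 'overruns' release points were missed. */
		else if (err)
			break;	/* Unblocked or no longer periodic. */

		/* ...do the periodic work here... */
	}
}
#endif
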
int xnpod_set_thread_tslice(struct xnthread *thread,
			    xnticks_t quantum);

static inline xntime_t xnpod_get_cpu_time(void)
{
	return xnarch_get_cpu_time();
}

int xnpod_add_hook(int type, void (*routine) (xnthread_t *));

int xnpod_remove_hook(int type, void (*routine) (xnthread_t *));

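/*
 * Illustrative sketch (not part of the original header): registering
 * a cleanup routine to run whenever a thread is deleted.  The
 * XNHOOK_THREAD_DELETE hook type is assumed to be provided elsewhere
 * by the nucleus; the routines below are placeholders.
 */
#if 0
static void on_thread_delete(xnthread_t *thread)
{
	/* ...release per-thread resources tracked by the skin... */
}

static int install_hooks(void)
{
	return xnpod_add_hook(XNHOOK_THREAD_DELETE, on_thread_delete);
}

static void remove_hooks(void)
{
	xnpod_remove_hook(XNHOOK_THREAD_DELETE, on_thread_delete);
}
#endif
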
static inline void xnpod_yield(void)
{
	xnpod_resume_thread(xnpod_current_thread(), 0);
	xnpod_schedule();
}

static inline void xnpod_delay(xnticks_t timeout)
{
	xnpod_suspend_thread(xnpod_current_thread(), XNDELAY, timeout,
			     XN_RELATIVE, NULL);
}

static inline void xnpod_suspend_self(void)
{
	xnpod_suspend_thread(xnpod_current_thread(), XNSUSP, XN_INFINITE,
			     XN_RELATIVE, NULL);
}

static inline void xnpod_delete_self(void)
{
	xnpod_delete_thread(xnpod_current_thread());
}

#ifdef __cplusplus
}
#endif

#endif /* !_XENO_NUCLEUS_POD_H */