include/nucleus/pod.h

#ifndef _XENO_NUCLEUS_POD_H
#define _XENO_NUCLEUS_POD_H

#include <nucleus/sched.h>

/* Pod status flags */
#define XNFATAL  0x00000001     /* Fatal error in progress */
#define XNPEXEC  0x00000002     /* Pod is active (a skin is attached) */

/* These flags are available to the real-time interfaces */
#define XNPOD_SPARE0  0x01000000
#define XNPOD_SPARE1  0x02000000
#define XNPOD_SPARE2  0x04000000
#define XNPOD_SPARE3  0x08000000
#define XNPOD_SPARE4  0x10000000
#define XNPOD_SPARE5  0x20000000
#define XNPOD_SPARE6  0x40000000
#define XNPOD_SPARE7  0x80000000

#define XNPOD_NORMAL_EXIT  0x0
#define XNPOD_FATAL_EXIT   0x1

#define XNPOD_ALL_CPUS  XNARCH_CPU_MASK_ALL

#define XNPOD_FATAL_BUFSZ  16384

#define nkpod (&nkpod_struct)

struct xnsynch;

/* Real-time pod descriptor. */
struct xnpod {

        xnflags_t status;       /* Status bitmask */
        xnsched_t sched[XNARCH_NR_CPUS];        /* Per-CPU scheduler slots */
        xnqueue_t threadq;      /* All existing threads */
#ifdef CONFIG_XENO_OPT_VFILE
        struct xnvfile_rev_tag threadlist_tag;
#endif
        xnqueue_t tstartq,      /* Thread start hook queue */
         tswitchq,              /* Thread switch hook queue */
         tdeleteq;              /* Thread delete hook queue */
        atomic_counter_t timerlck;      /* Timer lock depth */
        xntimer_t tslicer;      /* Time-slicing timer */
        int tsliced;            /* Number of threads using the slicer */
        int refcnt;             /* Reference count (attached skins) */
#ifdef __XENO_SIM__
        void (*schedhook) (xnthread_t *thread, xnflags_t mask); /* Simulator scheduling hook */
#endif  /* __XENO_SIM__ */
};

typedef struct xnpod xnpod_t;

DECLARE_EXTERN_XNLOCK(nklock);

extern u_long nklatency;

extern u_long nktimerlat;

extern xnarch_cpumask_t nkaffinity;

extern xnpod_t nkpod_struct;

#ifdef CONFIG_XENO_OPT_VFILE
int xnpod_init_proc(void);
void xnpod_cleanup_proc(void);
#else /* !CONFIG_XENO_OPT_VFILE */
static inline int xnpod_init_proc(void) { return 0; }
static inline void xnpod_cleanup_proc(void) {}
#endif /* !CONFIG_XENO_OPT_VFILE */

static inline int xnpod_mount(void)
{
        xnsched_register_classes();
        return xnpod_init_proc();
}

static inline void xnpod_umount(void)
{
        xnpod_cleanup_proc();
}

#ifdef __cplusplus
extern "C" {
#endif

int __xnpod_set_thread_schedparam(struct xnthread *thread,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param,
                                  int propagate);

#ifdef CONFIG_XENO_HW_FPU
void xnpod_switch_fpu(xnsched_t *sched);
#endif /* CONFIG_XENO_HW_FPU */

void __xnpod_schedule(struct xnsched *sched);

        /* -- Beginning of the exported interface */

#define xnpod_sched_slot(cpu) \
    (&nkpod->sched[cpu])

#define xnpod_current_sched() \
    xnpod_sched_slot(xnarch_current_cpu())

#define xnpod_active_p() \
    testbits(nkpod->status, XNPEXEC)

#define xnpod_fatal_p() \
    testbits(nkpod->status, XNFATAL)

#define xnpod_interrupt_p() \
    testbits(xnpod_current_sched()->lflags, XNINIRQ)

#define xnpod_callout_p() \
    testbits(xnpod_current_sched()->status, XNKCOUT)

#define xnpod_asynch_p() \
        ({                                                              \
                xnsched_t *sched = xnpod_current_sched();               \
                testbits(sched->status | sched->lflags, XNKCOUT|XNINIRQ); \
        })

#define xnpod_current_thread() \
    (xnpod_current_sched()->curr)

#define xnpod_current_root() \
    (&xnpod_current_sched()->rootcb)

#ifdef CONFIG_XENO_OPT_PERVASIVE
#define xnpod_current_p(thread)                                         \
    ({ int __shadow_p = xnthread_test_state(thread, XNSHADOW);          \
       int __curr_p = __shadow_p ? xnshadow_thread(current) == thread   \
           : thread == xnpod_current_thread();                          \
       __curr_p;})
#else
#define xnpod_current_p(thread) \
    (xnpod_current_thread() == (thread))
#endif

#define xnpod_locked_p() \
    xnthread_test_state(xnpod_current_thread(), XNLOCK)

#define xnpod_unblockable_p() \
    (xnpod_asynch_p() || xnthread_test_state(xnpod_current_thread(), XNROOT))

#define xnpod_root_p() \
    xnthread_test_state(xnpod_current_thread(),XNROOT)

#define xnpod_shadow_p() \
    xnthread_test_state(xnpod_current_thread(),XNSHADOW)

#define xnpod_userspace_p() \
    xnthread_test_state(xnpod_current_thread(),XNROOT|XNSHADOW)

#define xnpod_primary_p() \
    (!(xnpod_asynch_p() || xnpod_root_p()))

#define xnpod_secondary_p()     xnpod_root_p()

#define xnpod_idle_p()          xnpod_root_p()

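The predicates above only read the current CPU's scheduler state, so they are cheap enough to gate entry into nucleus services. As a usage sketch (not part of this header; the service name and the -EPERM policy are assumptions), a skin-level call that may block could be guarded like this, relying on xnpod_delay() defined further down in this file:

static int my_skin_sleep(xnticks_t timeout)
{
        if (xnpod_unblockable_p())
                /* ISR, callout or root (Linux) context: blocking is forbidden. */
                return -EPERM;

        /* We run on behalf of a Xenomai thread; a timed suspension is safe. */
        xnpod_delay(timeout);

        return 0;
}
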
int xnpod_init(void);

int xnpod_enable_timesource(void);

void xnpod_disable_timesource(void);

void xnpod_shutdown(int xtype);

int xnpod_init_thread(struct xnthread *thread,
                      const struct xnthread_init_attr *attr,
                      struct xnsched_class *sched_class,
                      const union xnsched_policy_param *sched_param);

int xnpod_start_thread(xnthread_t *thread,
                       const struct xnthread_start_attr *attr);

void xnpod_stop_thread(xnthread_t *thread);

void xnpod_restart_thread(xnthread_t *thread);

void xnpod_delete_thread(xnthread_t *thread);

void xnpod_abort_thread(xnthread_t *thread);

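A minimal creation sequence using the lifecycle services above, shown as a sketch only: spawn_worker() and the static thread object are illustrative names, and the attribute structures (declared in nucleus/thread.h) must be filled by the caller with the thread name, stack size, entry point and so on.

static xnthread_t worker;

static int spawn_worker(const struct xnthread_init_attr *iattr,
                        const struct xnthread_start_attr *sattr,
                        struct xnsched_class *sched_class,
                        const union xnsched_policy_param *param)
{
        int err;

        /* Set up the thread control block; the thread stays dormant. */
        err = xnpod_init_thread(&worker, iattr, sched_class, param);
        if (err)
                return err;

        /* Make it runnable according to the start attributes. */
        err = xnpod_start_thread(&worker, sattr);
        if (err)
                xnpod_delete_thread(&worker);

        return err;
}
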
xnflags_t xnpod_set_thread_mode(xnthread_t *thread,
                                xnflags_t clrmask,
                                xnflags_t setmask);

void xnpod_suspend_thread(xnthread_t *thread,
                          xnflags_t mask,
                          xnticks_t timeout,
                          xntmode_t timeout_mode,
                          struct xnsynch *wchan);

void xnpod_resume_thread(xnthread_t *thread,
                         xnflags_t mask);

int xnpod_unblock_thread(xnthread_t *thread);

int xnpod_set_thread_schedparam(struct xnthread *thread,
                                struct xnsched_class *sched_class,
                                const union xnsched_policy_param *sched_param);

int xnpod_migrate_thread(int cpu);

void xnpod_dispatch_signals(void);

static inline void xnpod_schedule(void)
{
        struct xnsched *sched;
        /*
         * NOTE: Since __xnpod_schedule() won't run if an escalation
         * to primary domain is needed, we won't use critical
         * scheduler information before we actually run in primary
         * mode; therefore we can first test the scheduler status then
         * escalate.  Running in the primary domain means that no
         * Linux-triggered CPU migration may occur from that point
         * either. Finally, since migration is always a self-directed
         * operation for Xenomai threads, we can safely read the
         * scheduler state bits without holding the nklock.
         *
         * Said differently, if we race here because of a CPU
         * migration, it must have been Linux-triggered because we run
         * in secondary mode; in which case we will escalate to the
         * primary domain, then unwind the current call frame without
         * running the rescheduling procedure in
         * __xnpod_schedule(). Therefore, the scheduler pointer will
         * be either valid, or unused.
         */
        sched = xnpod_current_sched();
        /*
         * No immediate rescheduling is possible if an ISR or callout
         * context is active, or if we are caught in the middle of an
         * unlocked context switch.
         */
#if XENO_DEBUG(NUCLEUS)
        if (testbits(sched->status | sched->lflags,
                     XNKCOUT|XNINIRQ|XNINSW|XNINLOCK))
                return;
#else /* !XENO_DEBUG(NUCLEUS) */
        if (testbits(sched->status | sched->lflags,
                     XNKCOUT|XNINIRQ|XNINSW|XNRESCHED|XNINLOCK) != XNRESCHED)
                return;
#endif /* !XENO_DEBUG(NUCLEUS) */

        __xnpod_schedule(sched);
}

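The helper below illustrates the usual rescheduling pattern around xnpod_schedule(): make a thread runnable while holding nklock, drop the lock, then let the inline above call __xnpod_schedule() only if the XNRESCHED condition was raised. The function name is illustrative, and XNPEND (the pending-on-resource condition from nucleus/thread.h) assumes the waiter was previously suspended with that bit.

static void wakeup_one_waiter(xnthread_t *waiter)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        /* Clear the blocking condition; marks the scheduler for rescheduling. */
        xnpod_resume_thread(waiter, XNPEND);
        xnlock_put_irqrestore(&nklock, s);

        /* Runs the rescheduling procedure only if actually needed. */
        xnpod_schedule();
}
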
void ___xnpod_lock_sched(xnsched_t *sched);

void ___xnpod_unlock_sched(xnsched_t *sched);

static inline void __xnpod_lock_sched(void)
{
        xnsched_t *sched;

        barrier();
        sched = xnpod_current_sched();
        ___xnpod_lock_sched(sched);
}

static inline void __xnpod_unlock_sched(void)
{
        xnsched_t *sched;

        barrier();
        sched = xnpod_current_sched();
        ___xnpod_unlock_sched(sched);
}

static inline void xnpod_lock_sched(void)
{
        xnsched_t *sched;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        sched = xnpod_current_sched();
        ___xnpod_lock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
}

static inline void xnpod_unlock_sched(void)
{
        xnsched_t *sched;
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        sched = xnpod_current_sched();
        ___xnpod_unlock_sched(sched);
        xnlock_put_irqrestore(&nklock, s);
}

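Since the scheduler lock nests, a thread may bracket a short critical section to avoid being preempted by other real-time threads on the same CPU; a minimal sketch (the function name and the protected data are placeholders):

static void update_shared_state(void)
{
        xnpod_lock_sched();     /* enter a non-preemptible section (nestable) */

        /* ... update data shared with other threads on this CPU ... */

        xnpod_unlock_sched();   /* may reschedule once the lock count drops to zero */
}
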
void xnpod_fire_callouts(xnqueue_t *hookq,
                         xnthread_t *thread);

static inline void xnpod_run_hooks(struct xnqueue *q,
                                   struct xnthread *thread, const char *type)
{
        if (!emptyq_p(q)) {
                trace_mark(xn_nucleus, thread_callout,
                           "thread %p thread_name %s hook %s",
                           thread, xnthread_name(thread), type);
                xnpod_fire_callouts(q, thread);
        }
}

int xnpod_set_thread_periodic(xnthread_t *thread,
                              xnticks_t idate,
                              xnticks_t period);

int xnpod_wait_thread_period(unsigned long *overruns_r);

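A sketch of a periodic loop built on the two services above; the entry point and period value are illustrative. Passing XN_INFINITE as the initial date requests the first release point as soon as possible, and -ETIMEDOUT from xnpod_wait_thread_period() reports overruns rather than a fatal error.

static void periodic_loop(void *cookie)
{
        xnticks_t period = 1000;        /* illustrative, in the thread's time base */
        unsigned long overruns;
        int err;

        (void)cookie;   /* unused in this sketch */

        err = xnpod_set_thread_periodic(xnpod_current_thread(),
                                        XN_INFINITE, period);
        if (err)
                return;

        for (;;) {
                err = xnpod_wait_thread_period(&overruns);
                if (err && err != -ETIMEDOUT)
                        break;  /* non-recoverable, e.g. the period was cleared */
                /* ... periodic work; 'overruns' counts missed release points ... */
        }
}
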
int xnpod_set_thread_tslice(struct xnthread *thread,
                            xnticks_t quantum);

static inline xntime_t xnpod_get_cpu_time(void)
{
        return xnarch_get_cpu_time();
}

int xnpod_add_hook(int type, void (*routine) (xnthread_t *));

int xnpod_remove_hook(int type, void (*routine) (xnthread_t *));

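The hook services let a skin run a routine whenever a thread is started, switched in, or deleted. A registration sketch follows; thread_cleanup_hook() is an illustrative name and the XNHOOK_THREAD_* selectors are assumed to come from the nucleus headers rather than this file.

static void thread_cleanup_hook(xnthread_t *thread)
{
        /* Reclaim per-thread resources owned by the skin. */
}

static int install_hooks(void)
{
        return xnpod_add_hook(XNHOOK_THREAD_DELETE, thread_cleanup_hook);
}

The matching xnpod_remove_hook() call would be issued with the same arguments when the skin detaches.
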
static inline void xnpod_yield(void)
{
        xnpod_resume_thread(xnpod_current_thread(), 0);
        xnpod_schedule();
}

static inline void xnpod_delay(xnticks_t timeout)
{
        xnpod_suspend_thread(xnpod_current_thread(), XNDELAY, timeout, XN_RELATIVE, NULL);
}

static inline void xnpod_suspend_self(void)
{
        xnpod_suspend_thread(xnpod_current_thread(), XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
}

static inline void xnpod_delete_self(void)
{
        xnpod_delete_thread(xnpod_current_thread());
}

#ifdef __cplusplus
}
#endif

#endif /* !_XENO_NUCLEUS_POD_H */
