#ifndef _POSIX_MUTEX_H
#define _POSIX_MUTEX_H

#include <asm/xenomai/atomic.h>
#include <pthread.h>

struct pse51_mutex;

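/*
 * User-space view of a POSIX-skin mutex: the native pthread_mutex_t
 * storage is overlaid with a shadow descriptor carrying the magic
 * word, the recursion count and a pointer to the kernel-side
 * pse51_mutex object, plus the fast-synch owner word when
 * CONFIG_XENO_FASTSYNCH is enabled.
 */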
union __xeno_mutex {
        pthread_mutex_t native_mutex;
        struct __shadow_mutex {
                unsigned magic;
                unsigned lockcnt;
                struct pse51_mutex *mutex;
                xnarch_atomic_t lock;
#ifdef CONFIG_XENO_FASTSYNCH
                union {
                        unsigned owner_offset;
                        xnarch_atomic_t *owner;
                };
                struct pse51_mutexattr attr;
#endif /* CONFIG_XENO_FASTSYNCH */
        } shadow_mutex;
};

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <posix/internal.h>
#include <posix/thread.h>
#include <posix/cb_lock.h>

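/*
 * Kernel-side mutex object: wraps a nucleus synchronization object
 * (xnsynch_t), keeps the mutex linked on its owning queue through
 * 'link', and records the creation attributes and the kqueue set the
 * mutex belongs to.
 */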
typedef struct pse51_mutex {
        xnsynch_t synchbase;
        xnholder_t link;        /* Holder in the skin's mutex queue. */

#define link2mutex(laddr)                                               \
        ((pse51_mutex_t *)(((char *)laddr) - offsetof(pse51_mutex_t, link)))

        pthread_mutexattr_t attr;
        pse51_kqueues_t *owningq;
} pse51_mutex_t;

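/* Default attribute set, used when no explicit attribute object is given. */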
extern pthread_mutexattr_t pse51_default_mutex_attr;

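/* Per-queue-set and skin-wide housekeeping for the mutex package. */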
void pse51_mutexq_cleanup(pse51_kqueues_t *q);

void pse51_mutex_pkg_init(void);

void pse51_mutex_pkg_cleanup(void);

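/*
 * Slow-path lock helper: acquires the mutex, optionally bounded by the
 * absolute date 'to' when 'timed' is non-zero, and deals with the case
 * where the caller already owns it (behaviour then depends on the
 * mutex type).
 */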
int pse51_mutex_timedlock_break(struct __shadow_mutex *shadow,
                                int timed, xnticks_t to);

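/* Sanity check before (re)initialization: detect a shadow that already
   refers to a live mutex. */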
int pse51_mutex_check_init(struct __shadow_mutex *shadow,
                           const pthread_mutexattr_t *attr);

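/*
 * Bind a shadow descriptor to a kernel mutex object, using 'ownerp' as
 * the fast-synch owner word and 'attr' as the creation attributes.
 */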
int pse51_mutex_init_internal(struct __shadow_mutex *shadow,
                              pse51_mutex_t *mutex,
                              xnarch_atomic_t *ownerp,
                              const pthread_mutexattr_t *attr);

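/* Tear down a kernel mutex object and unlink it from queue set 'q'. */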
void pse51_mutex_destroy_internal(pse51_mutex_t *mutex,
                                  pse51_kqueues_t *q);

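/*
 * Basic acquisition path used by the lock services: validate the
 * shadow descriptor, then sleep on the nucleus synchronization object
 * until ownership is granted, the wait is broken, the mutex is
 * deleted, or the absolute CLOCK_REALTIME timeout 'abs_to' expires.
 * On success the recursion count is (re)set to 'count'.
 */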
static inline int pse51_mutex_timedlock_internal(xnthread_t *cur,
                                                 struct __shadow_mutex *shadow,
                                                 unsigned count,
                                                 int timed,
                                                 xnticks_t abs_to)
{
        pse51_mutex_t *mutex = shadow->mutex;

        if (xnpod_unblockable_p())
                return -EPERM;

        if (!pse51_obj_active(shadow, PSE51_MUTEX_MAGIC, struct __shadow_mutex))
                return -EINVAL;

#if XENO_DEBUG(POSIX)
        /* Make sure the mutex is used from the queue set it was created on. */
        if (mutex->owningq != pse51_kqueues(mutex->attr.pshared))
                return -EPERM;
#endif /* XENO_DEBUG(POSIX) */

        /* The calling thread already owns the mutex. */
        if (xnsynch_owner_check(&mutex->synchbase, cur) == 0)
                return -EBUSY;

        if (timed)
                xnsynch_acquire(&mutex->synchbase, abs_to, XN_REALTIME);
        else
                xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE);

        if (unlikely(xnthread_test_info(cur, XNBREAK | XNRMID | XNTIMEO))) {
                if (xnthread_test_info(cur, XNBREAK))
                        return -EINTR;
                else if (xnthread_test_info(cur, XNTIMEO))
                        return -ETIMEDOUT;
                else /* XNRMID */
                        return -EINVAL;
        }

        shadow->lockcnt = count;

        return 0;
}

#endif /* __KERNEL__ || __XENO_SIM__ */

#endif /* _POSIX_MUTEX_H */