00001
00026 #ifndef _RTDM_DRIVER_H
00027 #define _RTDM_DRIVER_H
00028
00029 #ifndef __KERNEL__
00030 #error This header is for kernel space usage only. \
00031 You are likely looking for rtdm/rtdm.h...
00032 #endif
00033
00034 #include <asm/atomic.h>
00035 #include <linux/list.h>
00036
00037 #include <nucleus/xenomai.h>
00038 #include <nucleus/heap.h>
00039 #include <nucleus/pod.h>
00040 #include <nucleus/synch.h>
00041 #include <nucleus/select.h>
00042 #include <nucleus/vfile.h>
00043 #include <rtdm/rtdm.h>
00044
00045
00046 #include <nucleus/assert.h>
00047 #ifdef CONFIG_PCI
00048 #include <asm-generic/xenomai/pci_ids.h>
00049 #endif
00050
00051 #ifndef CONFIG_XENO_OPT_DEBUG_RTDM
00052 #define CONFIG_XENO_OPT_DEBUG_RTDM 0
00053 #endif
00054
/* Forward declarations; rtdm_selector_t wraps the nucleus select facility. */
struct rtdm_dev_context;
typedef struct xnselector rtdm_selector_t;
enum rtdm_selecttype;

/*
 * Device flags (bitmask values for struct rtdm_device::device_flags)
 */
/* Presumably allows only a single open instance at a time — matches the
 * exclusive_context slot in struct rtdm_dev_reserved; confirm in rtdm_dev_register(). */
#define RTDM_EXCLUSIVE 0x0001

/* Device is addressed via a name (open()-style access). */
#define RTDM_NAMED_DEVICE 0x0010

/* Device is addressed via protocol family/socket type (socket()-style access). */
#define RTDM_PROTOCOL_DEVICE 0x0020

/* Mask isolating the device-type bits of device_flags. */
#define RTDM_DEVICE_TYPE_MASK 0x00F0

/*
 * Context flags (bit numbers within struct rtdm_dev_context::context_flags,
 * used with test_bit() and friends)
 */
/* Bit 0: instance was created from non-real-time context. */
#define RTDM_CREATED_IN_NRT 0

/* Bit 1: instance is currently being closed (see CONTEXT_IS_LOCKED). */
#define RTDM_CLOSING 1

/* First bit number available for driver-defined context flags. */
#define RTDM_USER_CONTEXT_FLAG 8

/* Version stamp of struct rtdm_device; must match the running RTDM core. */
#define RTDM_DEVICE_STRUCT_VER 5

/* Version stamp of struct rtdm_dev_context. */
#define RTDM_CONTEXT_STRUCT_VER 3

/* NOTE(review): high bit of device_flags — appears to mark privileged-only
 * devices; verify semantics against the RTDM core before relying on it. */
#define RTDM_SECURE_DEVICE 0x80000000
00114
/*
 * Encode a driver version as 0x00MMmmpp (major, minor, patch — 8 bits each).
 * Each parameter is fully parenthesized so that expression arguments expand
 * correctly (macro-hygiene fix; the previous form applied & to the bare
 * parameter tokens).
 */
#define RTDM_DRIVER_VER(major, minor, patch) \
  ((((major) & 0xFF) << 16) | (((minor) & 0xFF) << 8) | ((patch) & 0xFF))

/* Extract the major version from an RTDM_DRIVER_VER()-encoded value. */
#define RTDM_DRIVER_MAJOR_VER(ver) (((ver) >> 16) & 0xFF)

/* Extract the minor version. */
#define RTDM_DRIVER_MINOR_VER(ver) (((ver) >> 8) & 0xFF)

/* Extract the patch level. */
#define RTDM_DRIVER_PATCH_VER(ver) ((ver) & 0xFF)
00127
/*
 * Event types usable with the select binding interface. Values mirror the
 * nucleus XNSELECT_* constants so they can be passed through unchanged.
 */
enum rtdm_selecttype {
	/* Readability monitoring (poll/select "in"). */
	RTDM_SELECTTYPE_READ = XNSELECT_READ,

	/* Writability monitoring (poll/select "out"). */
	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,

	/* Exceptional-condition monitoring. */
	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
};
/**
 * Open handler for named devices.
 * @param context new device context to initialize
 * @param user_info opaque caller information (NULL presumably means kernel
 *        caller — confirm against the RTDM core)
 * @param oflag open flags as passed by the caller
 * @return 0 on success, negative error code otherwise
 */
typedef int (*rtdm_open_handler_t)(struct rtdm_dev_context *context,
				   rtdm_user_info_t *user_info, int oflag);

/**
 * Socket creation handler for protocol devices.
 * @param protocol protocol number as passed to socket()
 */
typedef int (*rtdm_socket_handler_t)(struct rtdm_dev_context *context,
				     rtdm_user_info_t *user_info, int protocol);

/**
 * Close handler; invoked when the last reference to @a context is dropped.
 */
typedef int (*rtdm_close_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info);

/**
 * IOCTL handler.
 * @param request driver-defined request code
 * @param arg request argument; user-space pointer when issued from user land
 */
typedef int (*rtdm_ioctl_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info,
				    unsigned int request, void __user *arg);

/**
 * Select binding handler: attach @a selector to the context so that events
 * of @a type on file-descriptor index @a fd_index can be waited for.
 */
typedef int (*rtdm_select_bind_handler_t)(struct rtdm_dev_context *context,
					  rtdm_selector_t *selector,
					  enum rtdm_selecttype type,
					  unsigned fd_index);

/**
 * Read handler.
 * @return number of bytes read, or negative error code
 */
typedef ssize_t (*rtdm_read_handler_t)(struct rtdm_dev_context *context,
				       rtdm_user_info_t *user_info,
				       void *buf, size_t nbyte);

/**
 * Write handler.
 * @return number of bytes written, or negative error code
 */
typedef ssize_t (*rtdm_write_handler_t)(struct rtdm_dev_context *context,
					rtdm_user_info_t *user_info,
					const void *buf, size_t nbyte);

/**
 * Receive-message handler (protocol devices).
 */
typedef ssize_t (*rtdm_recvmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  struct msghdr *msg, int flags);

/**
 * Transmit-message handler (protocol devices).
 */
typedef ssize_t (*rtdm_sendmsg_handler_t)(struct rtdm_dev_context *context,
					  rtdm_user_info_t *user_info,
					  const struct msghdr *msg, int flags);

/** Generic handler used by rtdm_exec_in_rt() to run code in RT context. */
typedef int (*rtdm_rt_handler_t)(struct rtdm_dev_context *context,
				 rtdm_user_info_t *user_info, void *arg);
/**
 * Per-device operation table. Each operation exists in a real-time (_rt)
 * and a non-real-time (_nrt) variant, invoked according to the caller's
 * execution context.
 */
struct rtdm_operations {
	/* Close from real-time context. */
	rtdm_close_handler_t close_rt;
	/* Close from non-real-time (Linux) context. */
	rtdm_close_handler_t close_nrt;

	/* IOCTL from real-time context. */
	rtdm_ioctl_handler_t ioctl_rt;
	/* IOCTL from non-real-time context. */
	rtdm_ioctl_handler_t ioctl_nrt;

	/* Bind a selector to the device (single variant, no _rt/_nrt split). */
	rtdm_select_bind_handler_t select_bind;

	/* Read, RT / NRT variants. */
	rtdm_read_handler_t read_rt;
	rtdm_read_handler_t read_nrt;

	/* Write, RT / NRT variants. */
	rtdm_write_handler_t write_rt;
	rtdm_write_handler_t write_nrt;

	/* Receive message, RT / NRT variants (protocol devices). */
	rtdm_recvmsg_handler_t recvmsg_rt;
	rtdm_recvmsg_handler_t recvmsg_nrt;

	/* Send message, RT / NRT variants (protocol devices). */
	rtdm_sendmsg_handler_t sendmsg_rt;
	rtdm_sendmsg_handler_t sendmsg_nrt;
};

/* RTDM-internal part of a device context; not for driver use. */
struct rtdm_devctx_reserved {
	void *owner;		  /* owning entity (opaque to drivers) */
	struct list_head cleanup; /* linkage on the deferred-cleanup list */
};
00385
/**
 * Per-open-instance device context. One is allocated per open()/socket()
 * call; driver-private data directly follows in dev_private[].
 */
struct rtdm_dev_context {
	/* Bit field of RTDM_CREATED_IN_NRT / RTDM_CLOSING /
	 * driver-defined flags (>= RTDM_USER_CONTEXT_FLAG). */
	unsigned long context_flags;

	/* Associated file descriptor. */
	int fd;

	/* Reference count guarding against close during use;
	 * see rtdm_context_lock()/rtdm_context_unlock(). */
	atomic_t close_lock_count;

	/* Operation table in effect for this instance. */
	struct rtdm_operations *ops;

	/* Device this context belongs to. */
	struct rtdm_device *device;

	/* RTDM-internal data. */
	struct rtdm_devctx_reserved reserved;

	/* Start of driver-private area, sized by rtdm_device::context_size
	 * (legacy zero-length-array idiom for trailing variable data). */
	char dev_private[0];
};
00420
00429 static inline void *
00430 rtdm_context_to_private(struct rtdm_dev_context *context)
00431 {
00432 return (void *)context->dev_private;
00433 }
00434
/**
 * Recover the device context from a driver-private area pointer — the
 * inverse of rtdm_context_to_private(), implemented via container_of on
 * the trailing dev_private[] member.
 */
static inline struct rtdm_dev_context *
rtdm_private_to_context(void *dev_private)
{
	return container_of(dev_private, struct rtdm_dev_context, dev_private);
}

/* RTDM-internal part of a device descriptor; not for driver use. */
struct rtdm_dev_reserved {
	struct list_head entry;		/* linkage on the device list */
	atomic_t refcount;		/* open-instance reference count */
	/* Context of the single open instance for RTDM_EXCLUSIVE devices. */
	struct rtdm_dev_context *exclusive_context;
};
00454
/**
 * RTDM device descriptor, filled by the driver and passed to
 * rtdm_dev_register().
 */
struct rtdm_device {
	/* Must be set to RTDM_DEVICE_STRUCT_VER. */
	int struct_version;

	/* RTDM_NAMED_DEVICE or RTDM_PROTOCOL_DEVICE, plus modifier flags. */
	int device_flags;
	/* Size of the driver-private area appended to each context. */
	size_t context_size;

	/* Device name (named devices only). */
	char device_name[RTDM_MAX_DEVNAME_LEN + 1];

	/* Protocol family / socket type (protocol devices only). */
	int protocol_family;
	int socket_type;

	/* Open handlers for named devices, RT / NRT variants. */
	rtdm_open_handler_t open_rt;
	rtdm_open_handler_t open_nrt;

	/* Socket-creation handlers for protocol devices, RT / NRT variants. */
	rtdm_socket_handler_t socket_rt;
	rtdm_socket_handler_t socket_nrt;

	/* Remaining operations (close, ioctl, read, write, ...). */
	struct rtdm_operations ops;

	/* Device classification and versioning, for introspection. */
	int device_class;
	int device_sub_class;
	int profile_version;
	const char *driver_name;
	/* Encode with RTDM_DRIVER_VER(). */
	int driver_version;
	const char *peripheral_name;
	const char *provider_name;

	/* Name of the device's /proc entry. */
	const char *proc_name;
#ifdef CONFIG_XENO_OPT_VFILE
	/* vfile nodes backing the /proc entries. */
	struct xnvfile_directory vfroot;
	struct xnvfile_regular info_vfile;
#endif

	/* Driver-defined id and opaque per-device data. */
	int device_id;
	void *device_data;

	/* RTDM-internal data. */
	struct rtdm_dev_reserved reserved;
};
00537
00538
/* Register / unregister a device. poll_delay: milliseconds between retries
 * while open instances still exist — presumably; confirm in rtdm_dev_unregister(). */
int rtdm_dev_register(struct rtdm_device *device);
int rtdm_dev_unregister(struct rtdm_device *device, unsigned int poll_delay);

/*
 * Kernel-side aliases: inside the kernel the rtdm_* call names map 1:1
 * onto the rt_dev_* inter-driver API.
 */
#define rtdm_open rt_dev_open
#define rtdm_socket rt_dev_socket
#define rtdm_close rt_dev_close
#define rtdm_ioctl rt_dev_ioctl
#define rtdm_read rt_dev_read
#define rtdm_write rt_dev_write
#define rtdm_recvmsg rt_dev_recvmsg
#define rtdm_recv rt_dev_recv
#define rtdm_recvfrom rt_dev_recvfrom
#define rtdm_sendmsg rt_dev_sendmsg
#define rtdm_send rt_dev_send
#define rtdm_sendto rt_dev_sendto
#define rtdm_bind rt_dev_bind
#define rtdm_listen rt_dev_listen
#define rtdm_accept rt_dev_accept
#define rtdm_getsockopt rt_dev_getsockopt
#define rtdm_setsockopt rt_dev_setsockopt
#define rtdm_getsockname rt_dev_getsockname
#define rtdm_getpeername rt_dev_getpeername
#define rtdm_shutdown rt_dev_shutdown

/* Look up the context of fd, taking a reference; release with
 * rtdm_context_put(). */
struct rtdm_dev_context *rtdm_context_get(int fd);
00566
#ifndef DOXYGEN_CPP

/*
 * A context counts as locked while more than one reference is held, or
 * while it is closing (RTDM_CLOSING set) with at least one reference
 * still outstanding.
 */
#define CONTEXT_IS_LOCKED(context) \
	(atomic_read(&(context)->close_lock_count) > 1 || \
	 (test_bit(RTDM_CLOSING, &(context)->context_flags) && \
	  atomic_read(&(context)->close_lock_count) > 0))

/* Take an additional reference on @a context to hold off closure. */
static inline void rtdm_context_lock(struct rtdm_dev_context *context)
{
	/* Assert-only check; the empty third argument means no recovery
	 * action is taken on failure. */
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    );
	atomic_inc(&context->close_lock_count);
}

/* APC used to defer the final cleanup to Linux (root-domain) context. */
extern int rtdm_apc;

/* Drop one reference; the last dropper schedules deferred cleanup. */
static inline void rtdm_context_unlock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    );
	/* Order prior accesses before the release of the reference. */
	smp_mb__before_atomic_dec();
	if (unlikely(atomic_dec_and_test(&context->close_lock_count)))
		rthal_apc_schedule(rtdm_apc);
}

/* Release a reference obtained via rtdm_context_get(). */
static inline void rtdm_context_put(struct rtdm_dev_context *context)
{
	rtdm_context_unlock(context);
}
00596
00597
/* Time base all RTDM timing services operate on. */
struct xntbase;
extern struct xntbase *rtdm_tbase;

/* Current time in nanoseconds (wall-clock of the RTDM time base). */
static inline nanosecs_abs_t rtdm_clock_read(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_time(rtdm_tbase));
}

/* Monotonic time in nanoseconds (jiffy counter of the RTDM time base). */
static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_jiffies(rtdm_tbase));
}
#endif

/* Bind fd to @a selector for events of @a type; see rtdm_selecttype. */
int rtdm_select_bind(int fd, rtdm_selector_t *selector,
		     enum rtdm_selecttype type, unsigned fd_index);
00619
00620
#ifdef DOXYGEN_CPP
/* Documentation stub: conceptual shape of the atomic section. */
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
{ \
	<ENTER_ATOMIC_SECTION> \
	code_block; \
	<LEAVE_ATOMIC_SECTION> \
}
#else
/*
 * Run code_block atomically with respect to both interrupts and the
 * scheduler: takes the global nucleus lock with IRQs saved, and disables
 * rescheduling for the duration of the block.
 */
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
{ \
	spl_t __rtdm_s; \
	\
	xnlock_get_irqsave(&nklock, __rtdm_s); \
	__xnpod_lock_sched(); \
	code_block; \
	__xnpod_unlock_sched(); \
	xnlock_put_irqrestore(&nklock, __rtdm_s); \
}
#endif
00677
/* Static initializer for an rtdm_lock_t. */
#define RTDM_LOCK_UNLOCKED RTHAL_SPIN_LOCK_UNLOCKED

/* RTDM spinlock type (wraps the HAL spinlock). */
typedef rthal_spinlock_t rtdm_lock_t;

/* Saved IRQ state for the *_irqsave/*_irqrestore variants. */
typedef unsigned long rtdm_lockctx_t;

/* Initialize a lock at run time. */
#define rtdm_lock_init(lock) rthal_spin_lock_init(lock)

#ifdef DOXYGEN_CPP
#define rtdm_lock_get(lock) rthal_spin_lock(lock)
#else
/*
 * Acquire a lock. Callers must already run with IRQs off (asserted);
 * the scheduler is additionally locked while the lock is held.
 */
#define rtdm_lock_get(lock) \
	do { \
		XENO_BUGON(RTDM, !rthal_local_irq_disabled()); \
		rthal_spin_lock(lock); \
		__xnpod_lock_sched(); \
	} while (0)
#endif

/* Release a lock taken with rtdm_lock_get(). */
#define rtdm_lock_put(lock) \
	do { \
		rthal_spin_unlock(lock); \
		__xnpod_unlock_sched(); \
	} while (0)

/* Acquire a lock, saving and disabling local IRQs. */
#define rtdm_lock_get_irqsave(lock, context) \
	do { \
		rthal_spin_lock_irqsave(lock, context); \
		__xnpod_lock_sched(); \
	} while (0)

/* Release a lock and restore the saved IRQ state. */
#define rtdm_lock_put_irqrestore(lock, context) \
	do { \
		rthal_spin_unlock(lock); \
		__xnpod_unlock_sched(); \
		rthal_local_irq_restore(context); \
	} while (0)

/* Disable local IRQs, saving the previous state (no lock involved). */
#define rtdm_lock_irqsave(context) \
	rthal_local_irq_save(context)

/* Restore IRQ state saved by rtdm_lock_irqsave(). */
#define rtdm_lock_irqrestore(context) \
	rthal_local_irq_restore(context)
00849
/* IRQ handle type (wraps the nucleus interrupt descriptor). */
typedef xnintr_t rtdm_irq_t;

/* Request flag: line may be shared between drivers. */
#define RTDM_IRQTYPE_SHARED XN_ISR_SHARED

/* Request flag: edge-triggered interrupt line. */
#define RTDM_IRQTYPE_EDGE XN_ISR_EDGE

/* Interrupt handler; returns one of the RTDM_IRQ_* dispositions below. */
typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);

/* Handler result: interrupt was not issued by this device. */
#define RTDM_IRQ_NONE XN_ISR_NONE
/* Handler result: interrupt was handled. */
#define RTDM_IRQ_HANDLED XN_ISR_HANDLED
/**
 * Retrieve the driver cookie stored in an IRQ handle, cast to @a type *.
 * The handle parameter is parenthesized in the expansion so that
 * expression arguments (e.g. an array element or pointer arithmetic)
 * bind correctly with the -> operator.
 */
#define rtdm_irq_get_arg(irq_handle, type) ((type *)(irq_handle)->cookie)
00907
/*
 * Attach @a handler to interrupt line @a irq_no. @a flags takes the
 * RTDM_IRQTYPE_* bits; @a arg becomes the handle's cookie (see
 * rtdm_irq_get_arg()).
 */
int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
		     rtdm_irq_handler_t handler, unsigned long flags,
		     const char *device_name, void *arg);

#ifndef DOXYGEN_CPP
/* Detach the handler; must be called from root (non-RT) context. */
static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
{
	XENO_ASSERT(RTDM, xnpod_root_p(), return -EPERM;);
	return xnintr_detach(irq_handle);
}

/* Enable the interrupt line at the controller level. */
static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
{
	return xnintr_enable(irq_handle);
}

/* Disable the interrupt line at the controller level. */
static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
{
	return xnintr_disable(irq_handle);
}
#endif
00930
00931
00932
/* Non-real-time signal handle: a virtual IRQ number on the root domain. */
typedef unsigned rtdm_nrtsig_t;

/* Handler invoked in non-real-time (Linux) context once the signal fires. */
typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t nrt_sig, void *arg);

#ifndef DOXYGEN_CPP
/*
 * Allocate a virtual IRQ and virtualize it on the root domain so that
 * @a handler runs in Linux context when the signal is pended.
 * Returns -EAGAIN when no virtual IRQ is available.
 */
static inline int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
				   rtdm_nrtsig_handler_t handler, void *arg)
{
	*nrt_sig = rthal_alloc_virq();

	if (*nrt_sig == 0)
		return -EAGAIN;

	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, handler, arg, NULL,
			     IPIPE_HANDLE_MASK);
	return 0;
}

/* Unhook the handler and release the virtual IRQ. */
static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
{
	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, NULL, NULL, NULL, 0);

	rthal_free_virq(*nrt_sig);
}

/* Pend the signal; may be called from real-time context. */
static inline void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
{
	rthal_trigger_irq(*nrt_sig);
}
#endif
00979
00980
00981
/* Timer handle (wraps the nucleus timer). */
typedef xntimer_t rtdm_timer_t;

/* Timer expiry handler, invoked in timer (IRQ) context. */
typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);

/* Interpretation of the expiry date passed to rtdm_timer_start(). */
enum rtdm_timer_mode {
	/* Expiry is relative to the current time. */
	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,

	/* Expiry is an absolute monotonic date. */
	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,

	/* Expiry is an absolute wall-clock (adjustable) date. */
	RTDM_TIMERMODE_REALTIME = XN_REALTIME
};

#ifndef DOXYGEN_CPP
/* Initialize a timer on the RTDM time base; always yields 0. */
#define rtdm_timer_init(timer, handler, name) \
({ \
	xntimer_init((timer), rtdm_tbase, handler); \
	xntimer_set_name((timer), (name)); \
	0; \
})
#endif

void rtdm_timer_destroy(rtdm_timer_t *timer);

/* Arm the timer; interval != 0 presumably makes it periodic — confirm
 * against xntimer_start() semantics. */
int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);

void rtdm_timer_stop(rtdm_timer_t *timer);
01030
#ifndef DOXYGEN_CPP
/*
 * Variant of rtdm_timer_start() callable from a timer handler: converts
 * nanoseconds to time-base ticks (rounding up) and arms the underlying
 * nucleus timer directly.
 */
static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
					      nanosecs_abs_t expiry,
					      nanosecs_rel_t interval,
					      enum rtdm_timer_mode mode)
{
	return xntimer_start(timer, xntbase_ns2ticks_ceil(rtdm_tbase, expiry),
			     xntbase_ns2ticks_ceil(rtdm_tbase, interval),
			     (xntmode_t)mode);
}

/* Variant of rtdm_timer_stop() callable from a timer handler. */
static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
{
	xntimer_stop(timer);
}
#endif
01047
01048
/* Real-time task handle (wraps a nucleus thread). */
typedef xnthread_t rtdm_task_t;

/* Task entry point. */
typedef void (*rtdm_task_proc_t)(void *arg);

/* Bounds of the RTDM task priority range (nucleus RT scheduling class). */
#define RTDM_TASK_LOWEST_PRIORITY XNSCHED_LOW_PRIO
#define RTDM_TASK_HIGHEST_PRIORITY XNSCHED_HIGH_PRIO

/* Relative adjustments usable with rtdm_task_set_priority(). */
#define RTDM_TASK_RAISE_PRIORITY (+1)
#define RTDM_TASK_LOWER_PRIORITY (-1)

/*
 * Create and start a task. period > 0 presumably makes the task periodic
 * (paired with rtdm_task_wait_period()) — confirm in the implementation.
 */
int rtdm_task_init(rtdm_task_t *task, const char *name,
		   rtdm_task_proc_t task_proc, void *arg,
		   int priority, nanosecs_rel_t period);
/* Internal sleep primitive backing the rtdm_task_sleep*() wrappers. */
int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
/* Busy-wait (spin) for the given delay without blocking. */
void rtdm_task_busy_sleep(nanosecs_rel_t delay);
01086
#ifndef DOXYGEN_CPP
/* Delete a task immediately (no join). */
static inline void rtdm_task_destroy(rtdm_task_t *task)
{
	xnpod_delete_thread(task);
}

/* Wait (from NRT context) until @a task has terminated, polling every
 * poll_delay — presumably milliseconds; confirm in the implementation. */
void rtdm_task_join_nrt(rtdm_task_t *task, unsigned int poll_delay);

/* Change the task priority within the RT scheduling class and reschedule. */
static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
{
	union xnsched_policy_param param = { .rt = { .prio = priority } };
	xnpod_set_thread_schedparam(task, &xnsched_class_rt, &param);
	xnpod_schedule();
}

/* Change the period of a periodic task; period <= 0 clamps to 0
 * (which presumably disables periodic mode). */
static inline int rtdm_task_set_period(rtdm_task_t *task,
				       nanosecs_rel_t period)
{
	if (period < 0)
		period = 0;
	return xnpod_set_thread_periodic(task, XN_INFINITE,
					 xntbase_ns2ticks_ceil
					 (xnthread_time_base(task), period));
}

/* Force the task out of any blocking call, then reschedule. */
static inline int rtdm_task_unblock(rtdm_task_t *task)
{
	int res = xnpod_unblock_thread(task);

	xnpod_schedule();
	return res;
}

/* Handle of the task calling this service. */
static inline rtdm_task_t *rtdm_task_current(void)
{
	return xnpod_current_thread();
}

/* Block the caller until its next release point; RT task context only. */
static inline int rtdm_task_wait_period(void)
{
	XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);
	return xnpod_wait_thread_period(NULL);
}

/* Sleep for a relative delay in nanoseconds. */
static inline int rtdm_task_sleep(nanosecs_rel_t delay)
{
	return __rtdm_task_sleep(delay, XN_RELATIVE);
}
01135
01136 static inline int
01137 rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
01138 {
01139
01140 if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
01141 return -EINVAL;
01142 return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
01143 }
01144
01145
/* Deprecated: sleep until an absolute wall-clock date; use
 * rtdm_task_sleep_abs() instead. */
static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
{
	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
}
#endif
01151
01152
01153
/* Timeout sequence: an absolute deadline shared across a series of
 * timed waits so the overall timeout does not restart at each call. */
typedef nanosecs_abs_t rtdm_toseq_t;

void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
01157
01158
01159
/* Event object: binary-style synchronization built on a nucleus synch,
 * with select support. */
typedef struct {
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_event_t;

/* Synch status bit recording a pending (signaled) event. */
#define RTDM_EVENT_PENDING XNSYNCH_SPARE1

/* Initialize; non-zero @a pending starts the event in signaled state. */
void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
			   enum rtdm_selecttype type, unsigned fd_index);
#else
/* Select support compiled out: always report a bad descriptor. */
#define rtdm_event_select_bind(e, s, t, i) ({ (void)(e); -EBADF; })
#endif
int rtdm_event_wait(rtdm_event_t *event);
int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);
void rtdm_event_signal(rtdm_event_t *event);

void rtdm_event_clear(rtdm_event_t *event);
01180
#ifndef DOXYGEN_CPP
/* Shared flush primitive used by event/sem/mutex destruction paths. */
void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason);

/* Wake all current waiters without leaving the event signaled. */
static inline void rtdm_event_pulse(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_pulse, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, 0);
}

/* Destroy the event: flush waiters with XNRMID (object removed) and
 * tear down the select binding. */
static inline void rtdm_event_destroy(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_destroy, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, XNRMID);
	xnselect_destroy(&event->select_block);
}
#endif
01197
01198
01199
/* Counting semaphore with select support. */
typedef struct {
	unsigned long value;	/* current count */
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_sem_t;

void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
			 enum rtdm_selecttype type, unsigned fd_index);
#else
/* Select support compiled out: always report a bad descriptor. */
#define rtdm_sem_select_bind(s, se, t, i) ({ (void)(s); -EBADF; })
#endif
int rtdm_sem_down(rtdm_sem_t *sem);
int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
		       rtdm_toseq_t *timeout_seq);
void rtdm_sem_up(rtdm_sem_t *sem);

#ifndef DOXYGEN_CPP
/* Destroy: flush waiters with XNRMID and drop the select binding. */
static inline void rtdm_sem_destroy(rtdm_sem_t *sem)
{
	trace_mark(xn_rtdm, sem_destroy, "sem %p", sem);
	__rtdm_synch_flush(&sem->synch_base, XNRMID);
	xnselect_destroy(&sem->select_block);
}
#endif
01226
01227
01228
/* Mutex built on a nucleus synch object. */
typedef struct {
	xnsynch_t synch_base;
} rtdm_mutex_t;

void rtdm_mutex_init(rtdm_mutex_t *mutex);
int rtdm_mutex_lock(rtdm_mutex_t *mutex);
int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

#ifndef DOXYGEN_CPP
/* Release the mutex; reschedules only if the release handed the lock to
 * a waiter. Must not be called from interrupt context (asserted). */
static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;);

	trace_mark(xn_rtdm, mutex_unlock, "mutex %p", mutex);

	if (unlikely(xnsynch_release(&mutex->synch_base) != NULL))
		xnpod_schedule();
}

/* Destroy: flush any waiters with XNRMID (object removed). */
static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
{
	trace_mark(xn_rtdm, mutex_destroy, "mutex %p", mutex);

	__rtdm_synch_flush(&mutex->synch_base, XNRMID);
}
#endif
01256
01257
01258
/* Print from any context; forwards to the (pipeline-deferred) printk. */
#define rtdm_printk(format, ...) printk(format, ##__VA_ARGS__)

/* State for rate-limited printing, mirroring the Linux ratelimit API
 * but clocked in nanoseconds. */
struct rtdm_ratelimit_state {
	rtdm_lock_t lock;	/* protects the counters below */
	nanosecs_abs_t interval;/* window length, ns */
	int burst;		/* max messages per window */
	int printed;		/* emitted in the current window */
	int missed;		/* suppressed in the current window */
	nanosecs_abs_t begin;	/* start of the current window */
};

/* Returns non-zero when the caller may print; @a func is reported for
 * suppressed messages. */
int rtdm_ratelimit(struct rtdm_ratelimit_state *rs, const char *func);

/* Static initializer; remaining counters start at zero by designated
 * initialization. */
#define DEFINE_RTDM_RATELIMIT_STATE(name, interval_init, burst_init) \
	struct rtdm_ratelimit_state name = { \
		.lock = RTDM_LOCK_UNLOCKED, \
		.interval = interval_init, \
		.burst = burst_init, \
	}

/* Defaults: at most 10 messages per 5 s window. */
#define DEF_RTDM_RATELIMIT_INTERVAL 5000000000LL
#define DEF_RTDM_RATELIMIT_BURST 10

/* printk limited by a per-call-site static ratelimit state. */
#define rtdm_printk_ratelimited(fmt, ...) ({ \
	static DEFINE_RTDM_RATELIMIT_STATE(_rs, \
					   DEF_RTDM_RATELIMIT_INTERVAL, \
					   DEF_RTDM_RATELIMIT_BURST); \
	\
	if (rtdm_ratelimit(&_rs, __func__)) \
		printk(fmt, ##__VA_ARGS__); \
})
01291
#ifndef DOXYGEN_CPP
/* Allocate from the nucleus heap; usable from real-time context. */
static inline void *rtdm_malloc(size_t size)
{
	return xnmalloc(size);
}

/* Release memory obtained via rtdm_malloc(). */
static inline void rtdm_free(void *ptr)
{
	xnfree(ptr);
}

#ifdef CONFIG_XENO_OPT_PERVASIVE
/* Map kernel memory (rtdm_mmap_to_user) or I/O memory (rtdm_iomap_to_user)
 * into the calling user process; *pptr receives the user address. */
int rtdm_mmap_to_user(rtdm_user_info_t *user_info,
		      void *src_addr, size_t len,
		      int prot, void **pptr,
		      struct vm_operations_struct *vm_ops,
		      void *vm_private_data);
int rtdm_iomap_to_user(rtdm_user_info_t *user_info,
		       phys_addr_t src_addr, size_t len,
		       int prot, void **pptr,
		       struct vm_operations_struct *vm_ops,
		       void *vm_private_data);
/* Undo a mapping established by one of the services above. */
int rtdm_munmap(rtdm_user_info_t *user_info, void *ptr, size_t len);
01315
/* Non-zero if @a ptr..@a ptr+size is readable user memory. */
static inline int rtdm_read_user_ok(rtdm_user_info_t *user_info,
				    const void __user *ptr, size_t size)
{
	return access_rok(ptr, size);
}

/* Non-zero if the range is read/write-accessible user memory. */
static inline int rtdm_rw_user_ok(rtdm_user_info_t *user_info,
				  const void __user *ptr, size_t size)
{
	return access_wok(ptr, size);
}

/* Copy from user space WITHOUT access validation; pair with
 * rtdm_read_user_ok(), or use rtdm_safe_copy_from_user(). */
static inline int rtdm_copy_from_user(rtdm_user_info_t *user_info,
				      void *dst, const void __user *src,
				      size_t size)
{
	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
}
01334
01335 static inline int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info,
01336 void *dst, const void __user *src,
01337 size_t size)
01338 {
01339 return (!access_rok(src, size) ||
01340 __xn_copy_from_user(dst, src, size)) ? -EFAULT : 0;
01341 }
01342
/* Copy to user space WITHOUT access validation; pair with
 * rtdm_rw_user_ok(), or use rtdm_safe_copy_to_user(). */
static inline int rtdm_copy_to_user(rtdm_user_info_t *user_info,
				    void __user *dst, const void *src,
				    size_t size)
{
	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
}

/* Copy to user space, validating write access first. */
static inline int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info,
					 void __user *dst, const void *src,
					 size_t size)
{
	return (!access_wok(dst, size) ||
		__xn_copy_to_user(dst, src, size)) ? -EFAULT : 0;
}

/* Copy a NUL-terminated string from user space (at most @a count bytes).
 * NOTE(review): only the first byte's accessibility is pre-checked here;
 * the underlying helper presumably validates the rest — confirm. */
static inline int rtdm_strncpy_from_user(rtdm_user_info_t *user_info,
					 char *dst,
					 const char __user *src, size_t count)
{
	if (unlikely(!access_rok(src, 1)))
		return -EFAULT;
	return __xn_strncpy_from_user(dst, src, count);
}

/* Non-zero if the caller is able to execute in real-time context:
 * either a shadowed user thread, or a kernel caller not running in the
 * Linux root domain. Not callable from interrupt context (asserted). */
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return (user_info ? xnshadow_thread(user_info) != NULL
		: !xnpod_root_p());
}
01374
#else

/* CONFIG_XENO_OPT_PERVASIVE disabled: no user-space support, all
 * user-memory services degrade to error/failure stubs. */
#define rtdm_mmap_to_user(...) ({ -ENOSYS; })
#define rtdm_munmap(...) ({ -ENOSYS; })
#define rtdm_read_user_ok(...) ({ 0; })
#define rtdm_rw_user_ok(...) ({ 0; })
#define rtdm_copy_from_user(...) ({ -ENOSYS; })
#define rtdm_safe_copy_from_user(...) ({ -ENOSYS; })
#define rtdm_copy_to_user(...) ({ -ENOSYS; })
#define rtdm_safe_copy_to_user(...) ({ -ENOSYS; })
#define rtdm_strncpy_from_user(...) ({ -ENOSYS; })

/* Without user-space support only kernel callers exist: RT-capable iff
 * not running in the Linux root domain. */
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return !xnpod_root_p();
}

#endif

/* Non-zero when the caller currently executes in real-time context
 * (i.e. outside the Linux root domain). */
static inline int rtdm_in_rt_context(void)
{
	return (rthal_current_domain != rthal_root_domain);
}

#endif
01402
/* Run @a handler in real-time context on behalf of @a context,
 * switching the caller over if necessary. */
int rtdm_exec_in_rt(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info, void *arg,
		    rtdm_rt_handler_t handler);
01406
01407 #endif