Xenomai 3.1
lock.h
/*
 * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef _BOILERPLATE_LOCK_H
#define _BOILERPLATE_LOCK_H

#include <pthread.h>
#include <boilerplate/wrappers.h>
#include <boilerplate/debug.h>

/*
 * CANCEL_DEFER/RESTORE() should enclose any emulator code which is
 * about to hold a lock, or to invoke inner boilerplate/copperplate
 * services (which usually do so), in order to change the system
 * state. A proper cleanup handler should be pushed prior to
 * acquiring such a lock.
 *
 * These macros ensure that the cancellation type is switched to
 * deferred mode while the section is traversed, then restored to its
 * original value upon exit.
 *
 * WARNING: inner services MAY ASSUME that cancellability is deferred
 * for the caller, so you really want to define protected sections as
 * required in the higher interface layers.
 */
struct service {
	int cancel_type;
};

#ifdef CONFIG_XENO_ASYNC_CANCEL

#define CANCEL_DEFER(__s)					\
	do {							\
		pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,	\
				      &(__s).cancel_type);	\
	} while (0)

#define CANCEL_RESTORE(__s)					\
	do {							\
		pthread_setcanceltype((__s).cancel_type, NULL);	\
		backtrace_check();				\
	} while (0)

#else /* !CONFIG_XENO_ASYNC_CANCEL */

#define CANCEL_DEFER(__s)	do { (void)(__s); } while (0)

#define CANCEL_RESTORE(__s)	do { } while (0)

#endif /* !CONFIG_XENO_ASYNC_CANCEL */

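/*
 * Typical usage (illustrative sketch only, not part of the API): a
 * service entry point defers cancellation before entering a
 * protected section, then restores the caller's setting on the way
 * out.
 *
 *	struct service svc;
 *
 *	CANCEL_DEFER(svc);
 *	... acquire locks, call inner copperplate services ...
 *	CANCEL_RESTORE(svc);
 */
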
struct cleanup_block {
	pthread_mutex_t *lock;
	void (*handler)(void *arg);
	void *arg;
};

#define __push_cleanup_args(__cb, __lock, __fn, __arg)	\
	((__cb)->lock = (__lock)),			\
	((__cb)->handler = (void (*)(void *))(__fn)),	\
	((__cb)->arg = (__arg))

#define push_cleanup_handler(__cb, __lock, __fn, __arg)			\
	pthread_cleanup_push((void (*)(void *))__run_cleanup_block,	\
			     (__push_cleanup_args(__cb, __lock, __fn, __arg), (__cb)))

#define pop_cleanup_handler(__cb)	\
	pthread_cleanup_pop(0)

#define push_cleanup_lock(__lock)	\
	pthread_cleanup_push((void (*)(void *))__RT(pthread_mutex_unlock), (__lock))

#define pop_cleanup_lock(__lock)	\
	pthread_cleanup_pop(0)

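/*
 * Sketch of the intended pairing (foo_mutex, foo_rollback and
 * foo_ctx are made-up names): the cleanup block is pushed before the
 * lock is grabbed, so that a cancellation hitting the section first
 * releases the lock, then runs the rollback handler.
 *
 *	struct cleanup_block cb;
 *
 *	push_cleanup_handler(&cb, &foo_mutex, foo_rollback, &foo_ctx);
 *	write_lock(&foo_mutex);
 *	... code which may reach cancellation points ...
 *	write_unlock(&foo_mutex);
 *	pop_cleanup_handler(&cb);
 */
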
#ifdef CONFIG_XENO_DEBUG
int __check_cancel_type(const char *locktype);
#else
#define __check_cancel_type(__locktype)	\
	({ (void)__locktype; 0; })
#endif

#define __do_lock(__lock, __op)					\
	({							\
		int __ret;					\
		__ret = -__RT(pthread_mutex_##__op(__lock));	\
		__ret;						\
	})

#define __do_lock_nocancel(__lock, __type, __op)		\
	({							\
		__bt(__check_cancel_type(#__op "_nocancel"));	\
		__do_lock(__lock, __op);			\
	})

#define __do_unlock(__lock)					\
	({							\
		int __ret;					\
		__ret = -__RT(pthread_mutex_unlock(__lock));	\
		__ret;						\
	})

/*
 * Macros to enter/leave critical sections within inner routines. In
 * practice, they are mainly aimed at self-documenting the code, by
 * stating the basic assumption(s) about the code being traversed. In
 * effect, they are currently aliases to the standard pthread_mutex_*
 * API, except for the _safe form.
 *
 * The _nocancel suffix indicates that no cancellation point is
 * traversed by the protected code, therefore we don't need any
 * cleanup handler since we are guaranteed to run in deferred cancel
 * mode after CANCEL_DEFER(). A runtime check is inserted in debug
 * mode, which triggers when cancellability is not in deferred mode
 * while an attempt is made to acquire a _nocancel lock.
 *
 * The read/write_lock() forms must be enclosed within the scope of a
 * cleanup handler, since the protected code may reach cancellation
 * points. push_cleanup_lock() is a simple shorthand to push
 * pthread_mutex_unlock as the cleanup handler.
 */
#define read_lock(__lock)		\
	__do_lock(__lock, lock)

#define read_trylock(__lock)		\
	__do_lock(__lock, trylock)

#define read_lock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, read_lock, lock)

#define read_trylock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, read_trylock, trylock)

#define read_unlock(__lock)		\
	__do_unlock(__lock)

#define write_lock(__lock)		\
	__do_lock(__lock, lock)

#define write_trylock(__lock)		\
	__do_lock(__lock, trylock)

#define write_lock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, write_lock, lock)

#define write_trylock_nocancel(__lock)	\
	__do_lock_nocancel(__lock, write_trylock, trylock)

#define write_unlock(__lock)		\
	__do_unlock(__lock)

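/*
 * Contrasting sketch of both forms (foo_mutex and foo_count are
 * made-up names). A section which may cross cancellation points is
 * enclosed in a cleanup scope:
 *
 *	push_cleanup_lock(&foo_mutex);
 *	read_lock(&foo_mutex);
 *	... code which may reach cancellation points ...
 *	read_unlock(&foo_mutex);
 *	pop_cleanup_lock(&foo_mutex);
 *
 * A short section known to be free of cancellation points, with the
 * caller already running in deferred mode after CANCEL_DEFER(),
 * needs no cleanup scope:
 *
 *	read_lock_nocancel(&foo_mutex);
 *	count = foo_count;
 *	read_unlock(&foo_mutex);
 */
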
#define __do_lock_safe(__lock, __state, __op)				\
	({								\
		int __ret, __oldstate;					\
		__bt(__check_cancel_type(#__op "_safe"));		\
		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__oldstate); \
		__ret = -__RT(pthread_mutex_##__op(__lock));		\
		if (__ret)						\
			pthread_setcancelstate(__oldstate, NULL);	\
		__state = __oldstate;					\
		__ret;							\
	})

#define __do_unlock_safe(__lock, __state)			\
	({							\
		int __ret, __restored_state = __state;		\
		__ret = -__RT(pthread_mutex_unlock(__lock));	\
		pthread_setcancelstate(__restored_state, NULL);	\
		__ret;						\
	})

/*
 * The _safe call form is available for cases where undoing the
 * changes from an update section upon cancellation using a cleanup
 * handler is not an option (e.g. too complex), or where the
 * protected code must fully run: in both cases, cancellation is
 * disabled throughout the section.
 */

#define write_lock_safe(__lock, __state)	\
	__do_lock_safe(__lock, __state, lock)

#define write_trylock_safe(__lock, __state)	\
	__do_lock_safe(__lock, __state, trylock)

#define write_unlock_safe(__lock, __state)	\
	__do_unlock_safe(__lock, __state)

#define read_lock_safe(__lock, __state)		\
	__do_lock_safe(__lock, __state, lock)

#define read_unlock_safe(__lock, __state)	\
	__do_unlock_safe(__lock, __state)

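/*
 * Usage sketch (foo_mutex and foo_commit_update are made-up names):
 * the section below cannot be canceled halfway through, since
 * cancellation is disabled before the lock is grabbed and restored
 * only after it is dropped. __state receives the previous
 * cancellation state and must be passed back on unlock.
 *
 *	int state;
 *
 *	if (write_lock_safe(&foo_mutex, state) == 0) {
 *		foo_commit_update();
 *		write_unlock_safe(&foo_mutex, state);
 *	}
 */
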
#ifdef CONFIG_XENO_DEBUG
#define mutex_type_attribute	PTHREAD_MUTEX_ERRORCHECK
#else
#define mutex_type_attribute	PTHREAD_MUTEX_NORMAL
#endif
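
/*
 * Sketch of how this attribute might be fed to the standard pthread
 * API (foo_mutex is a made-up name): debug builds get error-checking
 * mutexes, production builds keep the cheaper normal type.
 *
 *	pthread_mutexattr_t mattr;
 *
 *	pthread_mutexattr_init(&mattr);
 *	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
 *	__RT(pthread_mutex_init(&foo_mutex, &mattr));
 *	pthread_mutexattr_destroy(&mattr);
 */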

#ifdef __cplusplus
extern "C" {
#endif

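/*
 * Cleanup trampoline pushed by push_cleanup_handler(): releases
 * cb->lock, then runs cb->handler(cb->arg).
 */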
void __run_cleanup_block(struct cleanup_block *cb);

#ifdef __cplusplus
}
#endif

#endif /* _BOILERPLATE_LOCK_H */