/* Xenomai 3.1 — copperplate/heapobj.h */
1 /*
2  * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13 
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17  */
18 
19 #ifndef _COPPERPLATE_HEAPOBJ_H
20 #define _COPPERPLATE_HEAPOBJ_H
21 
22 #include <sys/types.h>
23 #include <stdint.h>
24 #include <string.h>
25 #include <assert.h>
26 #include <errno.h>
27 #include <pthread.h>
28 #include <xeno_config.h>
29 #include <boilerplate/wrappers.h>
30 #include <boilerplate/list.h>
31 #include <copperplate/reference.h>
32 #include <boilerplate/lock.h>
33 #include <copperplate/debug.h>
34 
/*
 * Descriptor of a memory heap. The backing pool is reached either
 * through a plain pointer (private mode) or a relocatable reference
 * (shared mode) — see copperplate/reference.h.
 */
struct heapobj {
	union {
		dref_type(void *) pool_ref;	/* Shared-mode reference to the pool. */
		void *pool;			/* Private-mode pointer to the pool. */
	};
	size_t size;		/* Size of the heap area, in bytes. */
	char name[32];		/* Symbolic name of this heap object. */
#ifdef CONFIG_XENO_PSHARED
	/* NOTE(review): presumably the backing shared-memory file name — confirm. */
	char fsname[256];
#endif
};
46 
/*
 * Session-wide registry of threads and heaps, guarded by a single
 * mutex (see __sysgroup_add()/__sysgroup_remove() below).
 */
struct sysgroup {
	int thread_count;		/* Number of entries in thread_list. */
	struct listobj thread_list;
	int heap_count;			/* Number of entries in heap_list. */
	struct listobj heap_list;
	pthread_mutex_t lock;		/* Serializes updates to both lists. */
};
54 
55 #ifdef __cplusplus
56 extern "C" {
57 #endif
58 
/* One-time initialization of the private (process-local) heap support. */
int heapobj_pkg_init_private(void);

/* Initialize @hobj over caller-provided memory @mem of @size bytes. */
int __heapobj_init_private(struct heapobj *hobj, const char *name,
			   size_t size, void *mem);

/* Initialize @hobj as an array heap of @elems objects of @size bytes each. */
int heapobj_init_array_private(struct heapobj *hobj, const char *name,
			       size_t size, int elems);
66 #ifdef __cplusplus
67 }
68 #endif
69 
70 #ifdef CONFIG_XENO_TLSF
71 
/* Entry points of the TLSF allocator backing the private heaps. */
size_t get_used_size(void *pool);
void destroy_memory_pool(void *pool);
size_t add_new_area(void *pool, size_t size, void *mem);
void *malloc_ex(size_t size, void *pool);
void free_ex(void *pool, void *ptr);
void *tlsf_malloc(size_t size);
void tlsf_free(void *ptr);
size_t malloc_usable_size_ex(void *ptr, void *pool);
80 
/* Tear down the TLSF pool backing this heap object. */
static inline
void pvheapobj_destroy(struct heapobj *hobj)
{
	destroy_memory_pool(hobj->pool);
}
86 
87 static inline
88 int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
89 {
90  hobj->size = add_new_area(hobj->pool, size, mem);
91  if (hobj->size == (size_t)-1)
92  return __bt(-EINVAL);
93 
94  return 0;
95 }
96 
/* Carve a block of @size bytes out of the heap's TLSF pool. */
static inline
void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
{
	return malloc_ex(size, hobj->pool);
}

/* Return @ptr to the heap's TLSF pool. */
static inline
void pvheapobj_free(struct heapobj *hobj, void *ptr)
{
	free_ex(ptr, hobj->pool);
}

/* Usable size of @ptr within the heap's TLSF pool. */
static inline
size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
{
	return malloc_usable_size_ex(ptr, hobj->pool);
}

/* Amount of memory currently consumed from the heap's TLSF pool. */
static inline
size_t pvheapobj_inquire(struct heapobj *hobj)
{
	return get_used_size(hobj->pool);
}
120 
/* Allocate from the main (process-private) TLSF pool. */
static inline void *pvmalloc(size_t size)
{
	return tlsf_malloc(size);
}

/* Release a pvmalloc()ed block to the main TLSF pool. */
static inline void pvfree(void *ptr)
{
	tlsf_free(ptr);
}
130 
/* strdup() flavour drawing memory from the private TLSF pool. */
static inline char *pvstrdup(const char *ptr)
{
	size_t len = strlen(ptr) + 1;
	char *dup = (char *)pvmalloc(len);

	if (dup == NULL)
		return NULL;

	/* The length is known already, so copy terminator and all. */
	return (char *)memcpy(dup, ptr, len);
}
141 
142 #elif defined(CONFIG_XENO_HEAPMEM)
143 
144 #include <stdlib.h>
145 #include <boilerplate/heapmem.h>
146 
147 extern struct heap_memory heapmem_main;
148 
149 static inline
150 void pvheapobj_destroy(struct heapobj *hobj)
151 {
152  heapmem_destroy((struct heap_memory *)hobj->pool);
153  if (hobj->pool != (void *)&heapmem_main)
154  __STD(free(hobj->pool));
155 }
156 
157 static inline
158 int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
159 {
160  return heapmem_extend((struct heap_memory *)hobj->pool,
161  mem, size);
162 }
163 
164 static inline
165 void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
166 {
167  return heapmem_alloc((struct heap_memory *)hobj->pool, size);
168 }
169 
170 static inline
171 void pvheapobj_free(struct heapobj *hobj, void *ptr)
172 {
173  heapmem_free((struct heap_memory *)hobj->pool, ptr);
174 }
175 
176 static inline
177 size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
178 {
179  ssize_t size = heapmem_check((struct heap_memory *)hobj->pool, ptr);
180  return size < 0 ? 0 : size;
181 }
182 
183 static inline
184 size_t pvheapobj_inquire(struct heapobj *hobj)
185 {
186  return heapmem_used_size((struct heap_memory *)hobj->pool);
187 }
188 
/* Allocate from the main (process-private) HEAPMEM pool. */
static inline void *pvmalloc(size_t size)
{
	return heapmem_alloc(&heapmem_main, size);
}

/* Release a pvmalloc()ed block to the main HEAPMEM pool. */
static inline void pvfree(void *ptr)
{
	heapmem_free(&heapmem_main, ptr);
}
198 
/* strdup() flavour drawing memory from the private HEAPMEM pool. */
static inline char *pvstrdup(const char *ptr)
{
	size_t len = strlen(ptr) + 1;
	char *dup = (char *)pvmalloc(len);

	if (dup == NULL)
		return NULL;

	/* The length is known already, so copy terminator and all. */
	return (char *)memcpy(dup, ptr, len);
}
209 
210 #else /* !CONFIG_XENO_HEAPMEM, i.e. malloc */
211 
212 #include <stdlib.h>
213 
static inline void *pvmalloc(size_t size)
{
	/*
	 * NOTE: We don't want debug _nrt assertions to trigger when
	 * running over Cobalt if the user picked this allocator, so
	 * we make sure to call the glibc directly, not the Cobalt
	 * wrappers.
	 */
	return __STD(malloc(size));
}

static inline void pvfree(void *ptr)
{
	/* Same rationale as pvmalloc(): bypass the Cobalt wrapper. */
	__STD(free(ptr));
}

static inline char *pvstrdup(const char *ptr)
{
	/* NOTE(review): strdup() is not routed through __STD() —
	   presumably that symbol is not wrapped; confirm. */
	return strdup(ptr);
}

void pvheapobj_destroy(struct heapobj *hobj);

/* Graft the memory area @mem of @size bytes onto the heap. */
int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem);

void *pvheapobj_alloc(struct heapobj *hobj, size_t size);

void pvheapobj_free(struct heapobj *hobj, void *ptr);

/* Amount of memory currently in use from the heap. */
size_t pvheapobj_inquire(struct heapobj *hobj);

/* Usable size of @ptr, or 0 if it does not belong to the heap. */
size_t pvheapobj_validate(struct heapobj *hobj, void *ptr);
246 
247 #endif /* !CONFIG_XENO_HEAPMEM */
248 
249 #ifdef CONFIG_XENO_PSHARED
250 
/* Base address of the main shared heap for this session. */
extern void *__main_heap;

/* Session-wide hash table; presumably catalogs named objects — confirm. */
extern struct hash_table *__main_catalog;
#define main_catalog (*((struct hash_table *)__main_catalog))

/* Descriptor of the session-wide thread/heap group (see struct sysgroup). */
extern struct sysgroup *__main_sysgroup;
257 
struct sysgroup_memspec {
	/* Linkage into a sysgroup thread/heap list. */
	struct holder next;
};
262 
263 static inline void *mainheap_ptr(memoff_t off)
264 {
265  return off ? (void *)__memptr(__main_heap, off) : NULL;
266 }
267 
268 static inline memoff_t mainheap_off(void *addr)
269 {
270  return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
271 }
272 
/*
 * ptr shall point to a block of memory allocated within the main heap
 * if non-null; such address is always 8-byte aligned. Handles of
 * shared heap pointers are returned with bit #0 set, which serves as
 * a special tag detected in mainheap_deref(). A null pointer yields
 * the tagged handle 1, which mainheap_deref() translates back to
 * NULL, since a zero offset decodes to the null pointer (see
 * mainheap_ptr()).
 */
#define mainheap_ref(ptr, type)						\
	({								\
		type handle;						\
		assert(__builtin_types_compatible_p(typeof(type), unsigned long) || \
		       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
		handle = (type)mainheap_off(ptr);			\
		handle|1;						\
	})
/*
 * Handles of shared heap-based pointers have bit #0 set (the tag laid
 * by mainheap_ref()). Tagged handles are translated back to pointers
 * through the main heap offset they carry; other values are not
 * translated, and the return value is the original handle cast to a
 * pointer. A null handle is always returned unchanged.
 */
#define mainheap_deref(handle, type)					\
	({								\
		type *ptr;						\
		assert(__builtin_types_compatible_p(typeof(handle), unsigned long) || \
		       __builtin_types_compatible_p(typeof(handle), uintptr_t)); \
		ptr = (handle & 1) ? (type *)mainheap_ptr(handle & ~1UL) : (type *)handle; \
		ptr;							\
	})
302 
303 static inline void
304 __sysgroup_add(struct sysgroup_memspec *obj, struct listobj *q, int *countp)
305 {
306  write_lock_nocancel(&__main_sysgroup->lock);
307  (*countp)++;
308  list_append(&obj->next, q);
309  write_unlock(&__main_sysgroup->lock);
310 }
311 
/* Register @__obj with the session group named __group (thread/heap). */
#define sysgroup_add(__group, __obj)				\
	__sysgroup_add(__obj, &(__main_sysgroup->__group ## _list),	\
		       &(__main_sysgroup->__group ## _count))
315 
316 static inline void
317 __sysgroup_remove(struct sysgroup_memspec *obj, int *countp)
318 {
319  write_lock_nocancel(&__main_sysgroup->lock);
320  (*countp)--;
321  list_remove(&obj->next);
322  write_unlock(&__main_sysgroup->lock);
323 }
324 
/* Unregister @__obj from the session group named __group. */
#define sysgroup_remove(__group, __obj)				\
	__sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count))
327 
/* Hold the sysgroup lock for reading (e.g. around for_each_sysgroup()). */
static inline void sysgroup_lock(void)
{
	read_lock_nocancel(&__main_sysgroup->lock);
}

/* Drop the sysgroup read lock taken by sysgroup_lock(). */
static inline void sysgroup_unlock(void)
{
	read_unlock(&__main_sysgroup->lock);
}
337 
/* Current number of elements in the given group list. */
#define sysgroup_count(__group)			\
	(__main_sysgroup->__group ## _count)

/* Iterate over a group list; safe against removal of the current entry. */
#define for_each_sysgroup(__obj, __tmp, __group)	\
	list_for_each_entry_safe(__obj, __tmp, &(__main_sysgroup->__group ## _list), next)
343 
/* One-time initialization of the shared heap support. */
int heapobj_pkg_init_shared(void);

/* Create a shared heap of @size bytes named @name. */
int heapobj_init(struct heapobj *hobj, const char *name,
		 size_t size);
348 
349 static inline int __heapobj_init(struct heapobj *hobj, const char *name,
350  size_t size, void *unused)
351 {
352  /* Can't work on user-defined memory in shared mode. */
353  return heapobj_init(hobj, name, size);
354 }
355 
/* Create a shared array heap of @elems objects of @size bytes each. */
int heapobj_init_array(struct heapobj *hobj, const char *name,
		       size_t size, int elems);

void heapobj_destroy(struct heapobj *hobj);

/* Graft the memory area @mem of @size bytes onto the heap. */
int heapobj_extend(struct heapobj *hobj,
		   size_t size, void *mem);

void *heapobj_alloc(struct heapobj *hobj,
		    size_t size);

void heapobj_free(struct heapobj *hobj,
		  void *ptr);

/* Usable size of @ptr (mirrors pvheapobj_validate() semantics). */
size_t heapobj_validate(struct heapobj *hobj,
			void *ptr);

/* Amount of memory currently in use from the heap. */
size_t heapobj_inquire(struct heapobj *hobj);

size_t heapobj_get_size(struct heapobj *hobj);

/* Attach/detach this process to/from the named session's shared heaps. */
int heapobj_bind_session(const char *session);

void heapobj_unbind_session(void);

int heapobj_unlink_session(const char *session);

/* Allocation helpers working on the main shared heap. */
void *xnmalloc(size_t size);

void xnfree(void *ptr);

char *xnstrdup(const char *ptr);
388 
389 #else /* !CONFIG_XENO_PSHARED */
390 
/* Empty placeholder: no group linkage is needed without pshared. */
struct sysgroup_memspec {
};
393 
/*
 * Whether an object is laid in some shared heap. Never if pshared
 * mode is disabled.
 */
static inline int pshared_check(void *heap, void *addr)
{
	(void)heap;
	(void)addr;

	return 0;
}
402 
/*
 * Check that a reference of type __dst is wide enough to carry the
 * value __src without truncation, then convert. The C++ flavour
 * checks at run time (assert); the C flavour rejects narrow
 * destinations at compile time via __builtin_choose_expr.
 */
#ifdef __cplusplus
#define __check_ref_width(__dst, __src)					\
	({								\
		assert(sizeof(__dst) >= sizeof(__src));			\
		(typeof(__dst))__src;					\
	})
#else
#define __check_ref_width(__dst, __src)					\
	__builtin_choose_expr(						\
		sizeof(__dst) >= sizeof(__src), (typeof(__dst))__src,	\
		((void)0))
#endif
415 
/* Without pshared, a "reference" is merely the pointer value itself,
   width-checked and (in debug builds) checked for heap membership. */
#define mainheap_ref(ptr, type)						\
	({								\
		type handle;						\
		handle = __check_ref_width(handle, ptr);		\
		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
		handle;							\
	})
/* Converse of mainheap_ref(): the handle is the pointer value. */
#define mainheap_deref(handle, type)					\
	({								\
		type *ptr;						\
		ptr = __check_ref_width(ptr, handle);			\
		ptr;							\
	})
429 
/* Group management is a no-op without pshared support. */
#define sysgroup_add(__group, __obj)	do { } while (0)
#define sysgroup_remove(__group, __obj)	do { } while (0)
432 
static inline int heapobj_pkg_init_shared(void)
{
	/* Nothing to set up: shared heaps are compiled out. */
	return 0;
}
437 
/* Without pshared, heaps are always process-private. */
static inline int __heapobj_init(struct heapobj *hobj, const char *name,
				 size_t size, void *mem)
{
	return __heapobj_init_private(hobj, name, size, mem);
}

/* No user-provided memory: the private backend allocates the pool. */
static inline int heapobj_init(struct heapobj *hobj, const char *name,
			       size_t size)
{
	return __heapobj_init_private(hobj, name, size, NULL);
}

/* Array heap of @elems fixed-size objects, process-private. */
static inline int heapobj_init_array(struct heapobj *hobj, const char *name,
				     size_t size, int elems)
{
	return heapobj_init_array_private(hobj, name, size, elems);
}
455 
/* Single-process mode: all heapobj_* calls delegate to the private
   (pv*) backend selected at configuration time. */
static inline void heapobj_destroy(struct heapobj *hobj)
{
	pvheapobj_destroy(hobj);
}

static inline int heapobj_extend(struct heapobj *hobj,
				 size_t size, void *mem)
{
	return pvheapobj_extend(hobj, size, mem);
}

static inline void *heapobj_alloc(struct heapobj *hobj,
				  size_t size)
{
	return pvheapobj_alloc(hobj, size);
}

static inline void heapobj_free(struct heapobj *hobj,
				void *ptr)
{
	pvheapobj_free(hobj, ptr);
}

static inline size_t heapobj_validate(struct heapobj *hobj,
				      void *ptr)
{
	return pvheapobj_validate(hobj, ptr);
}

static inline size_t heapobj_inquire(struct heapobj *hobj)
{
	return pvheapobj_inquire(hobj);
}
489 
static inline int heapobj_bind_session(const char *session)
{
	(void)session;
	/* No session support without pshared. */
	return -ENOSYS;
}

static inline int heapobj_unlink_session(const char *session)
{
	(void)session;
	/* Nothing to unlink; report success. */
	return 0;
}

static inline void heapobj_unbind_session(void)
{
	/* Nothing to do. */
}
501 
/* Without pshared, xnmalloc/xnfree/xnstrdup map to the private pool. */
static inline void *xnmalloc(size_t size)
{
	return pvmalloc(size);
}

static inline void xnfree(void *ptr)
{
	pvfree(ptr);
}

static inline char *xnstrdup(const char *ptr)
{
	return pvstrdup(ptr);
}
516 
517 #endif /* !CONFIG_XENO_PSHARED */
518 
/* Symbolic name given to the heap at init time. */
static inline const char *heapobj_name(struct heapobj *hobj)
{
	return hobj->name;
}

/* Size of the heap area, in bytes, as recorded by the active backend. */
static inline size_t heapobj_size(struct heapobj *hobj)
{
	return hobj->size;
}
528 
529 #endif /* _COPPERPLATE_HEAPOBJ_H */