Xenomai 3.3.2
Loading...
Searching...
No Matches
heapobj.h
1/*
2 * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#ifndef _COPPERPLATE_HEAPOBJ_H
20#define _COPPERPLATE_HEAPOBJ_H
21
22#include <sys/types.h>
23#include <stdint.h>
24#include <string.h>
25#include <assert.h>
26#include <errno.h>
27#include <pthread.h>
28#include <xeno_config.h>
29#include <boilerplate/wrappers.h>
30#include <boilerplate/list.h>
31#include <copperplate/reference.h>
32#include <boilerplate/lock.h>
33#include <copperplate/debug.h>
34
struct heapobj {
	union {
		/* In-band reference to the pool (pshared-safe form). */
		dref_type(void *) pool_ref;
		/* Local address of the pool memory. */
		void *pool;
	};
	/* Size of the storage area, in bytes. */
	size_t size;
	/* Symbolic name given at init time. */
	char name[32];
#ifdef CONFIG_XENO_PSHARED
	/* NOTE(review): presumably the name of the backing shared
	   memory object — confirm against the pshared implementation. */
	char fsname[256];
#endif
};
46
/*
 * Session-wide registry of threads and heaps, protected by a single
 * mutex (see __sysgroup_add/__sysgroup_remove below).
 */
struct sysgroup {
	/* Number of elements in thread_list. */
	int thread_count;
	struct listobj thread_list;
	/* Number of elements in heap_list. */
	int heap_count;
	struct listobj heap_list;
	/* Serializes all list/count updates. */
	pthread_mutex_t lock;
};
54
#ifdef __cplusplus
extern "C" {
#endif

/* One-time setup of the process-private heap package. */
int heapobj_pkg_init_private(void);

/*
 * Initialize a private heap of @size bytes. @mem may point to
 * caller-provided storage; heapobj_init() passes NULL here when no
 * such storage is given.
 */
int __heapobj_init_private(struct heapobj *hobj, const char *name,
			   size_t size, void *mem);

/* Initialize a private heap sized for @elems objects of @size bytes. */
int heapobj_init_array_private(struct heapobj *hobj, const char *name,
			       size_t size, int elems);
#ifdef __cplusplus
}
#endif
69
70#ifdef CONFIG_XENO_TLSF
71
/*
 * TLSF allocator entry points; no public TLSF header is included
 * here, so the prototypes are spelled out locally.
 */
size_t get_used_size(void *pool);
void destroy_memory_pool(void *pool);
size_t add_new_area(void *pool, size_t size, void *mem);
void *malloc_ex(size_t size, void *pool);
void free_ex(void *pool, void *ptr);
void *tlsf_malloc(size_t size);
void tlsf_free(void *ptr);
size_t malloc_usable_size_ex(void *ptr, void *pool);
80
static inline
void pvheapobj_destroy(struct heapobj *hobj)
{
	/* Tear down the TLSF pool backing this heap object. */
	destroy_memory_pool(hobj->pool);
}
86
87static inline
88int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
89{
90 hobj->size = add_new_area(hobj->pool, size, mem);
91 if (hobj->size == (size_t)-1)
92 return __bt(-EINVAL);
93
94 return 0;
95}
96
static inline
void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
{
	/* Allocate @size bytes from this object's TLSF pool. */
	return malloc_ex(size, hobj->pool);
}

static inline
void pvheapobj_free(struct heapobj *hobj, void *ptr)
{
	/* Return @ptr to the pool it was carved from. */
	free_ex(ptr, hobj->pool);
}

static inline
size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
{
	/* Usable size of @ptr as reported by the TLSF core. */
	return malloc_usable_size_ex(ptr, hobj->pool);
}

static inline
size_t pvheapobj_inquire(struct heapobj *hobj)
{
	/* Number of bytes currently in use in the pool. */
	return get_used_size(hobj->pool);
}

static inline void *pvmalloc(size_t size)
{
	/* Process-private allocation from the main TLSF pool. */
	return tlsf_malloc(size);
}

static inline void pvfree(void *ptr)
{
	tlsf_free(ptr);
}
130
/*
 * strdup() substitute drawing memory from the private TLSF pool.
 * Returns NULL when the pool is exhausted.
 */
static inline char *pvstrdup(const char *ptr)
{
	size_t len = strlen(ptr) + 1;
	char *str;

	str = (char *)pvmalloc(len);
	if (str == NULL)
		return NULL;

	/* The length is already known: memcpy avoids the second
	   string scan strcpy() would perform. */
	return memcpy(str, ptr, len);
}
141
142#elif defined(CONFIG_XENO_HEAPMEM)
143
144#include <stdlib.h>
145#include <boilerplate/heapmem.h>
146
147extern struct heap_memory heapmem_main;
148
149static inline
150void pvheapobj_destroy(struct heapobj *hobj)
151{
152 heapmem_destroy((struct heap_memory *)hobj->pool);
153 if (hobj->pool != (void *)&heapmem_main)
154 __STD(free(hobj->pool));
155}
156
157static inline
158int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
159{
160 return heapmem_extend((struct heap_memory *)hobj->pool,
161 mem, size);
162}
163
164static inline
165void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
166{
167 return heapmem_alloc((struct heap_memory *)hobj->pool, size);
168}
169
170static inline
171void pvheapobj_free(struct heapobj *hobj, void *ptr)
172{
173 heapmem_free((struct heap_memory *)hobj->pool, ptr);
174}
175
176static inline
177size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
178{
179 ssize_t size = heapmem_check((struct heap_memory *)hobj->pool, ptr);
180 return size < 0 ? 0 : (size_t)size;
181}
182
183static inline
184size_t pvheapobj_inquire(struct heapobj *hobj)
185{
186 return heapmem_used_size((struct heap_memory *)hobj->pool);
187}
188
189static inline void *pvmalloc(size_t size)
190{
191 return heapmem_alloc(&heapmem_main, size);
192}
193
194static inline void pvfree(void *ptr)
195{
196 heapmem_free(&heapmem_main, ptr);
197}
198
/*
 * strdup() substitute drawing memory from the main HEAPMEM pool.
 * Returns NULL when the pool is exhausted.
 */
static inline char *pvstrdup(const char *ptr)
{
	size_t len = strlen(ptr) + 1;
	char *str;

	str = (char *)pvmalloc(len);
	if (str == NULL)
		return NULL;

	/* The length is already known: memcpy avoids the second
	   string scan strcpy() would perform. */
	return memcpy(str, ptr, len);
}
209
210#else /* !CONFIG_XENO_HEAPMEM, i.e. malloc */
211
212#include <stdlib.h>
213
static inline void *pvmalloc(size_t size)
{
	/*
	 * NOTE: We don't want debug _nrt assertions to trigger when
	 * running over Cobalt if the user picked this allocator, so
	 * we make sure to call the glibc directly, not the Cobalt
	 * wrappers.
	 */
	return __STD(malloc(size));
}

static inline void pvfree(void *ptr)
{
	/* Same rationale as pvmalloc(): call the glibc directly. */
	__STD(free(ptr));
}

static inline char *pvstrdup(const char *ptr)
{
	/* NOTE(review): unlike pvmalloc/pvfree this is not routed
	   through __STD(); presumably strdup() has no Cobalt wrapper
	   to bypass — confirm against boilerplate/wrappers.h. */
	return strdup(ptr);
}
234
/* Out-of-line implementations of the private heap handlers. */
void pvheapobj_destroy(struct heapobj *hobj);

int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem);

void *pvheapobj_alloc(struct heapobj *hobj, size_t size);

void pvheapobj_free(struct heapobj *hobj, void *ptr);

size_t pvheapobj_inquire(struct heapobj *hobj);

size_t pvheapobj_validate(struct heapobj *hobj, void *ptr);
246
247#endif /* !CONFIG_XENO_HEAPMEM */
248
249#ifdef CONFIG_XENO_PSHARED
250
251extern void *__main_heap;
252
253extern struct hash_table *__main_catalog;
254#define main_catalog (*((struct hash_table *)__main_catalog))
255
256extern struct sysgroup *__main_sysgroup;
257
struct sysgroup_memspec {
	/* Linkage into a sysgroup object list. */
	struct holder next;
};
262
263static inline void *mainheap_ptr(memoff_t off)
264{
265 return off ? (void *)__memptr(__main_heap, off) : NULL;
266}
267
268static inline memoff_t mainheap_off(void *addr)
269{
270 return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
271}
272
/*
 * ptr shall point to a block of memory allocated within the main heap
 * if non-null; such address is always 8-byte aligned. Handles of
 * shared heap pointers are returned with bit #0 set, which serves as
 * a special tag detected in mainheap_deref(). A null pointer encodes
 * to the bare tag value (offset 0), which mainheap_deref() translates
 * back to a null pointer via mainheap_ptr().
 */
#define mainheap_ref(ptr, type)						\
	({								\
		type handle;						\
		assert(__builtin_types_compatible_p(typeof(type), unsigned long) || \
		       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
		handle = (type)mainheap_off(ptr);			\
		handle|1;						\
	})
/*
 * Handles of shared heap-based pointers have bit #0 set. Other values
 * are not translated, and the return value is the original handle
 * cast to a pointer. A null handle is always returned unchanged.
 */
#define mainheap_deref(handle, type)					\
	({								\
		type *ptr;						\
		assert(__builtin_types_compatible_p(typeof(handle), unsigned long) || \
		       __builtin_types_compatible_p(typeof(handle), uintptr_t)); \
		ptr = (handle & 1) ? (type *)mainheap_ptr(handle & ~1UL) : (type *)handle; \
		ptr;							\
	})
302
/* Queue @obj on @q and bump the matching element count, under the
   session-wide sysgroup lock. */
static inline void
__sysgroup_add(struct sysgroup_memspec *obj, struct listobj *q, int *countp)
{
	write_lock_nocancel(&__main_sysgroup->lock);
	(*countp)++;
	list_append(&obj->next, q);
	write_unlock(&__main_sysgroup->lock);
}

/* Add @__obj to the session-wide @__group (thread/heap) list. */
#define sysgroup_add(__group, __obj)	\
	__sysgroup_add(__obj, &(__main_sysgroup->__group ## _list), \
		       &(__main_sysgroup->__group ## _count))

/* Unlink @obj and decrement the matching element count, under the
   session-wide sysgroup lock. */
static inline void
__sysgroup_remove(struct sysgroup_memspec *obj, int *countp)
{
	write_lock_nocancel(&__main_sysgroup->lock);
	(*countp)--;
	list_remove(&obj->next);
	write_unlock(&__main_sysgroup->lock);
}

/* Remove @__obj from the session-wide @__group (thread/heap) list. */
#define sysgroup_remove(__group, __obj)	\
	__sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count))

/* Read-side protection over the sysgroup lists. */
static inline void sysgroup_lock(void)
{
	read_lock_nocancel(&__main_sysgroup->lock);
}

static inline void sysgroup_unlock(void)
{
	read_unlock(&__main_sysgroup->lock);
}

/* Current number of elements in @__group (thread/heap). */
#define sysgroup_count(__group)	\
	(__main_sysgroup->__group ## _count)

/* Removal-safe iteration over a group list; presumably meant to run
   under sysgroup_lock() — confirm with callers. */
#define for_each_sysgroup(__obj, __tmp, __group)	\
	list_for_each_entry_safe(__obj, __tmp, &(__main_sysgroup->__group ## _list), next)
343
/* One-time setup of the shared heap package for the session. */
int heapobj_pkg_init_shared(void);

int heapobj_init(struct heapobj *hobj, const char *name,
		 size_t size);

static inline int __heapobj_init(struct heapobj *hobj, const char *name,
				 size_t size, void *unused)
{
	/* Can't work on user-defined memory in shared mode. */
	return heapobj_init(hobj, name, size);
}

int heapobj_init_array(struct heapobj *hobj, const char *name,
		       size_t size, int elems);

void heapobj_destroy(struct heapobj *hobj);

int heapobj_extend(struct heapobj *hobj,
		   size_t size, void *mem);

void *heapobj_alloc(struct heapobj *hobj,
		    size_t size);

void heapobj_free(struct heapobj *hobj,
		  void *ptr);

size_t heapobj_validate(struct heapobj *hobj,
			void *ptr);

size_t heapobj_inquire(struct heapobj *hobj);

size_t heapobj_get_size(struct heapobj *hobj);

/* Attach to / detach from / unlink a named session. */
int heapobj_bind_session(const char *session);

void heapobj_unbind_session(void);

int heapobj_unlink_session(const char *session);

/* Session-wide counterparts of malloc/free/strdup. */
void *xnmalloc(size_t size);

void xnfree(void *ptr);

char *xnstrdup(const char *ptr);
388
389#else /* !CONFIG_XENO_PSHARED */
390
/* Empty placeholder: no sysgroup tracking without pshared mode. */
struct sysgroup_memspec {
};
393
/*
 * Whether an object is laid in some shared heap. Never if pshared
 * mode is disabled.
 */
static inline int pshared_check(void *heap, void *addr)
{
	/* No shared heaps exist in this configuration. */
	return 0;
}
402
/*
 * Convert @__src to the type of @__dst, rejecting narrowing
 * conversions: at run time (assert) when compiled as C++, at compile
 * time (__builtin_choose_expr) when compiled as C.
 */
#ifdef __cplusplus
#define __check_ref_width(__dst, __src)		\
	({					\
		assert(sizeof(__dst) >= sizeof(__src));	\
		(typeof(__dst))__src;		\
	})
#else
#define __check_ref_width(__dst, __src)		\
	__builtin_choose_expr(			\
		sizeof(__dst) >= sizeof(__src), (typeof(__dst))__src, \
		((void)0))
#endif
415
/* Without pshared, a handle is merely the pointer value itself,
   width-checked against the handle type. */
#define mainheap_ref(ptr, type)					\
	({							\
		type handle;					\
		handle = __check_ref_width(handle, ptr);	\
		assert(ptr == NULL || __memchk(__main_heap, ptr)); \
		handle;						\
	})
#define mainheap_deref(handle, type)				\
	({							\
		type *ptr;					\
		ptr = __check_ref_width(ptr, handle);		\
		ptr;						\
	})
429
/* Sysgroup tracking is only meaningful in pshared mode. */
#define sysgroup_add(__group, __obj)	do { } while (0)
#define sysgroup_remove(__group, __obj)	do { } while (0)
432
static inline int heapobj_pkg_init_shared(void)
{
	/* Nothing to set up without shared heap support. */
	return 0;
}

static inline int __heapobj_init(struct heapobj *hobj, const char *name,
				 size_t size, void *mem)
{
	/* Private heaps may be built over caller-provided memory. */
	return __heapobj_init_private(hobj, name, size, mem);
}

static inline int heapobj_init(struct heapobj *hobj, const char *name,
			       size_t size)
{
	/* No user memory given: the allocator obtains the storage. */
	return __heapobj_init_private(hobj, name, size, NULL);
}

static inline int heapobj_init_array(struct heapobj *hobj, const char *name,
				     size_t size, int elems)
{
	return heapobj_init_array_private(hobj, name, size, elems);
}
455
/* Without pshared support, every heapobj operation maps 1:1 to the
   process-private (pv) implementation. */
static inline void heapobj_destroy(struct heapobj *hobj)
{
	pvheapobj_destroy(hobj);
}

static inline int heapobj_extend(struct heapobj *hobj,
				 size_t size, void *mem)
{
	return pvheapobj_extend(hobj, size, mem);
}

static inline void *heapobj_alloc(struct heapobj *hobj,
				  size_t size)
{
	return pvheapobj_alloc(hobj, size);
}

static inline void heapobj_free(struct heapobj *hobj,
				void *ptr)
{
	pvheapobj_free(hobj, ptr);
}

static inline size_t heapobj_validate(struct heapobj *hobj,
				      void *ptr)
{
	return pvheapobj_validate(hobj, ptr);
}

static inline size_t heapobj_inquire(struct heapobj *hobj)
{
	return pvheapobj_inquire(hobj);
}
489
static inline int heapobj_bind_session(const char *session)
{
	/* Session binding requires shared heap support. */
	return -ENOSYS;
}

static inline int heapobj_unlink_session(const char *session)
{
	/* Nothing to unlink; report success. */
	return 0;
}

static inline void heapobj_unbind_session(void)
{
	/* Nothing to do. */
}
501
/* xnmalloc/xnfree/xnstrdup degrade to the private allocator when
   pshared mode is disabled. */
static inline void *xnmalloc(size_t size)
{
	return pvmalloc(size);
}

static inline void xnfree(void *ptr)
{
	pvfree(ptr);
}

static inline char *xnstrdup(const char *ptr)
{
	return pvstrdup(ptr);
}
516
517#endif /* !CONFIG_XENO_PSHARED */
518
/* Symbolic name given at init time. */
static inline const char *heapobj_name(struct heapobj *hobj)
{
	return hobj->name;
}

/* Recorded size of the storage area, in bytes. */
static inline size_t heapobj_size(struct heapobj *hobj)
{
	return hobj->size;
}
528
529#endif /* _COPPERPLATE_HEAPOBJ_H */