Xenomai 3.3.2
thread.h
1/*
2 * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
3 *
4 * Xenomai is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published
6 * by the Free Software Foundation; either version 2 of the License,
7 * or (at your option) any later version.
8 *
9 * Xenomai is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with Xenomai; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17 * 02111-1307, USA.
18 */
19#ifndef _COBALT_KERNEL_THREAD_H
20#define _COBALT_KERNEL_THREAD_H
21
22#include <linux/irq_work.h>
23#include <linux/wait.h>
24#include <linux/sched.h>
25#include <linux/sched/rt.h>
26#include <pipeline/thread.h>
27#include <cobalt/kernel/list.h>
28#include <cobalt/kernel/stat.h>
29#include <cobalt/kernel/timer.h>
30#include <cobalt/kernel/registry.h>
31#include <cobalt/kernel/schedparam.h>
32#include <cobalt/kernel/trace.h>
33#include <cobalt/kernel/synch.h>
34#include <cobalt/uapi/kernel/thread.h>
35#include <cobalt/uapi/signal.h>
36#include <asm/xenomai/machine.h>
37#include <asm/xenomai/thread.h>
38
43#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
44#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB)
45
46#define XNTHREAD_SIGDEBUG 0
47#define XNTHREAD_SIGSHADOW_HARDEN 1
48#define XNTHREAD_SIGSHADOW_BACKTRACE 2
49#define XNTHREAD_SIGSHADOW_HOME 3
50#define XNTHREAD_SIGTERM 4
51#define XNTHREAD_MAX_SIGNALS 5
52
53struct xnthread;
54struct xnsched;
55struct xnselector;
56struct xnsched_class;
57struct xnsched_tpslot;
58struct xnthread_personality;
59struct completion;
60
61struct lostage_signal {
62 struct irq_work inband_work;
63 struct task_struct *task;
64 int signo, sigval;
65};
66
67struct lostage_wakeup {
68 struct irq_work inband_work;
69 struct task_struct *task;
70};
71
72struct xnthread_init_attr {
73 struct xnthread_personality *personality;
74 cpumask_t affinity;
75 int flags;
76 const char *name;
77};
78
79struct xnthread_start_attr {
80 int mode;
81 void (*entry)(void *cookie);
82 void *cookie;
83};
84
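
/*
 * Editorial example (not part of the original header): a minimal sketch of
 * how the two attribute blocks above are typically filled in and handed to
 * xnthread_init()/xnthread_start(), both declared further down in this file.
 * xnsched_class_rt and the .rt.prio member of union xnsched_policy_param are
 * assumptions borrowed from the wider Cobalt tree (sched.h/schedparam.h),
 * not defined here; the choice of personality depends on the caller.
 */
static inline int example_spawn(struct xnthread *thread,
				void (*entry)(void *cookie), void *cookie)
{
	struct xnthread_init_attr iattr;
	struct xnthread_start_attr sattr;
	union xnsched_policy_param param;
	int ret;

	iattr.personality = &xenomai_personality;	/* root Cobalt personality */
	cpumask_copy(&iattr.affinity, cpu_online_mask);
	iattr.flags = 0;
	iattr.name = "example";
	param.rt.prio = 50;				/* assumed union layout */

	ret = xnthread_init(thread, &iattr, &xnsched_class_rt, &param);
	if (ret)
		return ret;

	sattr.mode = 0;
	sattr.entry = entry;
	sattr.cookie = cookie;

	return xnthread_start(thread, &sattr);
}
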
85struct xnthread_wait_context {
86 int posted;
87};
88
89struct xnthread_personality {
90 const char *name;
91 unsigned int magic;
92 int xid;
93 atomic_t refcnt;
94 struct {
95 void *(*attach_process)(void);
96 void (*detach_process)(void *arg);
97 void (*map_thread)(struct xnthread *thread);
98 struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
99 struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
100 struct xnthread_personality *(*move_thread)(struct xnthread *thread,
101 int dest_cpu);
102 struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
103 struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
104 } ops;
105 struct module *module;
106};
107
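
/*
 * Editorial example (not part of the original header): the skeleton of a
 * personality descriptor.  Only the hooks a skin actually needs have to be
 * set, since xnthread_run_handler() below skips NULL entries.  The magic
 * value is a placeholder; xid and refcnt are assumed to be managed by the
 * core when the personality is registered.  THIS_MODULE requires
 * <linux/module.h>.
 */
static struct xnthread_personality example_personality = {
	.name = "example",
	.magic = 0x45584d50,			/* placeholder magic */
	.ops = {
		.attach_process = NULL,		/* per-process state, optional */
		.detach_process = NULL,
		.map_thread = NULL,		/* thread mapped over a task */
		.relax_thread = NULL,		/* primary -> secondary switch */
		.harden_thread = NULL,		/* secondary -> primary switch */
		.exit_thread = NULL,
		.finalize_thread = NULL,
	},
	.module = THIS_MODULE,
};
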
108struct xnthread {
109 struct xnarchtcb tcb; /* Architecture-dependent block */
110
111 __u32 state; /* Thread state flags */
112 __u32 info; /* Thread information flags */
113 __u32 local_info; /* Local thread information flags */
114
115 struct xnsched *sched; /* Thread scheduler */
116 struct xnsched_class *sched_class; /* Current scheduling class */
117 struct xnsched_class *base_class; /* Base scheduling class */
118
119#ifdef CONFIG_XENO_OPT_SCHED_TP
120 struct xnsched_tpslot *tps; /* Current partition slot for TP scheduling */
121 struct list_head tp_link; /* Link in per-sched TP thread queue */
122#endif
123#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
124 struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
125#endif
126#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
127 struct xnsched_quota_group *quota; /* Quota scheduling group. */
128 struct list_head quota_expired;
129 struct list_head quota_next;
130#endif
131 cpumask_t affinity; /* Processor affinity. */
132
 134 int bprio; /* Base priority (before PI/PP boost) */
 135
 137 int cprio; /* Current (possibly boosted) priority */
 138
 142 int wprio; /* Weighted priority (cprio + scheduling class weight) */
 143
 144 int lock_count; /* Scheduler lock nesting count */
 150 struct list_head rlink; /* Thread holder in run queue */
 151
 156 struct list_head plink; /* Thread holder in pend queue of a resource */
 157
 159 struct list_head glink; /* Thread holder in global thread queue */
 160
 173 struct list_head boosters; /* Synch objects boosting this thread (PI/PP) */
 174
175 struct xnsynch *wchan; /* Resource the thread pends on */
176
177 struct xnsynch *wwake; /* Wait channel the thread was resumed from */
178
179 int res_count; /* Held resources count */
180
181 struct xntimer rtimer; /* Resource timer */
182
183 struct xntimer ptimer; /* Periodic timer */
184
185 xnticks_t rrperiod; /* Allotted round-robin period (ns) */
186
187 struct xnthread_wait_context *wcontext; /* Active wait context. */
188
189 struct {
190 xnstat_counter_t ssw; /* Primary -> secondary mode switch count */
191 xnstat_counter_t csw; /* Context switches (includes secondary -> primary switches) */
192 xnstat_counter_t xsc; /* Xenomai syscalls */
193 xnstat_counter_t pf; /* Number of page faults */
194 xnstat_exectime_t account; /* Execution time accounting entity */
195 xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
196 } stat;
197
198 struct xnselector *selector; /* For select. */
199
200 xnhandle_t handle; /* Handle in registry */
201
202 char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
203
204 void (*entry)(void *cookie); /* Thread entry routine */
205 void *cookie; /* Cookie to pass to the entry routine */
206
 211 struct xnthread_user_window *u_window; /* Shared kernel/user state window */
 212
 213 struct xnthread_personality *personality; /* Current personality */
 214
 215 struct completion exited; /* Signaled upon thread deletion */
 216
217#ifdef CONFIG_XENO_OPT_DEBUG
218 const char *exe_path; /* Executable path */
219 u32 proghash; /* Hash value for exe_path */
220#endif
221 struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS];
222 struct lostage_wakeup relax_work;
223};
224
225static inline int xnthread_get_state(const struct xnthread *thread)
226{
227 return thread->state;
228}
229
230static inline int xnthread_test_state(struct xnthread *thread, int bits)
231{
232 return thread->state & bits;
233}
234
235static inline void xnthread_set_state(struct xnthread *thread, int bits)
236{
237 thread->state |= bits;
238}
239
240static inline void xnthread_clear_state(struct xnthread *thread, int bits)
241{
242 thread->state &= ~bits;
243}
244
245static inline int xnthread_test_info(struct xnthread *thread, int bits)
246{
247 return thread->info & bits;
248}
249
250static inline void xnthread_set_info(struct xnthread *thread, int bits)
251{
252 thread->info |= bits;
253}
254
255static inline void xnthread_clear_info(struct xnthread *thread, int bits)
256{
257 thread->info &= ~bits;
258}
259
260static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
261{
262 return curr->local_info & bits;
263}
264
265static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
266{
267 curr->local_info |= bits;
268}
269
270static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
271{
272 curr->local_info &= ~bits;
273}
274
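
/*
 * Editorial example (not part of the original header): typical use of the
 * accessors above.  State bits (XNRELAX, XNPEND, ...) and info bits (XNTIMEO,
 * XNBREAK, ...) come from <cobalt/uapi/kernel/thread.h>; callers normally
 * hold the scheduler lock while changing them, which is not shown here.
 */
static inline int example_wait_status(struct xnthread *thread)
{
	if (xnthread_test_state(thread, XNRELAX))
		return -EPERM;		/* running in secondary mode */

	if (xnthread_test_info(thread, XNTIMEO))
		return -ETIMEDOUT;	/* last wait timed out */

	if (xnthread_test_info(thread, XNBREAK))
		return -EINTR;		/* last wait was forcibly unblocked */

	return 0;
}
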
275static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
276{
277 return &thread->tcb;
278}
279
280static inline int xnthread_base_priority(const struct xnthread *thread)
281{
282 return thread->bprio;
283}
284
285static inline int xnthread_current_priority(const struct xnthread *thread)
286{
287 return thread->cprio;
288}
289
290static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
291{
292 return xnarch_host_task(xnthread_archtcb(thread));
293}
294
295#define xnthread_for_each_booster(__pos, __thread) \
296 list_for_each_entry(__pos, &(__thread)->boosters, next)
297
298#define xnthread_for_each_booster_safe(__pos, __tmp, __thread) \
299 list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
300
301#define xnthread_run_handler(__t, __h, __a...) \
302 do { \
303 struct xnthread_personality *__p__ = (__t)->personality; \
304 if ((__p__)->ops.__h) \
305 (__p__)->ops.__h(__t, ##__a); \
306 } while (0)
307
308#define xnthread_run_handler_stack(__t, __h, __a...) \
309 do { \
310 struct xnthread_personality *__p__ = (__t)->personality; \
311 do { \
312 if ((__p__)->ops.__h == NULL) \
313 break; \
314 __p__ = (__p__)->ops.__h(__t, ##__a); \
315 } while (__p__); \
316 } while (0)
317
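
/*
 * Editorial example (not part of the original header): how the two helpers
 * above differ.  xnthread_run_handler() invokes the hook of the current
 * personality only, while xnthread_run_handler_stack() follows the chain of
 * personalities a handler may return, which is how events such as relax or
 * harden propagate through stacked skins.
 */
static inline void example_notify_relax(struct xnthread *thread)
{
	/* Run relax_thread on every stacked personality providing it. */
	xnthread_run_handler_stack(thread, relax_thread);
}
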
318static inline
319struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
320{
321 return thread->wcontext;
322}
323
324static inline
325int xnthread_register(struct xnthread *thread, const char *name)
326{
327 return xnregistry_enter(name, thread, &thread->handle, NULL);
328}
329
330static inline
331struct xnthread *xnthread_lookup(xnhandle_t threadh)
332{
333 struct xnthread *thread = xnregistry_lookup(threadh, NULL);
334 return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
335}
336
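
/*
 * Editorial example (not part of the original header): publishing a thread in
 * the registry and resolving a handle back to it.  Handles are what
 * user-space and other subsystems pass around instead of raw pointers;
 * xnthread_lookup() re-validates the handle before trusting it.
 */
static inline struct xnthread *example_publish_and_find(struct xnthread *thread)
{
	if (xnthread_register(thread, thread->name))
		return NULL;

	return xnthread_lookup(thread->handle);
}
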
337static inline void xnthread_sync_window(struct xnthread *thread)
338{
339 if (thread->u_window) {
340 thread->u_window->state = thread->state;
341 thread->u_window->info = thread->info;
342 }
343}
344
345static inline
346void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
347{
348 if (thread->u_window) {
349 thread->u_window->state = thread->state & ~state_bits;
350 thread->u_window->info = thread->info;
351 }
352}
353
354static inline
355void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
356{
357 if (thread->u_window) {
358 thread->u_window->state = thread->state | state_bits;
359 thread->u_window->info = thread->info;
360 }
361}
362
363static inline int normalize_priority(int prio)
364{
365 return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
366}
367
368int __xnthread_init(struct xnthread *thread,
369 const struct xnthread_init_attr *attr,
370 struct xnsched *sched,
371 struct xnsched_class *sched_class,
372 const union xnsched_policy_param *sched_param);
373
374void __xnthread_test_cancel(struct xnthread *curr);
375
376void __xnthread_cleanup(struct xnthread *curr);
377
378void __xnthread_discard(struct xnthread *thread);
379
395static inline struct xnthread *xnthread_current(void)
396{
397 return pipeline_current()->thread;
398}
399
411static inline struct xnthread *xnthread_from_task(struct task_struct *p)
412{
413 return pipeline_thread_from_task(p);
414}
415
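
/*
 * Editorial example (not part of the original header): telling whether the
 * calling context is a Cobalt thread running in primary mode.  A NULL return
 * from xnthread_current() means the caller is a plain Linux task; the XNRELAX
 * state bit means the shadow currently runs under the regular kernel.
 */
static inline bool example_in_primary_mode(void)
{
	struct xnthread *curr = xnthread_current();

	return curr != NULL && !xnthread_test_state(curr, XNRELAX);
}
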
425static inline void xnthread_test_cancel(void)
426{
427 struct xnthread *curr = xnthread_current();
428
429 if (curr && xnthread_test_info(curr, XNCANCELD))
430 __xnthread_test_cancel(curr);
431}
432
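
/*
 * Editorial example (not part of the original header): blocking services
 * typically re-check for pending cancellation once they wake up, so that a
 * xnthread_cancel() request issued while the caller slept is honored
 * promptly.  The sleeping primitive itself is elided.
 */
static inline void example_after_blocking(void)
{
	/* ... the caller just woke up from a blocking call ... */

	/* Cancellation point: does not return if XNCANCELD is set. */
	xnthread_test_cancel();
}
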
433static inline
434void xnthread_complete_wait(struct xnthread_wait_context *wc)
435{
436 wc->posted = 1;
437}
438
439static inline
440int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
441{
442 return wc->posted;
443}
444
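
/*
 * Editorial example (not part of the original header): the wait context is
 * meant to be embedded in a larger, caller-defined block so the waker can
 * post a result before readying the sleeper.  The actual sleep/wake calls
 * (the synch object services) live elsewhere and are only sketched as
 * comments.
 */
struct example_wait {
	struct xnthread_wait_context wc;
	int result;
};

static inline int example_wait_for_result(void)
{
	struct example_wait w = { .result = 0 };

	xnthread_prepare_wait(&w.wc);
	/* ... block on a synchronization object here ... */

	if (!xnthread_wait_complete_p(&w.wc))
		return -EINTR;		/* woken up without a post */

	return w.result;
}

static inline void example_post_result(struct xnthread *sleeper, int value)
{
	struct example_wait *w =
		container_of(xnthread_get_wait_context(sleeper),
			     struct example_wait, wc);

	w->result = value;
	xnthread_complete_wait(&w->wc);
	/* ... then resume/ready the sleeper ... */
}
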
445#ifdef CONFIG_XENO_ARCH_FPU
446void xnthread_switch_fpu(struct xnsched *sched);
447#else
448static inline void xnthread_switch_fpu(struct xnsched *sched) { }
449#endif /* CONFIG_XENO_ARCH_FPU */
450
451void xnthread_deregister(struct xnthread *thread);
452
453char *xnthread_format_status(unsigned long status,
454 char *buf, int size);
455
456pid_t xnthread_host_pid(struct xnthread *thread);
457
458int xnthread_set_clock(struct xnthread *thread,
459 struct xnclock *newclock);
460
461xnticks_t xnthread_get_timeout(struct xnthread *thread,
462 xnticks_t ns);
463
464xnticks_t xnthread_get_period(struct xnthread *thread);
465
466void xnthread_prepare_wait(struct xnthread_wait_context *wc);
467
468int xnthread_init(struct xnthread *thread,
469 const struct xnthread_init_attr *attr,
470 struct xnsched_class *sched_class,
471 const union xnsched_policy_param *sched_param);
472
473int xnthread_start(struct xnthread *thread,
474 const struct xnthread_start_attr *attr);
475
476int xnthread_set_mode(int clrmask,
477 int setmask);
478
479void xnthread_suspend(struct xnthread *thread,
480 int mask,
481 xnticks_t timeout,
482 xntmode_t timeout_mode,
483 struct xnsynch *wchan);
484
485void xnthread_resume(struct xnthread *thread,
486 int mask);
487
488int xnthread_unblock(struct xnthread *thread);
489
490int xnthread_set_periodic(struct xnthread *thread,
491 xnticks_t idate,
492 xntmode_t timeout_mode,
493 xnticks_t period);
494
495int xnthread_wait_period(unsigned long *overruns_r);
496
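
/*
 * Editorial example (not part of the original header): the canonical periodic
 * loop built on the two services above, applied to the calling thread.
 * XN_INFINITE/XN_RELATIVE are assumed from the Cobalt clock/timer headers,
 * and the -ETIMEDOUT-with-overrun-count convention of xnthread_wait_period()
 * is assumed as well; error handling is reduced to overrun accounting.
 */
static inline int example_periodic_loop(xnticks_t period_ns)
{
	struct xnthread *curr = xnthread_current();
	unsigned long overruns;
	int ret;

	ret = xnthread_set_periodic(curr, XN_INFINITE, XN_RELATIVE, period_ns);
	if (ret)
		return ret;

	for (;;) {
		ret = xnthread_wait_period(&overruns);
		if (ret == -ETIMEDOUT && overruns > 0)
			continue;	/* release point(s) missed, keep going */
		if (ret)
			return ret;	/* e.g. unblocked or not periodic */

		/* ... do one cycle of periodic work ... */
	}
}
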
497int xnthread_set_slice(struct xnthread *thread,
498 xnticks_t quantum);
499
500void xnthread_cancel(struct xnthread *thread);
501
502int xnthread_join(struct xnthread *thread, bool uninterruptible);
503
504int xnthread_harden(void);
505
506void xnthread_relax(int notify, int reason);
507
508void __xnthread_kick(struct xnthread *thread);
509
510void xnthread_kick(struct xnthread *thread);
511
512void __xnthread_demote(struct xnthread *thread);
513
514void xnthread_demote(struct xnthread *thread);
515
516void __xnthread_signal(struct xnthread *thread, int sig, int arg);
517
518void xnthread_signal(struct xnthread *thread, int sig, int arg);
519
520void xnthread_pin_initial(struct xnthread *thread);
521
522void xnthread_call_mayday(struct xnthread *thread, int reason);
523
524static inline void xnthread_get_resource(struct xnthread *curr)
525{
526 if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
527 curr->res_count++;
528}
529
530static inline int xnthread_put_resource(struct xnthread *curr)
531{
532 if (xnthread_test_state(curr, XNWEAK) ||
533 IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
534 if (unlikely(curr->res_count == 0)) {
535 if (xnthread_test_state(curr, XNWARN))
536 xnthread_signal(curr, SIGDEBUG,
537 SIGDEBUG_RESCNT_IMBALANCE);
538 return -EPERM;
539 }
540 curr->res_count--;
541 }
542
543 return 0;
544}
545
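
/*
 * Editorial example (not part of the original header): the resource count is
 * bumped and dropped around mutex-like acquisitions so that weak or debugged
 * threads can be warned about release imbalances (SIGDEBUG_RESCNT_IMBALANCE).
 * The actual lock acquisition and release primitives are elided.
 */
static inline int example_acquire(struct xnthread *curr)
{
	/* ... grab the underlying synch object here ... */
	xnthread_get_resource(curr);
	return 0;
}

static inline int example_release(struct xnthread *curr)
{
	int ret = xnthread_put_resource(curr);

	if (ret)
		return ret;	/* -EPERM: nothing was held */

	/* ... release the underlying synch object here ... */
	return 0;
}
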
546static inline void xnthread_commit_ceiling(struct xnthread *curr)
547{
548 if (curr->u_window->pp_pending)
549 xnsynch_commit_ceiling(curr);
550}
551
552#ifdef CONFIG_SMP
553
554void xnthread_migrate_passive(struct xnthread *thread,
555 struct xnsched *sched);
556#else
557
558static inline void xnthread_migrate_passive(struct xnthread *thread,
559 struct xnsched *sched)
560{ }
561
562#endif
563
564int __xnthread_set_schedparam(struct xnthread *thread,
565 struct xnsched_class *sched_class,
566 const union xnsched_policy_param *sched_param);
567
568int xnthread_set_schedparam(struct xnthread *thread,
569 struct xnsched_class *sched_class,
570 const union xnsched_policy_param *sched_param);
571
572int xnthread_killall(int grace, int mask);
573
574void __xnthread_propagate_schedparam(struct xnthread *curr);
575
576static inline void xnthread_propagate_schedparam(struct xnthread *curr)
577{
578 if (xnthread_test_info(curr, XNSCHEDP))
579 __xnthread_propagate_schedparam(curr);
580}
581
582extern struct xnthread_personality xenomai_personality;
583
586#endif /* !_COBALT_KERNEL_THREAD_H */
int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode)
Register a real-time object.
Definition registry.c:634
static void * xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
Find a real-time object into the registry.
Definition registry.h:176
#define XNSCHEDP
schedparam propagation is pending
Definition thread.h:74
#define XNCANCELD
Cancellation request is pending.
Definition thread.h:72
#define XNWARN
Issue SIGDEBUG on error detection.
Definition thread.h:45
#define XNDEBUG
User-level debugging enabled.
Definition thread.h:52
#define XNWEAK
Non real-time shadow (from the WEAK class)
Definition thread.h:48
int xnthread_wait_period(unsigned long *overruns_r)
Wait for the next periodic release point.
Definition thread.c:1394
static struct xnthread * xnthread_current(void)
Retrieve the current Cobalt core TCB.
Definition thread.h:395
int xnthread_set_mode(int clrmask, int setmask)
Change control mode of the current thread.
Definition thread.c:783
int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Initialize a new thread.
Definition thread.c:619
int xnthread_unblock(struct xnthread *thread)
Unblock a thread.
Definition thread.c:1212
int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
Set thread time-slicing information.
Definition thread.c:1470
void xnthread_resume(struct xnthread *thread, int mask)
Resume a thread.
Definition thread.c:1094
void xnthread_relax(int notify, int reason)
Switch a shadow thread back to the Linux domain.
Definition thread.c:2045
void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
Suspend a thread.
Definition thread.c:877
static void xnthread_test_cancel(void)
Introduce a thread cancellation point.
Definition thread.h:425
void xnthread_cancel(struct xnthread *thread)
Cancel a thread.
Definition thread.c:1527
static struct xnthread * xnthread_from_task(struct task_struct *p)
Retrieve the Cobalt core TCB attached to a Linux task.
Definition thread.h:411
int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr)
Start a newly created thread.
Definition thread.c:692
int xnthread_join(struct xnthread *thread, bool uninterruptible)
Join with a terminated thread.
Definition thread.c:1656
int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
Make a thread periodic.
Definition thread.c:1304
int xnthread_harden(void)
Migrate a Linux task to the Xenomai domain.
Definition thread.c:1925
int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
Change the base scheduling parameters of a thread.
Definition thread.c:1830
struct xnsched
Scheduling information structure.
Definition sched.h:64