/* Xenomai 3.1 */
/* thread.h */
1 /*
2  * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
3  *
4  * Xenomai is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published
6  * by the Free Software Foundation; either version 2 of the License,
7  * or (at your option) any later version.
8  *
9  * Xenomai is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with Xenomai; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
17  * 02111-1307, USA.
18  */
19 #ifndef _COBALT_KERNEL_THREAD_H
20 #define _COBALT_KERNEL_THREAD_H
21 
22 #include <linux/wait.h>
23 #include <linux/sched.h>
24 #include <linux/sched/rt.h>
25 #include <cobalt/kernel/list.h>
26 #include <cobalt/kernel/stat.h>
27 #include <cobalt/kernel/timer.h>
28 #include <cobalt/kernel/registry.h>
29 #include <cobalt/kernel/schedparam.h>
30 #include <cobalt/kernel/trace.h>
31 #include <cobalt/kernel/synch.h>
32 #include <cobalt/uapi/kernel/thread.h>
33 #include <cobalt/uapi/signal.h>
34 #include <asm/xenomai/machine.h>
35 #include <asm/xenomai/thread.h>
36 
/* State bits denoting a blocked (non-runnable) thread condition. */
#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNMIGRATE|XNHELD|XNDBGSTOP)
/* State bits carrying per-thread control mode settings (see xnthread_set_mode()). */
#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB)
43 
/* Forward declarations; full definitions live in sibling headers/sources. */
struct xnthread;
struct xnsched;
struct xnselector;
struct xnsched_class;
struct xnsched_tpslot;
struct xnthread_personality;
struct completion;
51 
/* Creation attributes consumed by xnthread_init()/__xnthread_init(). */
struct xnthread_init_attr {
	struct xnthread_personality *personality; /* Initial interface personality */
	cpumask_t affinity;	/* CPUs the thread may run on */
	int flags;		/* Creation flags -- presumably XNTHREAD state bits; verify against callers */
	const char *name;	/* Symbolic name of the thread */
};
58 
/* Start-up attributes consumed by xnthread_start(). */
struct xnthread_start_attr {
	int mode;		/* Initial mode bits -- see XNTHREAD_MODE_BITS */
	void (*entry)(void *cookie); /* Thread entry routine */
	void *cookie;		/* Opaque argument passed to entry() */
};
64 
/* Context block describing a wait; see xnthread_prepare_wait(). */
struct xnthread_wait_context {
	int posted;	/* Non-zero once xnthread_complete_wait() has run on it */
};
68 
/*
 * Interface personality descriptor. Each thread runs under a
 * personality providing a set of lifecycle hooks (ops), any of which
 * may be NULL; they are invoked through xnthread_run_handler() and
 * xnthread_run_handler_stack().
 */
struct xnthread_personality {
	const char *name;	/* Human-readable personality name */
	unsigned int magic;	/* Magic marker identifying the personality */
	int xid;		/* Personality identifier -- TODO confirm allocation scheme */
	atomic_t refcnt;	/* Reference count -- presumably attached threads/processes; verify */
	struct {
		void *(*attach_process)(void);	/* Process binding hook; return value is passed to detach_process() -- TODO confirm */
		void (*detach_process)(void *arg);
		void (*map_thread)(struct xnthread *thread);
		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
							    int dest_cpu);
		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
	} ops; /* Stackable hooks: each may return the next personality to run, or NULL (see xnthread_run_handler_stack()). */
	struct module *module;	/* Owning kernel module */
};
87 
/* Cobalt core thread control block (TCB). */
struct xnthread {
	struct xnarchtcb tcb;		/* Architecture-dependent block */

	__u32 state;			/* Thread state flags (see XNTHREAD_BLOCK_BITS) */
	__u32 info;			/* Thread information flags */
	__u32 local_info;		/* Local thread information flags */

	struct xnsched *sched;		/* Thread scheduler */
	struct xnsched_class *sched_class; /* Current scheduling class */
	struct xnsched_class *base_class; /* Base scheduling class */

#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
	struct list_head tp_link;	/* Link in per-sched TP thread queue */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	struct xnsched_quota_group *quota; /* Quota scheduling group. */
	struct list_head quota_expired;	/* Quota linkage -- TODO confirm queue semantics */
	struct list_head quota_next;	/* Quota linkage -- TODO confirm queue semantics */
#endif
	cpumask_t affinity;		/* Processor affinity. */

	int bprio;			/* Base priority (see xnthread_base_priority()) */

	int cprio;			/* Current priority (see xnthread_current_priority()) */

	int wprio;			/* Priority used while waiting -- TODO confirm weighting rule */

	int lock_count;			/* Nesting level of a lock held by this thread -- TODO confirm which */

	struct list_head rlink;		/* Thread queue linkage -- TODO confirm owner queue */

	struct list_head plink;		/* Thread queue linkage -- TODO confirm owner queue */

	struct list_head glink;		/* Thread queue linkage -- presumably global thread list; verify */

	struct list_head boosters;	/* Objects boosting this thread (iterated by xnthread_for_each_booster()) */

	struct xnsynch *wchan;		/* Resource the thread pends on */

	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */

	int res_count;			/* Held resources count (see xnthread_get_resource()) */

	struct xntimer rtimer;		/* Resource timer */

	struct xntimer ptimer;		/* Periodic timer */

	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */

	struct xnthread_wait_context *wcontext;	/* Active wait context. */

	struct {
		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
		xnstat_counter_t xsc;	/* Xenomai syscalls */
		xnstat_counter_t pf;	/* Number of page faults */
		xnstat_exectime_t account; /* Execution time accounting entity */
		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
	} stat;

	struct xnselector *selector;	/* For select. */

	xnhandle_t handle;		/* Handle in registry (set by xnthread_register()) */

	char name[XNOBJECT_NAME_LEN];	/* Symbolic name of thread */

	void (*entry)(void *cookie);	/* Thread entry routine */
	void *cookie;			/* Cookie to pass to the entry routine */

	struct xnthread_user_window *u_window; /* Window shared with userland -- see xnthread_sync_window() */

	struct xnthread_personality *personality; /* Interface personality of the thread */

	struct completion exited;	/* Completion object -- presumably signaled on exit for xnthread_join(); verify */

#ifdef CONFIG_XENO_OPT_DEBUG
	const char *exe_path;		/* Executable path */
	u32 proghash;			/* Hash value for exe_path */
#endif
};
202 
/* Return the full state word of @thread. */
static inline int xnthread_get_state(const struct xnthread *thread)
{
	return thread->state;
}
207 
/* Test whether any bit of @bits is set in @thread's state word. */
static inline int xnthread_test_state(struct xnthread *thread, int bits)
{
	return thread->state & bits;
}
212 
213 static inline void xnthread_set_state(struct xnthread *thread, int bits)
214 {
215  thread->state |= bits;
216 }
217 
218 static inline void xnthread_clear_state(struct xnthread *thread, int bits)
219 {
220  thread->state &= ~bits;
221 }
222 
/* Test whether any bit of @bits is set in @thread's information word. */
static inline int xnthread_test_info(struct xnthread *thread, int bits)
{
	return thread->info & bits;
}
227 
228 static inline void xnthread_set_info(struct xnthread *thread, int bits)
229 {
230  thread->info |= bits;
231 }
232 
233 static inline void xnthread_clear_info(struct xnthread *thread, int bits)
234 {
235  thread->info &= ~bits;
236 }
237 
/* Test whether any bit of @bits is set in @curr's local information word. */
static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
{
	return curr->local_info & bits;
}
242 
243 static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
244 {
245  curr->local_info |= bits;
246 }
247 
248 static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
249 {
250  curr->local_info &= ~bits;
251 }
252 
/* Return the architecture-dependent block embedded in @thread. */
static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
{
	return &thread->tcb;
}
257 
/* Return the base (unboosted) priority of @thread. */
static inline int xnthread_base_priority(const struct xnthread *thread)
{
	return thread->bprio;
}
262 
/* Return the current (effective) priority of @thread. */
static inline int xnthread_current_priority(const struct xnthread *thread)
{
	return thread->cprio;
}
267 
/* Return the Linux task backing @thread, from its architecture TCB. */
static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
{
	return xnthread_archtcb(thread)->core.host_task;
}
272 
/* Iterate over the objects boosting @__thread (linked via their 'next' member). */
#define xnthread_for_each_booster(__pos, __thread) \
	list_for_each_entry(__pos, &(__thread)->boosters, next)

/* Same as above, safe against removal of the current entry while iterating. */
#define xnthread_for_each_booster_safe(__pos, __tmp, __thread) \
	list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
278 
/*
 * Invoke personality handler __h on thread __t, if implemented;
 * a NULL handler makes this a no-op.
 */
#define xnthread_run_handler(__t, __h, __a...)				\
	do {								\
		struct xnthread_personality *__p__ = (__t)->personality; \
		if ((__p__)->ops.__h)					\
			(__p__)->ops.__h(__t, ##__a);			\
	} while (0)

/*
 * Invoke handler __h across the personality stack: each handler may
 * return the next personality to run, or NULL to stop the chain.
 */
#define xnthread_run_handler_stack(__t, __h, __a...)			\
	do {								\
		struct xnthread_personality *__p__ = (__t)->personality; \
		do {							\
			if ((__p__)->ops.__h == NULL)			\
				break;					\
			__p__ = (__p__)->ops.__h(__t, ##__a);		\
		} while (__p__);					\
	} while (0)
295 
/* Return the active wait context of @thread. */
static inline
struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
{
	return thread->wcontext;
}
301 
302 static inline
303 int xnthread_register(struct xnthread *thread, const char *name)
304 {
305  return xnregistry_enter(name, thread, &thread->handle, NULL);
306 }
307 
/*
 * Resolve a registry handle into a thread pointer. Returns NULL when
 * no object is registered under @threadh, or when the object found is
 * stale (its stored handle no longer matches the index part of @threadh).
 */
static inline
struct xnthread *xnthread_lookup(xnhandle_t threadh)
{
	struct xnthread *thread = xnregistry_lookup(threadh, NULL);
	return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
}
314 
315 static inline void xnthread_sync_window(struct xnthread *thread)
316 {
317  if (thread->u_window) {
318  thread->u_window->state = thread->state;
319  thread->u_window->info = thread->info;
320  }
321 }
322 
323 static inline
324 void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
325 {
326  if (thread->u_window) {
327  thread->u_window->state = thread->state & ~state_bits;
328  thread->u_window->info = thread->info;
329  }
330 }
331 
332 static inline
333 void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
334 {
335  if (thread->u_window) {
336  thread->u_window->state = thread->state | state_bits;
337  thread->u_window->info = thread->info;
338  }
339 }
340 
341 static inline int normalize_priority(int prio)
342 {
343  return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
344 }
345 
/* Low-level initialization backend of xnthread_init(). */
int __xnthread_init(struct xnthread *thread,
		    const struct xnthread_init_attr *attr,
		    struct xnsched *sched,
		    struct xnsched_class *sched_class,
		    const union xnsched_policy_param *sched_param);

/* Backend of xnthread_test_cancel(); acts upon a pending XNCANCELD. */
void __xnthread_test_cancel(struct xnthread *curr);

void __xnthread_cleanup(struct xnthread *curr);

void __xnthread_discard(struct xnthread *thread);
357 
/* Retrieve the current Cobalt core TCB via the pipeline's per-task info. */
static inline struct xnthread *xnthread_current(void)
{
	return ipipe_current_threadinfo()->thread;
}
377 
/* Retrieve the Cobalt core TCB attached to Linux task @p. */
static inline struct xnthread *xnthread_from_task(struct task_struct *p)
{
	return ipipe_task_threadinfo(p)->thread;
}
393 
403 static inline void xnthread_test_cancel(void)
404 {
405  struct xnthread *curr = xnthread_current();
407  if (curr && xnthread_test_info(curr, XNCANCELD))
408  __xnthread_test_cancel(curr);
409 }
410 
/* Mark the wait described by @wc as satisfied (see xnthread_wait_complete_p()). */
static inline
void xnthread_complete_wait(struct xnthread_wait_context *wc)
{
	wc->posted = 1;
}
416 
/* Non-zero if xnthread_complete_wait() has been called on @wc. */
static inline
int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
{
	return wc->posted;
}
422 
#ifdef CONFIG_XENO_ARCH_FPU
/* FPU context maintenance on behalf of @sched (arch-specific implementation). */
void xnthread_switch_fpu(struct xnsched *sched);
#else
/* No FPU support built in: nothing to switch. */
static inline void xnthread_switch_fpu(struct xnsched *sched) { }
#endif /* CONFIG_XENO_ARCH_FPU */
428 
void xnthread_init_shadow_tcb(struct xnthread *thread);

void xnthread_init_root_tcb(struct xnthread *thread);

/* Remove @thread from the registry (reverse of xnthread_register()). */
void xnthread_deregister(struct xnthread *thread);

/* Decode a thread status word into a human-readable string in @buf. */
char *xnthread_format_status(unsigned long status,
			     char *buf, int size);

pid_t xnthread_host_pid(struct xnthread *thread);

int xnthread_set_clock(struct xnthread *thread,
		       struct xnclock *newclock);

xnticks_t xnthread_get_timeout(struct xnthread *thread,
			       xnticks_t ns);

xnticks_t xnthread_get_period(struct xnthread *thread);

void xnthread_prepare_wait(struct xnthread_wait_context *wc);

/* Initialize a new thread from @attr (front-end of __xnthread_init()). */
int xnthread_init(struct xnthread *thread,
		  const struct xnthread_init_attr *attr,
		  struct xnsched_class *sched_class,
		  const union xnsched_policy_param *sched_param);

/* Start a newly created thread. */
int xnthread_start(struct xnthread *thread,
		   const struct xnthread_start_attr *attr);

/* Change control mode bits of the current thread (see XNTHREAD_MODE_BITS). */
int xnthread_set_mode(int clrmask,
		      int setmask);

/* Suspend a thread, optionally bounded by @timeout on wait channel @wchan. */
void xnthread_suspend(struct xnthread *thread,
		      int mask,
		      xnticks_t timeout,
		      xntmode_t timeout_mode,
		      struct xnsynch *wchan);

/* Resume a thread from the blocking condition(s) in @mask. */
void xnthread_resume(struct xnthread *thread,
		     int mask);

/* Unblock a thread. */
int xnthread_unblock(struct xnthread *thread);

/* Make a thread periodic (period of @period ns, starting at @idate). */
int xnthread_set_periodic(struct xnthread *thread,
			  xnticks_t idate,
			  xntmode_t timeout_mode,
			  xnticks_t period);

/* Wait for the next periodic release point; overrun count goes to @overruns_r. */
int xnthread_wait_period(unsigned long *overruns_r);

/* Set thread time-slicing information (round-robin quantum). */
int xnthread_set_slice(struct xnthread *thread,
		       xnticks_t quantum);

/* Cancel a thread. */
void xnthread_cancel(struct xnthread *thread);

/* Join with a terminated thread. */
int xnthread_join(struct xnthread *thread, bool uninterruptible);

/* Migrate the current Linux task to the Xenomai (primary) domain. */
int xnthread_harden(void);

/* Switch the current shadow thread back to the Linux (secondary) domain. */
void xnthread_relax(int notify, int reason);

void __xnthread_kick(struct xnthread *thread);

void xnthread_kick(struct xnthread *thread);

void __xnthread_demote(struct xnthread *thread);

void xnthread_demote(struct xnthread *thread);

/* Deliver signal @sig with argument @arg to @thread (e.g. SIGDEBUG). */
void xnthread_signal(struct xnthread *thread,
		     int sig, int arg);

void xnthread_pin_initial(struct xnthread *thread);

/* Create a shadow thread context over a kernel task. */
int xnthread_map(struct xnthread *thread,
		 struct completion *done);

void xnthread_call_mayday(struct xnthread *thread, int reason);
507 
508 static inline void xnthread_get_resource(struct xnthread *curr)
509 {
510  if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
511  curr->res_count++;
512 }
513 
514 static inline int xnthread_put_resource(struct xnthread *curr)
515 {
516  if (xnthread_test_state(curr, XNWEAK) ||
517  IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
518  if (unlikely(curr->res_count == 0)) {
519  if (xnthread_test_state(curr, XNWARN))
520  xnthread_signal(curr, SIGDEBUG,
521  SIGDEBUG_RESCNT_IMBALANCE);
522  return -EPERM;
523  }
524  curr->res_count--;
525  }
526 
527  return 0;
528 }
529 
530 static inline void xnthread_commit_ceiling(struct xnthread *curr)
531 {
532  if (curr->u_window->pp_pending)
533  xnsynch_commit_ceiling(curr);
534 }
535 
#ifdef CONFIG_SMP

/* Move @thread over to scheduler @sched (i.e. another CPU). */
void xnthread_migrate_passive(struct xnthread *thread,
			      struct xnsched *sched);
#else

/* Uniprocessor build: passive migration is a no-op. */
static inline void xnthread_migrate_passive(struct xnthread *thread,
					    struct xnsched *sched)
{ }

#endif
547 
/* Backend of xnthread_set_schedparam(). */
int __xnthread_set_schedparam(struct xnthread *thread,
			      struct xnsched_class *sched_class,
			      const union xnsched_policy_param *sched_param);

/* Change the base scheduling parameters of a thread. */
int xnthread_set_schedparam(struct xnthread *thread,
			    struct xnsched_class *sched_class,
			    const union xnsched_policy_param *sched_param);

int xnthread_killall(int grace, int mask);

/* Backend of xnthread_propagate_schedparam(); acts upon XNSCHEDP. */
void __xnthread_propagate_schedparam(struct xnthread *curr);
559 
560 static inline void xnthread_propagate_schedparam(struct xnthread *curr)
561 {
562  if (xnthread_test_info(curr, XNSCHEDP))
563  __xnthread_propagate_schedparam(curr);
564 }
565 
/* Root personality of the Cobalt core (defined in the core, not here). */
extern struct xnthread_personality xenomai_personality;

#endif /* !_COBALT_KERNEL_THREAD_H */
/*
 * NOTE(review): everything below is Doxygen cross-reference residue
 * accidentally appended past the include guard by the documentation
 * extractor. It is not part of the original header; preserved verbatim
 * but commented out so the file remains compilable.
 *
 * #define XNSCHEDP
 * schedparam propagation is pending
 * Definition: thread.h:75
 * void xnthread_relax(int notify, int reason)
 * Switch a shadow thread back to the Linux domain.
 * Definition: thread.c:2088
 * void xnthread_cancel(struct xnthread *thread)
 * Cancel a thread.
 * Definition: thread.c:1541
 * #define XNCANCELD
 * Cancellation request is pending.
 * Definition: thread.h:73
 * static void xnthread_test_cancel(void)
 * Introduce a thread cancellation point.
 * Definition: thread.h:406
 * #define XNWARN
 * Issue SIGDEBUG on error detection.
 * Definition: thread.h:46
 * #define XNDEBUG
 * User-level debugging enabled.
 * Definition: thread.h:53
 * int xnregistry_enter(const char *key, void *objaddr, xnhandle_t *phandle, struct xnpnode *pnode)
 * Register a real-time object.
 * Definition: registry.c:632
 * void xnthread_suspend(struct xnthread *thread, int mask, xnticks_t timeout, xntmode_t timeout_mode, struct xnsynch *wchan)
 * Suspend a thread.
 * Definition: thread.c:881
 * #define XNWEAK
 * Non real-time shadow (from the WEAK class)
 * Definition: thread.h:49
 * int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
 * Set thread time-slicing information.
 * Definition: thread.c:1483
 * static struct xnthread * xnthread_current(void)
 * Retrieve the current Cobalt core TCB.
 * Definition: thread.h:374
 * int xnthread_wait_period(unsigned long *overruns_r)
 * Wait for the next periodic release point.
 * Definition: thread.c:1406
 * Scheduling information structure.
 * Definition: sched.h:58
 * int xnthread_map(struct xnthread *thread, struct completion *done)
 * Create a shadow thread context over a kernel task.
 * Definition: thread.c:2540
 * int xnthread_set_mode(int clrmask, int setmask)
 * Change control mode of the current thread.
 * Definition: thread.c:786
 * static void * xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
 * Find a real-time object into the registry.
 * Definition: registry.h:176
 * int xnthread_init(struct xnthread *thread, const struct xnthread_init_attr *attr, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
 * Initialize a new thread.
 * Definition: thread.c:620
 * int xnthread_harden(void)
 * Migrate a Linux task to the Xenomai domain.
 * Definition: thread.c:1943
 * int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
 * Make a thread periodic.
 * Definition: thread.c:1320
 * int xnthread_start(struct xnthread *thread, const struct xnthread_start_attr *attr)
 * Start a newly created thread.
 * Definition: thread.c:694
 * Copyright © 2011 Gilles Chanteperdrix gilles.chanteperdrix@xenomai.org.
 * Definition: atomic.h:24
 * int xnthread_unblock(struct xnthread *thread)
 * Unblock a thread.
 * Definition: thread.c:1228
 * static struct xnthread * xnthread_from_task(struct task_struct *p)
 * Retrieve the Cobalt core TCB attached to a Linux task.
 * Definition: thread.h:391
 * int xnthread_join(struct xnthread *thread, bool uninterruptible)
 * Join with a terminated thread.
 * Definition: thread.c:1671
 * int xnthread_set_schedparam(struct xnthread *thread, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
 * Change the base scheduling parameters of a thread.
 * Definition: thread.c:1847
 * void xnthread_resume(struct xnthread *thread, int mask)
 * Resume a thread.
 * Definition: thread.c:1109
 */