/* Xenomai 3.1 — Cobalt core tracepoint definitions (cobalt-core.h) */
1 /*
2  * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
3  * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
4  *
5  * Xenomai is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published
7  * by the Free Software Foundation; either version 2 of the License,
8  * or (at your option) any later version.
9  *
10  * Xenomai is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with Xenomai; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
18  * 02111-1307, USA.
19  */
20 #undef TRACE_SYSTEM
21 #define TRACE_SYSTEM cobalt_core
22 
23 #if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
24 #define _TRACE_COBALT_CORE_H
25 
26 #include <linux/tracepoint.h>
27 
/*
 * Event class for per-thread tracepoints (start, cancel, join,
 * unblock below).  Records the host-side pid plus the thread's
 * state and info bitmasks at the time of the event.
 */
DECLARE_EVENT_CLASS(thread_event,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(unsigned long, state)
		__field(unsigned long, info)
	),

	TP_fast_assign(
		__entry->state = thread->state;
		__entry->info = thread->info;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("pid=%d state=0x%lx info=0x%lx",
		  __entry->pid, __entry->state, __entry->info)
);
47 
48 DECLARE_EVENT_CLASS(curr_thread_event,
49  TP_PROTO(struct xnthread *thread),
50  TP_ARGS(thread),
51 
52  TP_STRUCT__entry(
53  __field(struct xnthread *, thread)
54  __field(unsigned long, state)
55  __field(unsigned long, info)
56  ),
57 
58  TP_fast_assign(
59  __entry->state = thread->state;
60  __entry->info = thread->info;
61  ),
62 
63  TP_printk("state=0x%lx info=0x%lx",
64  __entry->state, __entry->info)
65 );
66 
/*
 * Event class for tracepoints taken on the waiter side of a
 * synchronization object (sleepon, try_acquire, acquire below).
 * Records only the xnsynch address.
 */
DECLARE_EVENT_CLASS(synch_wait_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
81 
/*
 * Event class for tracepoints taken on the poster side of a
 * synchronization object (release, wakeup, flush, forget below).
 * Same record layout as synch_wait_event.
 */
DECLARE_EVENT_CLASS(synch_post_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
96 
/*
 * Event class for IRQ management tracepoints (entry/exit,
 * attach/detach, enable/disable below).  Records the IRQ number.
 */
DECLARE_EVENT_CLASS(irq_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq=%u", __entry->irq)
);
111 
/*
 * Event class for clock tick handler tracepoints (clock entry/exit
 * below).  Identical payload to irq_event; only the printed key
 * ("clock_irq=") differs.
 */
DECLARE_EVENT_CLASS(clock_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("clock_irq=%u", __entry->irq)
);
126 
/*
 * Event class for simple timer tracepoints (stop/expire below).
 * Records the xntimer address only; cobalt_timer_start carries the
 * full parameter set separately.
 */
DECLARE_EVENT_CLASS(timer_event,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
	),

	TP_fast_assign(
		__entry->timer = timer;
	),

	TP_printk("timer=%p", __entry->timer)
);
141 
/*
 * Fired when the Cobalt scheduler runs on the local CPU; records the
 * scheduler status word.
 */
TRACE_EVENT(cobalt_schedule,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
156 
/*
 * Remote-CPU counterpart of cobalt_schedule; same payload (scheduler
 * status word).
 */
TRACE_EVENT(cobalt_schedule_remote,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
171 
/*
 * Fired on a Cobalt context switch.  Captures name, host pid,
 * current priority and (for the outgoing thread) state of both the
 * previous and next threads.  Thread names are stored as dynamic
 * strings (__string/__assign_str).
 */
TRACE_EVENT(cobalt_switch_context,
	TP_PROTO(struct xnthread *prev, struct xnthread *next),
	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(struct xnthread *, prev)
		__string(prev_name, prev->name)
		__field(pid_t, prev_pid)
		__field(int, prev_prio)
		__field(unsigned long, prev_state)
		__field(struct xnthread *, next)
		__string(next_name, next->name)
		__field(pid_t, next_pid)
		__field(int, next_prio)
	),

	TP_fast_assign(
		__entry->prev = prev;
		__assign_str(prev_name, prev->name);
		__entry->prev_pid = xnthread_host_pid(prev);
		__entry->prev_prio = xnthread_current_priority(prev);
		__entry->prev_state = prev->state;
		__entry->next = next;
		__assign_str(next_name, next->name);
		__entry->next_pid = xnthread_host_pid(next);
		__entry->next_prio = xnthread_current_priority(next);
	),

	TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
		  __get_str(prev_name), __entry->prev_pid,
		  __entry->prev_prio, __entry->prev_state,
		  __get_str(next_name), __entry->next_pid, __entry->next_prio)
);
205 
206 #ifdef CONFIG_XENO_OPT_SCHED_QUOTA
207 
/*
 * Marker-only event for quota refill: carries no payload.  The dummy
 * parameter exists solely because TRACE_EVENT requires at least one
 * argument; it is explicitly discarded.
 */
TRACE_EVENT(cobalt_schedquota_refill,
	TP_PROTO(int dummy),
	TP_ARGS(dummy),

	TP_STRUCT__entry(
		__field(int, dummy)
	),

	TP_fast_assign(
		(void)dummy;	/* nothing to record */
	),

	TP_printk("%s", "")
);
222 
/*
 * Event class for quota-group lifecycle tracepoints (create/destroy
 * below).  Records the group id.
 */
DECLARE_EVENT_CLASS(schedquota_group_event,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg),

	TP_STRUCT__entry(
		__field(int, tgid)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
	),

	TP_printk("tgid=%d",
		  __entry->tgid)
);
238 
/* Quota group created. */
DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg)
);

/* Quota group destroyed. */
DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg)
);
248 
/*
 * Fired when a quota group's budget is changed; records the group id
 * and the new normal/peak percentages.
 */
TRACE_EVENT(cobalt_schedquota_set_limit,
	TP_PROTO(struct xnsched_quota_group *tg,
		 int percent,
		 int peak_percent),
	TP_ARGS(tg, percent, peak_percent),

	TP_STRUCT__entry(
		__field(int, tgid)
		__field(int, percent)
		__field(int, peak_percent)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
		__entry->percent = percent;
		__entry->peak_percent = peak_percent;
	),

	TP_printk("tgid=%d percent=%d peak_percent=%d",
		  __entry->tgid, __entry->percent, __entry->peak_percent)
);
270 
/*
 * Event class for thread membership changes in a quota group
 * (add/remove below).  Records the group id plus the thread pointer
 * and host pid.
 */
DECLARE_EVENT_CLASS(schedquota_thread_event,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread),

	TP_STRUCT__entry(
		__field(int, tgid)
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("tgid=%d thread=%p pid=%d",
		  __entry->tgid, __entry->thread, __entry->pid)
);
291 
/* Thread added to a quota group. */
DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread)
);

/* Thread removed from a quota group. */
DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread)
);
303 
304 #endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
305 
/*
 * Fired when a Cobalt thread is initialized; records the thread
 * pointer and name, its creation flags, scheduling class name and
 * current priority (cprio).
 */
TRACE_EVENT(cobalt_thread_init,
	TP_PROTO(struct xnthread *thread,
		 const struct xnthread_init_attr *attr,
		 struct xnsched_class *sched_class),
	TP_ARGS(thread, attr, sched_class),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(thread_name, thread->name)
		__string(class_name, sched_class->name)
		__field(unsigned long, flags)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__assign_str(thread_name, thread->name);
		__entry->flags = attr->flags;
		__assign_str(class_name, sched_class->name);
		__entry->cprio = thread->cprio;
	),

	TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d",
		  __entry->thread, __get_str(thread_name), __entry->flags,
		  __get_str(class_name), __entry->cprio)
);
332 
/*
 * Fired when a thread is suspended; records the host pid, suspension
 * mask, timeout value/mode and the wait channel (if any).
 * "%Lu" is the kernel-style format used here for the 64-bit tick
 * count — NOTE(review): assumes xnticks_t is 64-bit, as the matching
 * use in cobalt_timer_start suggests.
 */
TRACE_EVENT(cobalt_thread_suspend,
	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
		 xntmode_t timeout_mode, struct xnsynch *wchan),
	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(unsigned long, mask)
		__field(xnticks_t, timeout)
		__field(xntmode_t, timeout_mode)
		__field(struct xnsynch *, wchan)
	),

	TP_fast_assign(
		__entry->pid = xnthread_host_pid(thread);
		__entry->mask = mask;
		__entry->timeout = timeout;
		__entry->timeout_mode = timeout_mode;
		__entry->wchan = wchan;
	),

	TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
		  __entry->pid, __entry->mask,
		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
);
358 
/*
 * Fired when a thread is resumed; records its name, host pid and the
 * resumption mask.
 */
TRACE_EVENT(cobalt_thread_resume,
	TP_PROTO(struct xnthread *thread, unsigned long mask),
	TP_ARGS(thread, mask),

	TP_STRUCT__entry(
		__string(name, thread->name)
		__field(pid_t, pid)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__assign_str(name, thread->name);
		__entry->pid = xnthread_host_pid(thread);
		__entry->mask = mask;
	),

	TP_printk("name=%s pid=%d mask=0x%lx",
		  __get_str(name), __entry->pid, __entry->mask)
);
378 
/*
 * Fired when a fault is trapped in primary mode; records the faulting
 * instruction pointer and the architecture trap type, both extracted
 * from the I-pipe trap descriptor.
 */
TRACE_EVENT(cobalt_thread_fault,
	TP_PROTO(struct ipipe_trap_data *td),
	TP_ARGS(td),

	TP_STRUCT__entry(
		__field(void *, ip)
		__field(unsigned int, type)
	),

	TP_fast_assign(
		__entry->ip = (void *)xnarch_fault_pc(td);
		__entry->type = xnarch_fault_trap(td);
	),

	TP_printk("ip=%p type=%x",
		  __entry->ip, __entry->type)
);
396 
/*
 * Fired when a thread's current priority changes; records the thread
 * pointer, host pid and new current priority.
 */
TRACE_EVENT(cobalt_thread_set_current_prio,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->cprio = xnthread_current_priority(thread);
	),

	TP_printk("thread=%p pid=%d prio=%d",
		  __entry->thread, __entry->pid, __entry->cprio)
);
416 
/* Thread lifecycle events sharing the thread_event payload. */
DEFINE_EVENT(thread_event, cobalt_thread_start,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

/* Current-thread events sharing the curr_thread_event payload. */
DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
451 
/*
 * Fired when the current thread migrates to another CPU; records the
 * destination CPU number.
 */
TRACE_EVENT(cobalt_thread_migrate,
	TP_PROTO(unsigned int cpu),
	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%u", __entry->cpu)
);
466 
/*
 * Fired when a (non-current) thread is migrated to another CPU;
 * records the thread pointer, host pid and destination CPU.
 */
TRACE_EVENT(cobalt_thread_migrate_passive,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->cpu = cpu;
	),

	TP_printk("thread=%p pid=%d cpu=%u",
		  __entry->thread, __entry->pid, __entry->cpu)
);
486 
/* Shadow thread mode-switch events (curr_thread_event payload). */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
501 
/* Map SIGDEBUG_* relax causes to human-readable strings in TP_printk. */
#define cobalt_print_relax_reason(reason)			\
	__print_symbolic(reason,				\
			 { SIGDEBUG_UNDEFINED, "undefined" },	\
			 { SIGDEBUG_MIGRATE_SIGNAL, "signal" },	\
			 { SIGDEBUG_MIGRATE_SYSCALL, "syscall" }, \
			 { SIGDEBUG_MIGRATE_FAULT, "fault" })
508 
/*
 * Fired when the current shadow thread leaves primary mode; records
 * the relax cause, symbolized via cobalt_print_relax_reason().
 */
TRACE_EVENT(cobalt_shadow_gorelax,
	TP_PROTO(int reason),
	TP_ARGS(reason),

	TP_STRUCT__entry(
		__field(int, reason)
	),

	TP_fast_assign(
		__entry->reason = reason;
	),

	TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason))
);
523 
/* Shadow relax completion and kernel entry (curr_thread_event payload). */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
533 
/*
 * Fired when a Linux task is mapped to a Cobalt shadow thread;
 * records the thread pointer, host pid and base priority.
 */
TRACE_EVENT(cobalt_shadow_map,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(int, prio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->prio = xnthread_base_priority(thread);
	),

	TP_printk("thread=%p pid=%d prio=%d",
		  __entry->thread, __entry->pid, __entry->prio)
);
553 
/* Shadow thread unmapped from its Linux task (curr_thread_event payload). */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
558 
/*
 * Fired when a low-stage (Linux-side) request is queued for a task;
 * records the request type string, the target task's pid and comm.
 * 'type' is stored as a raw pointer — NOTE(review): this assumes the
 * caller always passes a string literal with static lifetime, since
 * only the pointer is recorded; confirm at the call sites.
 */
TRACE_EVENT(cobalt_lostage_request,
	TP_PROTO(const char *type, struct task_struct *task),
	TP_ARGS(type, task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(const char *, type)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("request=%s pid=%d comm=%s",
		  __entry->type, __entry->pid, __entry->comm)
);
578 
/*
 * Fired when a task is woken up from the low stage; records its pid
 * and comm.
 */
TRACE_EVENT(cobalt_lostage_wakeup,
	TP_PROTO(struct task_struct *task),
	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s",
		  __entry->pid, __entry->comm)
);
596 
/*
 * Fired when a signal is sent to a task from the low stage; records
 * the target's pid, comm and the signal number.
 */
TRACE_EVENT(cobalt_lostage_signal,
	TP_PROTO(struct task_struct *task, int sig),
	TP_ARGS(task, sig),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(int, sig)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		__entry->sig = sig;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s sig=%d",
		  __entry->pid, __entry->comm, __entry->sig)
);
616 
/* IRQ lifecycle events sharing the irq_event payload. */
DEFINE_EVENT(irq_event, cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);
646 
/* Clock tick handler entry/exit (clock_event payload). */
DEFINE_EVENT(clock_event, cobalt_clock_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);
656 
/* Timer stop/expire events (timer_event payload). */
DEFINE_EVENT(timer_event, cobalt_timer_stop,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);
666 
/* Map XN_* timer modes to short strings in TP_printk. */
#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
			 { XN_RELATIVE, "rel" }, \
			 { XN_ABSOLUTE, "abs" }, \
			 { XN_REALTIME, "rt" })
672 
/*
 * Fired when a timer is armed; records the timer pointer, expiry
 * value, reload interval and timing mode.  The timer name is only
 * available (and only recorded) when CONFIG_XENO_OPT_STATS is set;
 * otherwise "(anon)" is printed in its place.
 */
TRACE_EVENT(cobalt_timer_start,
	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
		 xntmode_t mode),
	TP_ARGS(timer, value, interval, mode),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
		__string(name, timer->name)
#endif
		__field(xnticks_t, value)
		__field(xnticks_t, interval)
		__field(xntmode_t, mode)
	),

	TP_fast_assign(
		__entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
		__assign_str(name, timer->name);
#endif
		__entry->value = value;
		__entry->interval = interval;
		__entry->mode = mode;
	),

	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
		  __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
		  __get_str(name),
#else
		  "(anon)",
#endif
		  __entry->value, __entry->interval,
		  cobalt_print_timer_mode(__entry->mode))
);
708 
709 #ifdef CONFIG_SMP
710 
/*
 * Fired when a timer is migrated to another CPU (SMP only); records
 * the timer pointer and destination CPU.
 */
TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);
728 
729 #endif /* CONFIG_SMP */
730 
/* Waiter-side synchronization events (synch_wait_event payload). */
DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

/* Poster-side synchronization events (synch_post_event payload). */
DEFINE_EVENT(synch_post_event, cobalt_synch_release,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);
770 
771 #endif /* _TRACE_COBALT_CORE_H */
772 
773 /* This part must be outside protection */
774 #undef TRACE_INCLUDE_PATH
775 #undef TRACE_INCLUDE_FILE
776 #define TRACE_INCLUDE_FILE cobalt-core
777 #include <trace/define_trace.h>
/*
 * Documentation-extraction residue (kept as a comment for reference):
 * struct xnsched — scheduling information structure (sched.h:58);
 * its 'status' member is declared 'unsigned long' (sched.h:60).
 */