1/*
2 * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
3 * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
4 *
5 * Xenomai is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published
7 * by the Free Software Foundation; either version 2 of the License,
8 * or (at your option) any later version.
9 *
10 * Xenomai is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with Xenomai; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
18 * 02111-1307, USA.
19 */
20#undef TRACE_SYSTEM
21#define TRACE_SYSTEM cobalt_core
22
23#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
24#define _TRACE_COBALT_CORE_H
25
26#include <linux/tracepoint.h>
27#include <linux/math64.h>
28#include <cobalt/kernel/timer.h>
29#include <cobalt/kernel/registry.h>
30#include <cobalt/uapi/kernel/types.h>
31
32struct xnsched;
33struct xnthread;
34struct xnsynch;
35struct xnsched_class;
36struct xnsched_quota_group;
37struct xnthread_init_attr;
38
/*
 * Event class for thread lifecycle tracepoints: records the thread's
 * host-side pid and its state/info bitmasks at the time of the event.
 */
DECLARE_EVENT_CLASS(thread_event,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(unsigned long, state)
		__field(unsigned long, info)
	),

	TP_fast_assign(
		__entry->state = thread->state;
		__entry->info = thread->info;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("pid=%d state=0x%lx info=0x%lx",
		  __entry->pid, __entry->state, __entry->info)
);
58
59DECLARE_EVENT_CLASS(curr_thread_event,
60 TP_PROTO(struct xnthread *thread),
61 TP_ARGS(thread),
62
63 TP_STRUCT__entry(
64 __field(struct xnthread *, thread)
65 __field(unsigned long, state)
66 __field(unsigned long, info)
67 ),
68
69 TP_fast_assign(
70 __entry->state = thread->state;
71 __entry->info = thread->info;
72 ),
73
74 TP_printk("state=0x%lx info=0x%lx",
75 __entry->state, __entry->info)
76);
77
/*
 * Event class for tracepoints taken when a thread starts waiting on a
 * synchronization object; records the object's address only.
 */
DECLARE_EVENT_CLASS(synch_wait_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
92
/*
 * Event class for tracepoints taken when a synchronization object is
 * signaled/released; records the object's address only.
 */
DECLARE_EVENT_CLASS(synch_post_event,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch),

	TP_STRUCT__entry(
		__field(struct xnsynch *, synch)
	),

	TP_fast_assign(
		__entry->synch = synch;
	),

	TP_printk("synch=%p", __entry->synch)
);
107
/* Event class for IRQ-related tracepoints; records the IRQ number. */
DECLARE_EVENT_CLASS(irq_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq=%u", __entry->irq)
);
122
/*
 * Event class for clock tick tracepoints; same payload as irq_event
 * but printed with a distinguishing "clock_irq=" label.
 */
DECLARE_EVENT_CLASS(clock_event,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(unsigned int, irq)
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("clock_irq=%u", __entry->irq)
);
137
/* Event class for timer tracepoints; records the timer's address. */
DECLARE_EVENT_CLASS(timer_event,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
	),

	TP_fast_assign(
		__entry->timer = timer;
	),

	TP_printk("timer=%p", __entry->timer)
);
152
/*
 * Event class for registry operations; records the registration key
 * (substituting "(anon)" for anonymous objects with a NULL key) and
 * the registered object's address.
 */
DECLARE_EVENT_CLASS(registry_event,
	TP_PROTO(const char *key, void *addr),
	TP_ARGS(key, addr),

	TP_STRUCT__entry(
		__string(key, key ?: "(anon)")
		__field(void *, addr)
	),

	TP_fast_assign(
		__wrap_assign_str(key, key ?: "(anon)");
		__entry->addr = addr;
	),

	TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr)
);
169
/* Fired when the scheduler runs; records the runqueue status bits. */
TRACE_EVENT(cobalt_schedule,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
184
/*
 * Fired when rescheduling is requested on a remote CPU; records the
 * target runqueue's status bits.
 */
TRACE_EVENT(cobalt_schedule_remote,
	TP_PROTO(struct xnsched *sched),
	TP_ARGS(sched),

	TP_STRUCT__entry(
		__field(unsigned long, status)
	),

	TP_fast_assign(
		__entry->status = sched->status;
	),

	TP_printk("status=0x%lx", __entry->status)
);
199
/*
 * Fired on a context switch between two Cobalt threads; records name,
 * pid and priority of both sides, plus the outgoing thread's state.
 * The raw descriptor pointers are stored in the record but not printed.
 */
TRACE_EVENT(cobalt_switch_context,
	TP_PROTO(struct xnthread *prev, struct xnthread *next),
	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__field(struct xnthread *, prev)
		__string(prev_name, prev->name)
		__field(pid_t, prev_pid)
		__field(int, prev_prio)
		__field(unsigned long, prev_state)
		__field(struct xnthread *, next)
		__string(next_name, next->name)
		__field(pid_t, next_pid)
		__field(int, next_prio)
	),

	TP_fast_assign(
		__entry->prev = prev;
		__wrap_assign_str(prev_name, prev->name);
		__entry->prev_pid = xnthread_host_pid(prev);
		__entry->prev_prio = xnthread_current_priority(prev);
		__entry->prev_state = prev->state;
		__entry->next = next;
		__wrap_assign_str(next_name, next->name);
		__entry->next_pid = xnthread_host_pid(next);
		__entry->next_prio = xnthread_current_priority(next);
	),

	TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
		  __get_str(prev_name), __entry->prev_pid,
		  __entry->prev_prio, __entry->prev_state,
		  __get_str(next_name), __entry->next_pid, __entry->next_prio)
);
233
234#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
235
236TRACE_EVENT(cobalt_schedquota_refill,
237 TP_PROTO(int dummy),
238 TP_ARGS(dummy),
239
240 TP_STRUCT__entry(
241 __field(int, dummy)
242 ),
243
244 TP_fast_assign(
245 (void)dummy;
246 ),
247
248 TP_printk("%s", "")
249);
250
/*
 * Event class for quota thread-group operations; records the group id.
 */
DECLARE_EVENT_CLASS(schedquota_group_event,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg),

	TP_STRUCT__entry(
		__field(int, tgid)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
	),

	TP_printk("tgid=%d",
		  __entry->tgid)
);
266
/* Quota group creation/destruction events (schedquota_group_event). */
DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg)
);

DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
	TP_PROTO(struct xnsched_quota_group *tg),
	TP_ARGS(tg)
);
276
/*
 * Fired when a quota group's budget is changed; records the group id
 * and the new nominal/peak percentages of CPU time.
 */
TRACE_EVENT(cobalt_schedquota_set_limit,
	TP_PROTO(struct xnsched_quota_group *tg,
		 int percent,
		 int peak_percent),
	TP_ARGS(tg, percent, peak_percent),

	TP_STRUCT__entry(
		__field(int, tgid)
		__field(int, percent)
		__field(int, peak_percent)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
		__entry->percent = percent;
		__entry->peak_percent = peak_percent;
	),

	TP_printk("tgid=%d percent=%d peak_percent=%d",
		  __entry->tgid, __entry->percent, __entry->peak_percent)
);
298
/*
 * Event class for attaching/detaching a thread to/from a quota group;
 * records group id, thread descriptor and host pid.
 */
DECLARE_EVENT_CLASS(schedquota_thread_event,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread),

	TP_STRUCT__entry(
		__field(int, tgid)
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
	),

	TP_fast_assign(
		__entry->tgid = tg->tgid;
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
	),

	TP_printk("tgid=%d thread=%p pid=%d",
		  __entry->tgid, __entry->thread, __entry->pid)
);
319
/* Thread add/remove events on a quota group (schedquota_thread_event). */
DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread)
);

DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
	TP_PROTO(struct xnsched_quota_group *tg,
		 struct xnthread *thread),
	TP_ARGS(tg, thread)
);
331
332#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
333
/*
 * Fired when a Cobalt thread is initialized; records the descriptor,
 * thread name, creation flags, scheduling class name and current prio.
 */
TRACE_EVENT(cobalt_thread_init,
	TP_PROTO(struct xnthread *thread,
		 const struct xnthread_init_attr *attr,
		 struct xnsched_class *sched_class),
	TP_ARGS(thread, attr, sched_class),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__string(thread_name, thread->name)
		__string(class_name, sched_class->name)
		__field(unsigned long, flags)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__wrap_assign_str(thread_name, thread->name);
		__entry->flags = attr->flags;
		__wrap_assign_str(class_name, sched_class->name);
		__entry->cprio = thread->cprio;
	),

	TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d",
		  __entry->thread, __get_str(thread_name), __entry->flags,
		  __get_str(class_name), __entry->cprio)
);
360
/*
 * Fired when a thread is suspended; records pid, the suspension mask,
 * timeout value/mode and the wait channel (if any).
 */
TRACE_EVENT(cobalt_thread_suspend,
	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
		 xntmode_t timeout_mode, struct xnsynch *wchan),
	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(unsigned long, mask)
		__field(xnticks_t, timeout)
		__field(xntmode_t, timeout_mode)
		__field(struct xnsynch *, wchan)
	),

	TP_fast_assign(
		__entry->pid = xnthread_host_pid(thread);
		__entry->mask = mask;
		__entry->timeout = timeout;
		__entry->timeout_mode = timeout_mode;
		__entry->wchan = wchan;
	),

	TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
		  __entry->pid, __entry->mask,
		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
);
386
/*
 * Fired when a thread is resumed; records its name, host pid and the
 * condition mask being cleared.
 */
TRACE_EVENT(cobalt_thread_resume,
	TP_PROTO(struct xnthread *thread, unsigned long mask),
	TP_ARGS(thread, mask),

	TP_STRUCT__entry(
		__string(name, thread->name)
		__field(pid_t, pid)
		__field(unsigned long, mask)
	),

	TP_fast_assign(
		__wrap_assign_str(name, thread->name);
		__entry->pid = xnthread_host_pid(thread);
		__entry->mask = mask;
	),

	TP_printk("name=%s pid=%d mask=0x%lx",
		  __get_str(name), __entry->pid, __entry->mask)
);
406
/*
 * Fired on a fault taken by a Cobalt thread; records the faulting
 * instruction pointer and the fault type code.
 */
TRACE_EVENT(cobalt_thread_fault,
	TP_PROTO(unsigned long ip, unsigned int type),
	TP_ARGS(ip, type),

	TP_STRUCT__entry(
		__field(unsigned long, ip)
		__field(unsigned int, type)
	),

	TP_fast_assign(
		__entry->ip = ip;
		__entry->type = type;
	),

	TP_printk("ip=%#lx type=%#x",
		  __entry->ip, __entry->type)
);
424
/*
 * Fired when a thread's current priority changes; records descriptor,
 * host pid and the new effective priority.
 */
TRACE_EVENT(cobalt_thread_set_current_prio,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(int, cprio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->cprio = xnthread_current_priority(thread);
	),

	TP_printk("thread=%p pid=%d prio=%d",
		  __entry->thread, __entry->pid, __entry->cprio)
);
444
/*
 * Thread lifecycle events sharing the thread_event payload
 * (pid/state/info), plus periodic-mode events on the current thread
 * sharing the curr_thread_event payload.
 */
DEFINE_EVENT(thread_event, cobalt_thread_start,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_cancel,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_join,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(thread_event, cobalt_thread_unblock,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
479
/*
 * Fired when the current thread migrates to another CPU; records the
 * destination CPU number.
 */
TRACE_EVENT(cobalt_thread_migrate,
	TP_PROTO(unsigned int cpu),
	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%u", __entry->cpu)
);
494
/*
 * Fired when a (non-running) thread is migrated to another CPU by a
 * third party; records descriptor, host pid and destination CPU.
 */
TRACE_EVENT(cobalt_thread_migrate_passive,
	TP_PROTO(struct xnthread *thread, unsigned int cpu),
	TP_ARGS(thread, cpu),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->cpu = cpu;
	),

	TP_printk("thread=%p pid=%d cpu=%u",
		  __entry->thread, __entry->pid, __entry->cpu)
);
514
/*
 * Shadow (dual-kernel) transition and watchdog events on the current
 * thread, sharing the curr_thread_event payload.
 */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
529
/* Map SIGDEBUG_* relax causes to symbolic names for trace output. */
#define cobalt_print_relax_reason(reason)				\
	__print_symbolic(reason,					\
			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })
536
/*
 * Fired when the current thread is about to relax (leave the real-time
 * domain); records the symbolic relax reason.
 */
TRACE_EVENT(cobalt_shadow_gorelax,
	TP_PROTO(int reason),
	TP_ARGS(reason),

	TP_STRUCT__entry(
		__field(int, reason)
	),

	TP_fast_assign(
		__entry->reason = reason;
	),

	TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason))
);
551
/* Shadow relax-completion and entry events (curr_thread_event). */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);

DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
561
/*
 * Fired when a regular task is mapped to a Cobalt shadow thread;
 * records the descriptor, host pid and base priority.
 */
TRACE_EVENT(cobalt_shadow_map,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread),

	TP_STRUCT__entry(
		__field(struct xnthread *, thread)
		__field(pid_t, pid)
		__field(int, prio)
	),

	TP_fast_assign(
		__entry->thread = thread;
		__entry->pid = xnthread_host_pid(thread);
		__entry->prio = xnthread_base_priority(thread);
	),

	TP_printk("thread=%p pid=%d prio=%d",
		  __entry->thread, __entry->pid, __entry->prio)
);
581
/* Fired when a shadow thread is unmapped (curr_thread_event payload). */
DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
	TP_PROTO(struct xnthread *thread),
	TP_ARGS(thread)
);
586
/*
 * Fired when work is requested in the Linux (low-priority) stage for a
 * task; records the request type, target pid and command name.
 * NOTE(review): only the type string's pointer is stored and it is
 * dereferenced when the trace is read — callers presumably pass static
 * string literals; verify at call sites.
 */
TRACE_EVENT(cobalt_lostage_request,
	TP_PROTO(const char *type, struct task_struct *task),
	TP_ARGS(type, task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(const char *, type)
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("request=%s pid=%d comm=%s",
		  __entry->type, __entry->pid, __entry->comm)
);
606
/*
 * Fired when a task is woken up from the Linux stage; records its pid
 * and command name.
 */
TRACE_EVENT(cobalt_lostage_wakeup,
	TP_PROTO(struct task_struct *task),
	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s",
		  __entry->pid, __entry->comm)
);
624
/*
 * Fired when a signal is sent to a task from the Linux stage; records
 * pid, command name and signal number.
 */
TRACE_EVENT(cobalt_lostage_signal,
	TP_PROTO(struct task_struct *task, int sig),
	TP_ARGS(task, sig),

	TP_STRUCT__entry(
		__field(pid_t, pid)
		__array(char, comm, TASK_COMM_LEN)
		__field(int, sig)
	),

	TP_fast_assign(
		__entry->pid = task_pid_nr(task);
		__entry->sig = sig;
		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
	),

	TP_printk("pid=%d comm=%s sig=%d",
		  __entry->pid, __entry->comm, __entry->sig)
);
644
/* IRQ handling lifecycle events, all sharing the irq_event payload. */
DEFINE_EVENT(irq_event, cobalt_irq_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_attach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_detach,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_enable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(irq_event, cobalt_irq_disable,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);
674
/* Clock tick handler entry/exit events (clock_event payload). */
DEFINE_EVENT(clock_event, cobalt_clock_entry,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);

DEFINE_EVENT(clock_event, cobalt_clock_exit,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq)
);
684
/* Timer stop/expiry events (timer_event payload). */
DEFINE_EVENT(timer_event, cobalt_timer_stop,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);

DEFINE_EVENT(timer_event, cobalt_timer_expire,
	TP_PROTO(struct xntimer *timer),
	TP_ARGS(timer)
);
694
/* Map XN_* timer modes to symbolic names for trace output. */
#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
			 { XN_RELATIVE, "rel" }, \
			 { XN_ABSOLUTE, "abs" }, \
			 { XN_REALTIME, "rt" })
700
/*
 * Fired when a timer is started; records the timer address, its name
 * (only available when CONFIG_XENO_OPT_STATS is set), expiry value,
 * reload interval and timing mode.
 */
TRACE_EVENT(cobalt_timer_start,
	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
		 xntmode_t mode),
	TP_ARGS(timer, value, interval, mode),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
#ifdef CONFIG_XENO_OPT_STATS
		__string(name, timer->name)
#endif
		__field(xnticks_t, value)
		__field(xnticks_t, interval)
		__field(xntmode_t, mode)
	),

	TP_fast_assign(
		__entry->timer = timer;
#ifdef CONFIG_XENO_OPT_STATS
		__wrap_assign_str(name, timer->name);
#endif
		__entry->value = value;
		__entry->interval = interval;
		__entry->mode = mode;
	),

	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
		  __entry->timer,
#ifdef CONFIG_XENO_OPT_STATS
		  __get_str(name),
#else
		  "(anon)",
#endif
		  __entry->value, __entry->interval,
		  cobalt_print_timer_mode(__entry->mode))
);
736
737#ifdef CONFIG_SMP
738
/*
 * Fired when a timer is migrated to another CPU (SMP only); records
 * the timer address and destination CPU.
 */
TRACE_EVENT(cobalt_timer_migrate,
	TP_PROTO(struct xntimer *timer, unsigned int cpu),
	TP_ARGS(timer, cpu),

	TP_STRUCT__entry(
		__field(struct xntimer *, timer)
		__field(unsigned int, cpu)
	),

	TP_fast_assign(
		__entry->timer = timer;
		__entry->cpu = cpu;
	),

	TP_printk("timer=%p cpu=%u",
		  __entry->timer, __entry->cpu)
);
756
757#endif /* CONFIG_SMP */
758
/*
 * Synchronization object events: wait-side (synch_wait_event) and
 * post-side (synch_post_event) instances, all recording the object
 * address only.
 */
DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_release,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);

DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
	TP_PROTO(struct xnsynch *synch),
	TP_ARGS(synch)
);
798
/* Registry enter/remove/unlink events (registry_event payload). */
DEFINE_EVENT(registry_event, cobalt_registry_enter,
	TP_PROTO(const char *key, void *addr),
	TP_ARGS(key, addr)
);

DEFINE_EVENT(registry_event, cobalt_registry_remove,
	TP_PROTO(const char *key, void *addr),
	TP_ARGS(key, addr)
);

DEFINE_EVENT(registry_event, cobalt_registry_unlink,
	TP_PROTO(const char *key, void *addr),
	TP_ARGS(key, addr)
);
813
/*
 * Fired when the next clock shot is programmed; records the delay in
 * microseconds and the absolute expiry (trace-clock seconds/nanos),
 * computed at assign time from trace_clock_local() + delta.
 */
TRACE_EVENT(cobalt_tick_shot,
	TP_PROTO(s64 delta),
	TP_ARGS(delta),

	TP_STRUCT__entry(
		__field(u64, secs)
		__field(u32, nsecs)
		__field(s64, delta)
	),

	TP_fast_assign(
		/* ns -> us for display. */
		__entry->delta = div_s64(delta, 1000);
		__entry->secs = div_u64_rem(trace_clock_local() + delta,
					    NSEC_PER_SEC, &__entry->nsecs);
	),

	TP_printk("next tick at %Lu.%06u (delay: %Ld us)",
		  (unsigned long long)__entry->secs,
		  __entry->nsecs / 1000, __entry->delta)
);
834
/* Free-form message tracepoint; records and prints the given string. */
TRACE_EVENT(cobalt_trace,
	TP_PROTO(const char *msg),
	TP_ARGS(msg),
	TP_STRUCT__entry(
		__string(msg, msg)
	),
	TP_fast_assign(
		__wrap_assign_str(msg, msg);
	),
	TP_printk("%s", __get_str(msg))
);
846
/* Records an arbitrary (id, 64-bit value) pair for ad-hoc tracing. */
TRACE_EVENT(cobalt_trace_longval,
	TP_PROTO(int id, u64 val),
	TP_ARGS(id, val),
	TP_STRUCT__entry(
		__field(int, id)
		__field(u64, val)
	),
	TP_fast_assign(
		__entry->id = id;
		__entry->val = val;
	),
	TP_printk("id=%#x, v=%llu", __entry->id, __entry->val)
);
860
/* Records a (pid, priority) pair for ad-hoc tracing. */
TRACE_EVENT(cobalt_trace_pid,
	TP_PROTO(pid_t pid, int prio),
	TP_ARGS(pid, prio),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(int, prio)
	),
	TP_fast_assign(
		__entry->pid = pid;
		__entry->prio = prio;
	),
	TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio)
);
874
/*
 * Fired when a new latency peak is observed; records the peak in
 * nanoseconds and prints it as microseconds with 3 decimals.
 */
TRACE_EVENT(cobalt_latpeak,
	TP_PROTO(int latmax_ns),
	TP_ARGS(latmax_ns),
	TP_STRUCT__entry(
		__field(int, latmax_ns)
	),
	TP_fast_assign(
		__entry->latmax_ns = latmax_ns;
	),
	TP_printk("** latency peak: %d.%.3d us **",
		  __entry->latmax_ns / 1000,
		  __entry->latmax_ns % 1000)
);
888
/*
 * Basically cobalt_trace() + trigger point: records the issuer string;
 * intended as a user-defined trigger for trace capture.
 */
TRACE_EVENT(cobalt_trigger,
	TP_PROTO(const char *issuer),
	TP_ARGS(issuer),
	TP_STRUCT__entry(
		__string(issuer, issuer)
	),
	TP_fast_assign(
		__wrap_assign_str(issuer, issuer);
	),
	TP_printk("%s", __get_str(issuer))
);
901
902#endif /* _TRACE_COBALT_CORE_H */
903
904/* This part must be outside protection */
905#undef TRACE_INCLUDE_PATH
906#undef TRACE_INCLUDE_FILE
907#define TRACE_INCLUDE_FILE cobalt-core
908#include <trace/define_trace.h>