21#define TRACE_SYSTEM cobalt_core
23#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
24#define _TRACE_COBALT_CORE_H
26#include <linux/tracepoint.h>
27#include <linux/math64.h>
28#include <cobalt/kernel/timer.h>
29#include <cobalt/kernel/registry.h>
30#include <cobalt/uapi/kernel/types.h>
36struct xnsched_quota_group;
37struct xnthread_init_attr;
39DECLARE_EVENT_CLASS(thread_event,
40 TP_PROTO(
struct xnthread *thread),
45 __field(
unsigned long, state)
46 __field(
unsigned long, info)
50 __entry->state = thread->state;
51 __entry->info = thread->info;
52 __entry->pid = xnthread_host_pid(thread);
55 TP_printk(
"pid=%d state=0x%lx info=0x%lx",
56 __entry->pid, __entry->state, __entry->info)
59DECLARE_EVENT_CLASS(curr_thread_event,
60 TP_PROTO(
struct xnthread *thread),
64 __field(
struct xnthread *, thread)
65 __field(
unsigned long, state)
66 __field(
unsigned long, info)
70 __entry->state = thread->state;
71 __entry->info = thread->info;
74 TP_printk(
"state=0x%lx info=0x%lx",
75 __entry->state, __entry->info)
78DECLARE_EVENT_CLASS(synch_wait_event,
79 TP_PROTO(
struct xnsynch *synch),
83 __field(
struct xnsynch *, synch)
87 __entry->synch = synch;
90 TP_printk(
"synch=%p", __entry->synch)
93DECLARE_EVENT_CLASS(synch_post_event,
94 TP_PROTO(
struct xnsynch *synch),
98 __field(
struct xnsynch *, synch)
102 __entry->synch = synch;
105 TP_printk(
"synch=%p", __entry->synch)
108DECLARE_EVENT_CLASS(irq_event,
109 TP_PROTO(
unsigned int irq),
113 __field(
unsigned int, irq)
120 TP_printk(
"irq=%u", __entry->irq)
123DECLARE_EVENT_CLASS(clock_event,
124 TP_PROTO(
unsigned int irq),
128 __field(
unsigned int, irq)
135 TP_printk(
"clock_irq=%u", __entry->irq)
138DECLARE_EVENT_CLASS(timer_event,
139 TP_PROTO(
struct xntimer *timer),
143 __field(
struct xntimer *, timer)
147 __entry->timer = timer;
150 TP_printk(
"timer=%p", __entry->timer)
153DECLARE_EVENT_CLASS(registry_event,
154 TP_PROTO(
const char *key,
void *addr),
158 __string(key, key ?:
"(anon)")
159 __field(void *, addr)
163 __wrap_assign_str(key, key ?:
"(anon)");
164 __entry->addr = addr;
167 TP_printk(
"key=%s, addr=%p", __get_str(key), __entry->addr)
170TRACE_EVENT(cobalt_schedule,
171 TP_PROTO(
struct xnsched *sched),
175 __field(
unsigned long, status)
179 __entry->status = sched->
status;
182 TP_printk(
"status=0x%lx", __entry->status)
185TRACE_EVENT(cobalt_schedule_remote,
186 TP_PROTO(
struct xnsched *sched),
190 __field(
unsigned long, status)
194 __entry->status = sched->
status;
197 TP_printk(
"status=0x%lx", __entry->status)
200TRACE_EVENT(cobalt_switch_context,
201 TP_PROTO(
struct xnthread *prev,
struct xnthread *next),
205 __field(
struct xnthread *, prev)
206 __string(prev_name, prev->name)
207 __field(pid_t, prev_pid)
208 __field(
int, prev_prio)
209 __field(
unsigned long, prev_state)
210 __field(
struct xnthread *, next)
211 __string(next_name, next->name)
212 __field(pid_t, next_pid)
213 __field(
int, next_prio)
217 __entry->prev = prev;
218 __wrap_assign_str(prev_name, prev->name);
219 __entry->prev_pid = xnthread_host_pid(prev);
220 __entry->prev_prio = xnthread_current_priority(prev);
221 __entry->prev_state = prev->state;
222 __entry->next = next;
223 __wrap_assign_str(next_name, next->name);
224 __entry->next_pid = xnthread_host_pid(next);
225 __entry->next_prio = xnthread_current_priority(next);
228 TP_printk(
"prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
229 __get_str(prev_name), __entry->prev_pid,
230 __entry->prev_prio, __entry->prev_state,
231 __get_str(next_name), __entry->next_pid, __entry->next_prio)
234#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
236TRACE_EVENT(cobalt_schedquota_refill,
251DECLARE_EVENT_CLASS(schedquota_group_event,
252 TP_PROTO(
struct xnsched_quota_group *tg),
260 __entry->tgid = tg->tgid;
267DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
268 TP_PROTO(
struct xnsched_quota_group *tg),
272DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
273 TP_PROTO(
struct xnsched_quota_group *tg),
277TRACE_EVENT(cobalt_schedquota_set_limit,
278 TP_PROTO(
struct xnsched_quota_group *tg,
281 TP_ARGS(tg, percent, peak_percent),
285 __field(
int, percent)
286 __field(
int, peak_percent)
290 __entry->tgid = tg->tgid;
291 __entry->percent = percent;
292 __entry->peak_percent = peak_percent;
295 TP_printk(
"tgid=%d percent=%d peak_percent=%d",
296 __entry->tgid, __entry->percent, __entry->peak_percent)
299DECLARE_EVENT_CLASS(schedquota_thread_event,
300 TP_PROTO(
struct xnsched_quota_group *tg,
301 struct xnthread *thread),
306 __field(
struct xnthread *, thread)
311 __entry->tgid = tg->tgid;
312 __entry->thread = thread;
313 __entry->pid = xnthread_host_pid(thread);
316 TP_printk(
"tgid=%d thread=%p pid=%d",
317 __entry->tgid, __entry->thread, __entry->pid)
320DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
321 TP_PROTO(
struct xnsched_quota_group *tg,
322 struct xnthread *thread),
326DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
327 TP_PROTO(
struct xnsched_quota_group *tg,
328 struct xnthread *thread),
334TRACE_EVENT(cobalt_thread_init,
335 TP_PROTO(
struct xnthread *thread,
336 const struct xnthread_init_attr *attr,
337 struct xnsched_class *sched_class),
338 TP_ARGS(thread, attr, sched_class),
341 __field(
struct xnthread *, thread)
342 __string(thread_name, thread->name)
343 __string(class_name, sched_class->name)
344 __field(
unsigned long, flags)
349 __entry->thread = thread;
350 __wrap_assign_str(thread_name, thread->name);
351 __entry->flags = attr->flags;
352 __wrap_assign_str(class_name, sched_class->name);
353 __entry->cprio = thread->cprio;
356 TP_printk(
"thread=%p name=%s flags=0x%lx class=%s prio=%d",
357 __entry->thread, __get_str(thread_name), __entry->flags,
358 __get_str(class_name), __entry->cprio)
361TRACE_EVENT(cobalt_thread_suspend,
362 TP_PROTO(
struct xnthread *thread,
unsigned long mask, xnticks_t timeout,
363 xntmode_t timeout_mode,
struct xnsynch *wchan),
364 TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
368 __field(
unsigned long, mask)
369 __field(xnticks_t, timeout)
370 __field(xntmode_t, timeout_mode)
371 __field(
struct xnsynch *, wchan)
375 __entry->pid = xnthread_host_pid(thread);
376 __entry->mask = mask;
377 __entry->timeout = timeout;
378 __entry->timeout_mode = timeout_mode;
379 __entry->wchan = wchan;
382 TP_printk(
"pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
383 __entry->pid, __entry->mask,
384 __entry->timeout, __entry->timeout_mode, __entry->wchan)
387TRACE_EVENT(cobalt_thread_resume,
388 TP_PROTO(
struct xnthread *thread,
unsigned long mask),
389 TP_ARGS(thread, mask),
392 __string(name, thread->name)
394 __field(
unsigned long, mask)
398 __wrap_assign_str(name, thread->name);
399 __entry->pid = xnthread_host_pid(thread);
400 __entry->mask = mask;
403 TP_printk(
"name=%s pid=%d mask=0x%lx",
404 __get_str(name), __entry->pid, __entry->mask)
407TRACE_EVENT(cobalt_thread_fault,
408 TP_PROTO(
unsigned long ip,
unsigned int type),
412 __field(
unsigned long, ip)
413 __field(
unsigned int, type)
418 __entry->type = type;
421 TP_printk(
"ip=%#lx type=%#x",
422 __entry->ip, __entry->type)
425TRACE_EVENT(cobalt_thread_set_current_prio,
426 TP_PROTO(
struct xnthread *thread),
430 __field(
struct xnthread *, thread)
436 __entry->thread = thread;
437 __entry->pid = xnthread_host_pid(thread);
438 __entry->cprio = xnthread_current_priority(thread);
441 TP_printk(
"thread=%p pid=%d prio=%d",
442 __entry->thread, __entry->pid, __entry->cprio)
445DEFINE_EVENT(thread_event, cobalt_thread_start,
446 TP_PROTO(
struct xnthread *thread),
450DEFINE_EVENT(thread_event, cobalt_thread_cancel,
451 TP_PROTO(
struct xnthread *thread),
455DEFINE_EVENT(thread_event, cobalt_thread_join,
456 TP_PROTO(
struct xnthread *thread),
460DEFINE_EVENT(thread_event, cobalt_thread_unblock,
461 TP_PROTO(
struct xnthread *thread),
465DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
466 TP_PROTO(
struct xnthread *thread),
470DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
471 TP_PROTO(
struct xnthread *thread),
475DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
476 TP_PROTO(
struct xnthread *thread),
480TRACE_EVENT(cobalt_thread_migrate,
481 TP_PROTO(
unsigned int cpu),
485 __field(
unsigned int, cpu)
492 TP_printk(
"cpu=%u", __entry->cpu)
495TRACE_EVENT(cobalt_thread_migrate_passive,
496 TP_PROTO(
struct xnthread *thread,
unsigned int cpu),
497 TP_ARGS(thread, cpu),
500 __field(
struct xnthread *, thread)
502 __field(
unsigned int, cpu)
506 __entry->thread = thread;
507 __entry->pid = xnthread_host_pid(thread);
511 TP_printk(
"thread=%p pid=%d cpu=%u",
512 __entry->thread, __entry->pid, __entry->cpu)
515DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
516 TP_PROTO(
struct xnthread *thread),
520DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
521 TP_PROTO(
struct xnthread *thread),
525DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
526 TP_PROTO(
struct xnthread *thread),
/* Map SIGDEBUG relax-cause codes to human-readable labels for TP_printk. */
#define cobalt_print_relax_reason(reason)			\
	__print_symbolic(reason,				\
			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })
537TRACE_EVENT(cobalt_shadow_gorelax,
538 TP_PROTO(
int reason),
546 __entry->reason = reason;
549 TP_printk(
"reason=%s", cobalt_print_relax_reason(__entry->reason))
552DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
553 TP_PROTO(
struct xnthread *thread),
557DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
558 TP_PROTO(
struct xnthread *thread),
562TRACE_EVENT(cobalt_shadow_map,
563 TP_PROTO(
struct xnthread *thread),
567 __field(
struct xnthread *, thread)
573 __entry->thread = thread;
574 __entry->pid = xnthread_host_pid(thread);
575 __entry->prio = xnthread_base_priority(thread);
578 TP_printk(
"thread=%p pid=%d prio=%d",
579 __entry->thread, __entry->pid, __entry->prio)
582DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
583 TP_PROTO(
struct xnthread *thread),
587TRACE_EVENT(cobalt_lostage_request,
588 TP_PROTO(
const char *type,
struct task_struct *task),
593 __array(
char, comm, TASK_COMM_LEN)
594 __field(
const char *, type)
598 __entry->type = type;
599 __entry->pid = task_pid_nr(task);
600 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
603 TP_printk(
"request=%s pid=%d comm=%s",
604 __entry->type, __entry->pid, __entry->comm)
607TRACE_EVENT(cobalt_lostage_wakeup,
608 TP_PROTO(
struct task_struct *task),
613 __array(
char, comm, TASK_COMM_LEN)
617 __entry->pid = task_pid_nr(task);
618 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
621 TP_printk(
"pid=%d comm=%s",
622 __entry->pid, __entry->comm)
625TRACE_EVENT(cobalt_lostage_signal,
626 TP_PROTO(
struct task_struct *task,
int sig),
631 __array(
char, comm, TASK_COMM_LEN)
636 __entry->pid = task_pid_nr(task);
638 memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
641 TP_printk(
"pid=%d comm=%s sig=%d",
642 __entry->pid, __entry->comm, __entry->sig)
645DEFINE_EVENT(irq_event, cobalt_irq_entry,
646 TP_PROTO(
unsigned int irq),
650DEFINE_EVENT(irq_event, cobalt_irq_exit,
651 TP_PROTO(
unsigned int irq),
655DEFINE_EVENT(irq_event, cobalt_irq_attach,
656 TP_PROTO(
unsigned int irq),
660DEFINE_EVENT(irq_event, cobalt_irq_detach,
661 TP_PROTO(
unsigned int irq),
665DEFINE_EVENT(irq_event, cobalt_irq_enable,
666 TP_PROTO(
unsigned int irq),
670DEFINE_EVENT(irq_event, cobalt_irq_disable,
671 TP_PROTO(
unsigned int irq),
675DEFINE_EVENT(clock_event, cobalt_clock_entry,
676 TP_PROTO(
unsigned int irq),
680DEFINE_EVENT(clock_event, cobalt_clock_exit,
681 TP_PROTO(
unsigned int irq),
685DEFINE_EVENT(timer_event, cobalt_timer_stop,
686 TP_PROTO(
struct xntimer *timer),
690DEFINE_EVENT(timer_event, cobalt_timer_expire,
691 TP_PROTO(
struct xntimer *timer),
/* Map xntmode_t timer modes to short labels for TP_printk. */
#define cobalt_print_timer_mode(mode)		\
	__print_symbolic(mode,			\
			 { XN_RELATIVE, "rel" },	\
			 { XN_ABSOLUTE, "abs" },	\
			 { XN_REALTIME, "rt" })
701TRACE_EVENT(cobalt_timer_start,
702 TP_PROTO(
struct xntimer *timer, xnticks_t value, xnticks_t interval,
704 TP_ARGS(timer, value, interval, mode),
707 __field(
struct xntimer *, timer)
708#ifdef CONFIG_XENO_OPT_STATS
709 __string(name, timer->name)
711 __field(xnticks_t, value)
712 __field(xnticks_t, interval)
713 __field(xntmode_t, mode)
717 __entry->timer = timer;
718#ifdef CONFIG_XENO_OPT_STATS
719 __wrap_assign_str(name, timer->name);
721 __entry->value = value;
722 __entry->interval = interval;
723 __entry->mode = mode;
726 TP_printk(
"timer=%p(%s) value=%Lu interval=%Lu mode=%s",
728#ifdef CONFIG_XENO_OPT_STATS
733 __entry->value, __entry->interval,
734 cobalt_print_timer_mode(__entry->mode))
739TRACE_EVENT(cobalt_timer_migrate,
740 TP_PROTO(
struct xntimer *timer,
unsigned int cpu),
744 __field(
struct xntimer *, timer)
745 __field(
unsigned int, cpu)
749 __entry->timer = timer;
753 TP_printk(
"timer=%p cpu=%u",
754 __entry->timer, __entry->cpu)
759DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
760 TP_PROTO(
struct xnsynch *synch),
764DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
765 TP_PROTO(
struct xnsynch *synch),
769DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
770 TP_PROTO(
struct xnsynch *synch),
774DEFINE_EVENT(synch_post_event, cobalt_synch_release,
775 TP_PROTO(
struct xnsynch *synch),
779DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
780 TP_PROTO(
struct xnsynch *synch),
784DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
785 TP_PROTO(
struct xnsynch *synch),
789DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
790 TP_PROTO(
struct xnsynch *synch),
794DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
795 TP_PROTO(
struct xnsynch *synch),
799DEFINE_EVENT(registry_event, cobalt_registry_enter,
800 TP_PROTO(
const char *key,
void *addr),
804DEFINE_EVENT(registry_event, cobalt_registry_remove,
805 TP_PROTO(
const char *key,
void *addr),
809DEFINE_EVENT(registry_event, cobalt_registry_unlink,
810 TP_PROTO(
const char *key,
void *addr),
814TRACE_EVENT(cobalt_tick_shot,
825 __entry->delta = div_s64(delta, 1000);
826 __entry->secs = div_u64_rem(trace_clock_local() + delta,
827 NSEC_PER_SEC, &__entry->nsecs);
830 TP_printk(
"next tick at %Lu.%06u (delay: %Ld us)",
831 (
unsigned long long)__entry->secs,
832 __entry->nsecs / 1000, __entry->delta)
835TRACE_EVENT(cobalt_trace,
836 TP_PROTO(
const char *msg),
842 __wrap_assign_str(msg, msg);
844 TP_printk(
"%s", __get_str(msg))
847TRACE_EVENT(cobalt_trace_longval,
848 TP_PROTO(
int id, u64 val),
858 TP_printk(
"id=%#x, v=%llu", __entry->id, __entry->val)
861TRACE_EVENT(cobalt_trace_pid,
862 TP_PROTO(pid_t pid,
int prio),
870 __entry->prio = prio;
872 TP_printk(
"pid=%d, prio=%d", __entry->pid, __entry->prio)
875TRACE_EVENT(cobalt_latpeak,
876 TP_PROTO(
int latmax_ns),
879 __field(
int, latmax_ns)
882 __entry->latmax_ns = latmax_ns;
884 TP_printk(
"** latency peak: %d.%.3d us **",
885 __entry->latmax_ns / 1000,
886 __entry->latmax_ns % 1000)
890TRACE_EVENT(cobalt_trigger,
891 TP_PROTO(
const char *issuer),
894 __string(issuer, issuer)
897 __wrap_assign_str(issuer, issuer);
899 TP_printk(
"%s", __get_str(issuer))
905#undef TRACE_INCLUDE_PATH
906#undef TRACE_INCLUDE_FILE
907#define TRACE_INCLUDE_FILE cobalt-core
908#include <trace/define_trace.h>
/*
 * NOTE(review): Doxygen cross-reference residue from the extraction,
 * kept as a comment so the file stays valid C:
 * - struct xnsched: scheduling information structure (sched.h:64)
 * - its "unsigned long status" member (sched.h:66) is the field
 *   recorded by cobalt_schedule/cobalt_schedule_remote above.
 */