#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>

/* Sched status flags */
#define XNRESCHED	0x10000000	/* Needs rescheduling */
#define XNINSW		0x20000000	/* In context switch */
#define XNINTCK		0x40000000	/* In master tick handler context */

/* Sched local flags */
#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
#define XNHTICK		0x00008000	/* Host tick pending */
#define XNINIRQ		0x00004000	/* In IRQ handling context */
#define XNHDEFER	0x00002000	/* Host tick deferred */

struct xnsched_rt {
	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
};
/*!
 * \brief Scheduling information structure.
 */
struct xnsched {
	/*!< Scheduler specific status bitmask. */
	unsigned long status;
	/*!< Scheduler specific local flags bitmask. */
	unsigned long lflags;
	/*!< Current thread. */
	struct xnthread *curr;
#ifdef CONFIG_SMP
	/*!< Owner CPU id. */
	int cpu;
	/*!< Mask of CPUs needing rescheduling. */
	cpumask_t resched;
#endif
	/*!< Context of built-in real-time class. */
	struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
	/*!< Context of weak scheduling class. */
	struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
	/*!< Context of TP class. */
	struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	/*!< Context of sporadic scheduling class. */
	struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	/*!< Context of runtime quota scheduling. */
	struct xnsched_quota quota;
#endif
	/*!< Interrupt nesting level. */
	volatile unsigned inesting;
	/*!< Host timer. */
	struct xntimer htimer;
	/*!< Round-robin timer. */
	struct xntimer rrbtimer;
	/*!< Root thread control block. */
	struct xnthread rootcb;
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
	struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
	/*!< Thread owning the current FPU context. */
	struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
	/*!< Watchdog timer object. */
	struct xntimer wdtimer;
#endif
#ifdef CONFIG_XENO_OPT_STATS
	/*!< Last account switch date (ticks). */
	xnticks_t last_account_switch;
	/*!< Currently active account. */
	xnstat_exectime_t *current_account;
#endif
};
DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_directory sched_vfroot;
#endif

union xnsched_policy_param;
struct xnsched_class {
	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnsched *sched);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	int (*sched_chkparam)(struct xnthread *thread,
			      const union xnsched_policy_param *p);
	bool (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	void (*sched_protectprio)(struct xnthread *thread, int prio);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
	void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
	int (*sched_init_vfile)(struct xnsched_class *schedclass,
				struct xnvfile_directory *vfroot);
	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
	int nthreads;
	struct xnsched_class *next;
	int weight;
	int policy;
	const char *name;
};
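/*
 * For illustration only: a rough sketch of how a policy module could
 * populate this handler table.  Every "sketch" identifier below is
 * hypothetical and not part of this header; the in-tree classes
 * (xnsched_class_rt, xnsched_class_idle, the optional TP, sporadic and
 * quota classes) are the authoritative examples.
 *
 *	struct xnsched_class xnsched_class_sketch = {
 *		.sched_init		= sketch_init,
 *		.sched_enqueue		= sketch_enqueue,
 *		.sched_dequeue		= sketch_dequeue,
 *		.sched_requeue		= sketch_requeue,
 *		.sched_pick		= sketch_pick,
 *		.sched_setparam		= sketch_setparam,
 *		.sched_getparam		= sketch_getparam,
 *		.sched_trackprio	= sketch_trackprio,
 *		.sched_protectprio	= sketch_protectprio,
 *		.weight			= XNSCHED_CLASS_WEIGHT(1),
 *		.policy			= SCHED_FIFO,	// hypothetical policy id
 *		.name			= "sketch",
 *	};
 *
 * Optional handlers (sched_chkparam, sched_declare, sched_forget,
 * sched_kick, sched_tick) may be left NULL; the inline wrappers below
 * test them before calling.
 */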
#define XNSCHED_CLASS_WEIGHT(n)		(n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO			0x80000000

#define xnsched_for_each_thread(__thread)	\
	list_for_each_entry(__thread, &nkthreadq, glink)

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
	return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
	return 0;
}
#endif /* CONFIG_SMP */
static inline struct xnsched *xnsched_struct(int cpu)
{
	return &per_cpu(nksched, cpu);
}
static inline struct xnsched *xnsched_current(void)
{
	return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
	return xnsched_current()->curr;
}
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

static inline void xnsched_set_self_resched(struct xnsched *sched)
{
	sched->status |= XNRESCHED;
}
#define xnsched_realtime_domain	cobalt_pipeline.domain

#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
	struct xnsched *current_sched = xnsched_current();

	if (current_sched == sched)
		current_sched->status |= XNRESCHED;
	else if (!xnsched_resched_p(sched)) {
		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
		sched->status |= XNRESCHED;
		current_sched->status |= XNRESCHED;
	}
}
#define xnsched_realtime_cpus	cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}
#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
	xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus	CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
	return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
	return 1;
}

#endif /* !CONFIG_SMP */
#define for_each_realtime_cpu(cpu)		\
	for_each_online_cpu(cpu)		\
		if (xnsched_supported_cpu(cpu))	\

int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);
static inline int __xnsched_run(struct xnsched *sched)
{
	/*
	 * Reschedule if XNRESCHED is pending, but never over an IRQ
	 * handler or in the middle of an unlocked context switch.
	 */
	if (((sched->status|sched->lflags) &
	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
		return 0;

	return ___xnsched_run(sched);
}
/**
 * @brief The rescheduling procedure.
 */
static inline int xnsched_run(void)
{
	struct xnsched *sched = xnsched_current();
	/*
	 * Read the current thread pointer once: we only care about
	 * the scheduler lock held by the running thread.
	 */
	struct xnthread *curr = READ_ONCE(sched->curr);

	/*
	 * If the scheduler lock is held by the current thread,
	 * rescheduling is deferred until the lock is dropped.
	 */
	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
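/*
 * Typical usage (illustrative sketch only): callers update thread or
 * timer state while holding the nklock with hard IRQs off, then fire
 * the rescheduling procedure once on the exit path.  The helper and
 * object names below (resume_waiter, my_object) are hypothetical;
 * xnthread_resume() and the nklock are the Cobalt primitives assumed.
 *
 *	static void resume_waiter(struct my_object *obj)
 *	{
 *		spl_t s;
 *
 *		xnlock_get_irqsave(&nklock, s);
 *		xnthread_resume(obj->waiter, XNPEND);
 *		xnlock_put_irqrestore(&nklock, s);
 *
 *		xnsched_run();	// honors the scheduler lock and IRQ state
 *	}
 */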
void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
	return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
	return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
	return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
	return !xnsched_unblockable_p();
}
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch()	xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return sched->status & XNRESCHED;
}

#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
	XENO_BUG_ON(COBALT, !hard_irqs_disabled());
	return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return 0;
}

#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
bool xnsched_set_effective_priority(struct xnthread *thread, int prio);

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init_all(void);

void xnsched_destroy_all(void);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_protect_priority(struct xnthread *thread,
			      int prio);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);
/**
 * @brief Rotate a scheduler runqueue.
 */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}
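/*
 * Illustrative sketch only: a round-robin tick handler might rotate
 * the RT runqueue of the local scheduler at the priority level of the
 * preempted thread.  The surrounding handler is hypothetical;
 * xnsched_class_rt and union xnsched_policy_param are the real types
 * assumed here.
 *
 *	static void round_robin_handler(struct xnsched *sched)
 *	{
 *		union xnsched_policy_param param = {
 *			.rt = { .prio = sched->curr->cprio },
 *		};
 *
 *		xnsched_rotate(sched, &xnsched_class_rt, &param);
 *	}
 */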
static inline int xnsched_init_thread(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_thread(thread);
	xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
	ret = xnsched_quota_init_thread(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

	return ret;
}
static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}
static inline void xnsched_tick(struct xnsched *sched)
{
	struct xnthread *curr = sched->curr;
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread consumes its round-robin time slice only when it
	 * runs within its own scheduling class (i.e. it is not
	 * currently boosted), is not blocked, undergoes round-robin
	 * scheduling and does not hold the scheduler lock.
	 */
	if (sched_class == curr->base_class &&
	    sched_class->sched_tick &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
	    curr->lock_count == 0)
		sched_class->sched_tick(sched);
}
static inline int xnsched_chkparam(struct xnsched_class *sched_class,
				   struct xnthread *thread,
				   const union xnsched_policy_param *p)
{
	if (sched_class->sched_chkparam)
		return sched_class->sched_chkparam(thread, p);

	return 0;
}
static inline int xnsched_declare(struct xnsched_class *sched_class,
				  struct xnthread *thread,
				  const union xnsched_policy_param *p)
{
	int ret;

	if (sched_class->sched_declare) {
		ret = sched_class->sched_declare(thread, p);
		if (ret)
			return ret;
	}
	if (sched_class != thread->base_class)
		sched_class->nthreads++;

	return 0;
}
static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
				     int prio)
{
	return prio + sched_class->weight;
}
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}
static inline bool xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	return thread->base_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
	thread->sched_class->sched_protectprio(thread, prio);
	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}
static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	xnthread_set_info(thread, XNKICKED);

	if (sched_class->sched_kick)
		sched_class->sched_kick(thread);

	xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}
static inline bool xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->base_class;

	if (sched_class == &xnsched_class_idle)
		return __xnsched_idle_setparam(thread, p);

	return __xnsched_rt_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class == &xnsched_class_idle)
		__xnsched_idle_getparam(thread, p);
	else
		__xnsched_rt_getparam(thread, p);
}
static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class == &xnsched_class_idle)
		__xnsched_idle_trackprio(thread, p);
	else
		__xnsched_rt_trackprio(thread, p);

	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class == &xnsched_class_idle)
		__xnsched_idle_protectprio(thread, prio);
	else
		__xnsched_rt_protectprio(thread, prio);

	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}
static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
	xnthread_set_info(thread, XNKICKED);
	xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */