Xenomai 3.3.2
sched.h
/*
 * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>
#include <pipeline/sched.h>

/* Sched status flags */
#define XNRESCHED  0x10000000   /* Needs rescheduling */
#define XNINSW     0x20000000   /* In context switch */
#define XNINTCK    0x40000000   /* In master tick handler context */

/* Sched local flags */
#define XNIDLE     0x00010000   /* Idle (no outstanding timer) */
#define XNHTICK    0x00008000   /* Host tick pending */
#define XNINIRQ    0x00004000   /* In IRQ handling context */
#define XNHDEFER   0x00002000   /* Host tick deferred */

/*
 * Hardware timer is stopped.
 */
#define XNTSTOP    0x00000800

struct xnsched_rt {
        xnsched_queue_t runnable;       /*!< Runnable thread queue. */
};

/*!
 * \brief Scheduling information structure.
 */
struct xnsched {
        /*!< Scheduler specific status bitmask. */
        unsigned long status;
        /*!< Scheduler specific local flags bitmask. */
        unsigned long lflags;
        /*!< Current thread. */
        struct xnthread *curr;
#ifdef CONFIG_SMP
        /*!< Owner CPU id. */
        int cpu;
        /*!< Mask of CPUs needing rescheduling. */
        cpumask_t resched;
#endif
        /*!< Context of built-in real-time class. */
        struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
        /*!< Context of weak scheduling class. */
        struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
        /*!< Context of TP class. */
        struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        /*!< Context of sporadic scheduling class. */
        struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        /*!< Context of runtime quota scheduling. */
        struct xnsched_quota quota;
#endif
        /*!< Interrupt nesting level. */
        volatile unsigned inesting;
        /*!< Host timer. */
        struct xntimer htimer;
        /*!< Round-robin timer. */
        struct xntimer rrbtimer;
        /*!< Root thread control block. */
        struct xnthread rootcb;
#ifdef CONFIG_XENO_ARCH_FPU
        /*!< Thread owning the current FPU context. */
        struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
        /*!< Watchdog timer object. */
        struct xntimer wdtimer;
#endif
#ifdef CONFIG_XENO_OPT_STATS
        /*!< Last account switch date (ticks). */
        xnticks_t last_account_switch;
        /*!< Currently active account. */
        xnstat_exectime_t *current_account;
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif

union xnsched_policy_param;

struct xnsched_class {
        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnsched *sched);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        int (*sched_chkparam)(struct xnthread *thread,
                              const union xnsched_policy_param *p);
        /* Set base scheduling parameters. */
        bool (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        void (*sched_protectprio)(struct xnthread *thread, int prio);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
        void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
        int nthreads;
        struct xnsched_class *next;
        int weight;
        int policy;
        const char *name;
};

#define XNSCHED_CLASS_WEIGHT(n)  (n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO   0x80000000

#define xnsched_for_each_thread(__thread)       \
        list_for_each_entry(__thread, &nkthreadq, glink)
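
/*
 * Illustrative usage sketch (not part of the original header): walking
 * the global thread queue, e.g. to count Cobalt threads. nkthreadq is
 * shared data, so the caller is assumed to hold the nklock while
 * iterating.
 *
 *      struct xnthread *thread;
 *      int count = 0;
 *
 *      xnsched_for_each_thread(thread)
 *              count++;
 */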

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
        return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
        return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
        return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
        /* IRQs off */
        return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
        return xnsched_current()->curr;
}

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
        sched->status |= XNRESCHED;
}

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
        struct xnsched *current_sched = xnsched_current();

        if (current_sched == sched)
                current_sched->status |= XNRESCHED;
        else if (!xnsched_resched_p(sched)) {
                cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
                sched->status |= XNRESCHED;
                current_sched->status |= XNRESCHED;
        }
}

#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
        xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
        return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
        return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)              \
        for_each_online_cpu(cpu)                \
                if (xnsched_supported_cpu(cpu)) \

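/*
 * Illustrative usage sketch: for_each_realtime_cpu() iterates over the
 * online CPUs, keeping only those which may run real-time threads,
 * e.g.:
 *
 *      int cpu;
 *
 *      for_each_realtime_cpu(cpu)
 *              printk("CPU%d may run Cobalt threads\n", cpu);
 */
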
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
        /*
         * Reschedule if XNRESCHED is pending, but never over an IRQ
         * handler or in the middle of an unlocked context switch.
         */
        if (((sched->status|sched->lflags) &
             (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
                return 0;

        return pipeline_schedule(sched);
}
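
/*
 * Worked example for the test above: with status == XNRESCHED and
 * lflags == 0, the combined mask equals XNRESCHED exactly, so the
 * rescheduling procedure is committed. If XNINIRQ or XNINSW is also
 * set, or XNRESCHED is clear, the call is a nop and rescheduling is
 * deferred to a safer point (e.g. on IRQ exit).
 */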

/**
 * The rescheduling procedure.
 */
static inline int xnsched_run(void)
{
        struct xnsched *sched = xnsched_current();
        /*
         * sched->curr is shared locklessly with ___xnsched_run().
         * READ_ONCE() makes sure the compiler never uses load tearing
         * for reading this pointer piecemeal, so that multiple stores
         * occurring concurrently on remote CPUs never yield a
         * spurious merged value on the local one.
         */
        struct xnthread *curr = READ_ONCE(sched->curr);

        /*
         * If running over the root thread, hard irqs must be off
         * (asserted out of line in ___xnsched_run()).
         */
        return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
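
/*
 * Typical usage sketch (illustrative; xnthread_resume() and XNDELAY
 * are taken from the thread API): change some scheduler state under
 * nklock, then commit the rescheduling procedure before dropping the
 * lock.
 *
 *      spl_t s;
 *
 *      xnlock_get_irqsave(&nklock, s);
 *      xnthread_resume(thread, XNDELAY);
 *      xnsched_run();
 *      xnlock_put_irqrestore(&nklock, s);
 */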

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
        return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
        return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
        return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
        return !xnsched_unblockable_p();
}
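
/*
 * Illustrative sketch: these predicates typically guard blocking
 * services, which may not be invoked from interrupt context or over
 * the root (Linux) thread, e.g.:
 *
 *      if (xnsched_unblockable_p())
 *              return -EPERM;
 */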

bool xnsched_set_effective_priority(struct xnthread *thread,
                                    int prio);

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init_all(void);

void xnsched_destroy_all(void);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_protect_priority(struct xnthread *thread,
                              int prio);

void xnsched_migrate(struct xnthread *thread,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/**
 * Rotate a scheduler runqueue.
 */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}
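
/*
 * Illustrative sketch (the parameter layout is assumed from the RT
 * class definitions): rotating the real-time runqueue at the priority
 * level of the current thread, using the XNSCHED_RUNPRIO placeholder.
 *
 *      union xnsched_policy_param param;
 *
 *      param.rt.prio = XNSCHED_RUNPRIO;
 *      xnsched_rotate(sched, &xnsched_class_rt, &param);
 */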

static inline int xnsched_init_thread(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_thread(thread);
        xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        ret = xnsched_quota_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
        struct xnthread *curr = sched->curr;
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PI boosts, and
         * does not hold the scheduler lock.
         */
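        /*
         * In other words, the state test below succeeds only when
         * XNRRB is set and no blocking bit is, i.e. the thread is
         * both runnable and round-robin scheduled.
         */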
        if (sched_class == curr->base_class &&
            sched_class->sched_tick &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
            curr->lock_count == 0)
                sched_class->sched_tick(sched);
}

static inline int xnsched_chkparam(struct xnsched_class *sched_class,
                                   struct xnthread *thread,
                                   const union xnsched_policy_param *p)
{
        if (sched_class->sched_chkparam)
                return sched_class->sched_chkparam(thread, p);

        return 0;
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
                                  struct xnthread *thread,
                                  const union xnsched_policy_param *p)
{
        int ret;

        if (sched_class->sched_declare) {
                ret = sched_class->sched_declare(thread, p);
                if (ret)
                        return ret;
        }
        if (sched_class != thread->base_class)
                sched_class->nthreads++;

        return 0;
}

static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
                                     int prio)
{
        return prio + sched_class->weight;
}
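
/*
 * Worked example: a class declared with weight XNSCHED_CLASS_WEIGHT(2)
 * gives a thread at priority 5 a weighted priority of
 * 5 + 2 * XNSCHED_CLASS_WEIGHT_FACTOR, so threads from a
 * higher-weighted class always outrank those from lower-weighted
 * ones, whatever their base priority.
 */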

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline
bool xnsched_setparam(struct xnthread *thread,
                      const union xnsched_policy_param *p)
{
        return thread->base_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        thread->sched_class->sched_protectprio(thread, prio);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        xnthread_set_info(thread, XNKICKED);

        if (sched_class->sched_kick)
                sched_class->sched_kick(thread);

        xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline bool xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->base_class;

        if (sched_class == &xnsched_class_idle)
                return __xnsched_idle_setparam(thread, p);

        return __xnsched_rt_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_getparam(thread, p);
        else
                __xnsched_rt_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_trackprio(thread, p);
        else
                __xnsched_rt_trackprio(thread, p);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_protectprio(thread, prio);
        else
                __xnsched_rt_protectprio(thread, prio);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        xnthread_set_info(thread, XNKICKED);
        xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */