Xenomai 3.1
sched.h
/*
 * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_H
#define _COBALT_KERNEL_SCHED_H

#include <linux/percpu.h>
#include <cobalt/kernel/lock.h>
#include <cobalt/kernel/thread.h>
#include <cobalt/kernel/schedqueue.h>
#include <cobalt/kernel/sched-tp.h>
#include <cobalt/kernel/sched-weak.h>
#include <cobalt/kernel/sched-sporadic.h>
#include <cobalt/kernel/sched-quota.h>
#include <cobalt/kernel/vfile.h>
#include <cobalt/kernel/assert.h>
#include <asm/xenomai/machine.h>

/* Sched status flags */
#define XNRESCHED 0x10000000 /* Needs rescheduling */
#define XNINSW    0x20000000 /* In context switch */
#define XNINTCK   0x40000000 /* In master tick handler context */

/* Sched local flags */
#define XNIDLE    0x00010000 /* Idle (no outstanding timer) */
#define XNHTICK   0x00008000 /* Host tick pending */
#define XNINIRQ   0x00004000 /* In IRQ handling context */
#define XNHDEFER  0x00002000 /* Host tick deferred */

struct xnsched_rt {
        xnsched_queue_t runnable;
};

/* Scheduling information structure. */
struct xnsched {
        /* Scheduler specific status bitmask. */
        unsigned long status;
        /* Scheduler specific local flags bitmask. */
        unsigned long lflags;
        /* Current thread. */
        struct xnthread *curr;
#ifdef CONFIG_SMP
        /* Owner CPU id. */
        int cpu;
        /* Mask of CPUs needing rescheduling. */
        cpumask_t resched;
#endif
        /* Context of built-in real-time class. */
        struct xnsched_rt rt;
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
        /* Context of weak scheduling class. */
        struct xnsched_weak weak;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
        /* Context of TP class. */
        struct xnsched_tp tp;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        /* Context of sporadic scheduling class. */
        struct xnsched_sporadic pss;
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        /* Context of runtime quota scheduling. */
        struct xnsched_quota quota;
#endif
        /* Interrupt nesting level. */
        volatile unsigned inesting;
        /* Host timer. */
        struct xntimer htimer;
        /* Round-robin timer. */
        struct xntimer rrbtimer;
        /* Root thread control block. */
        struct xnthread rootcb;
#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
        struct xnthread *last;
#endif
#ifdef CONFIG_XENO_ARCH_FPU
        /* Thread owning the current FPU context. */
        struct xnthread *fpuholder;
#endif
#ifdef CONFIG_XENO_OPT_WATCHDOG
        /* Watchdog timer object. */
        struct xntimer wdtimer;
#endif
#ifdef CONFIG_XENO_OPT_STATS
        /* Last account switch date (ticks). */
        xnticks_t last_account_switch;
        /* Currently active account. */
        xnstat_exectime_t *current_account;
#endif
};

DECLARE_PER_CPU(struct xnsched, nksched);

extern cpumask_t cobalt_cpu_affinity;

extern struct list_head nkthreadq;

extern int cobalt_nrthreads;

#ifdef CONFIG_XENO_OPT_VFILE
extern struct xnvfile_rev_tag nkthreadlist_tag;
#endif

union xnsched_policy_param;

struct xnsched_class {
        void (*sched_init)(struct xnsched *sched);
        void (*sched_enqueue)(struct xnthread *thread);
        void (*sched_dequeue)(struct xnthread *thread);
        void (*sched_requeue)(struct xnthread *thread);
        struct xnthread *(*sched_pick)(struct xnsched *sched);
        void (*sched_tick)(struct xnsched *sched);
        void (*sched_rotate)(struct xnsched *sched,
                             const union xnsched_policy_param *p);
        void (*sched_migrate)(struct xnthread *thread,
                              struct xnsched *sched);
        int (*sched_chkparam)(struct xnthread *thread,
                              const union xnsched_policy_param *p);
        bool (*sched_setparam)(struct xnthread *thread,
                               const union xnsched_policy_param *p);
        void (*sched_getparam)(struct xnthread *thread,
                               union xnsched_policy_param *p);
        void (*sched_trackprio)(struct xnthread *thread,
                                const union xnsched_policy_param *p);
        void (*sched_protectprio)(struct xnthread *thread, int prio);
        int (*sched_declare)(struct xnthread *thread,
                             const union xnsched_policy_param *p);
        void (*sched_forget)(struct xnthread *thread);
        void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
        int (*sched_init_vfile)(struct xnsched_class *schedclass,
                                struct xnvfile_directory *vfroot);
        void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
        int nthreads;
        struct xnsched_class *next;
        int weight;
        int policy;
        const char *name;
};
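
/*
 * Illustrative sketch (editorial, not part of the original header): a
 * scheduling class is provided as a statically initialized descriptor
 * wiring the hooks above; the core walks these descriptors by
 * decreasing weight when picking the next thread to run. The handler
 * names and values below are made up for the example - see the RT, TP
 * or quota class implementations in the Xenomai tree for real
 * instances.
 *
 *      static struct xnsched_class example_sched_class = {
 *              .sched_init     = example_init,
 *              .sched_enqueue  = example_enqueue,
 *              .sched_dequeue  = example_dequeue,
 *              .sched_requeue  = example_requeue,
 *              .sched_pick     = example_pick,
 *              .sched_setparam = example_setparam,
 *              .sched_getparam = example_getparam,
 *              .weight         = XNSCHED_CLASS_WEIGHT(2),
 *              .policy         = SCHED_FIFO,
 *              .name           = "example",
 *      };
 */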

#define XNSCHED_CLASS_WEIGHT(n) (n * XNSCHED_CLASS_WEIGHT_FACTOR)

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO 0x80000000

#define xnsched_for_each_thread(__thread)               \
        list_for_each_entry(__thread, &nkthreadq, glink)

#ifdef CONFIG_SMP
static inline int xnsched_cpu(struct xnsched *sched)
{
        return sched->cpu;
}
#else /* !CONFIG_SMP */
static inline int xnsched_cpu(struct xnsched *sched)
{
        return 0;
}
#endif /* CONFIG_SMP */

static inline struct xnsched *xnsched_struct(int cpu)
{
        return &per_cpu(nksched, cpu);
}

static inline struct xnsched *xnsched_current(void)
{
        /* IRQs off */
        return raw_cpu_ptr(&nksched);
}

static inline struct xnthread *xnsched_current_thread(void)
{
        return xnsched_current()->curr;
}
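
/*
 * Usage sketch (editorial, not part of the original header): the
 * accessors above return the per-CPU scheduler state and the thread it
 * is currently running; xnsched_current() requires hard IRQs to be
 * off, as noted in its body.
 *
 *      struct xnsched *sched = xnsched_current();
 *      struct xnthread *curr = xnsched_current_thread();
 *      int cpu = xnsched_cpu(sched);
 */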

/* Test resched flag of given sched. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

/* Set self resched flag for the current scheduler. */
static inline void xnsched_set_self_resched(struct xnsched *sched)
{
        sched->status |= XNRESCHED;
}

#define xnsched_realtime_domain cobalt_pipeline.domain

/* Set resched flag for the given scheduler. */
#ifdef CONFIG_SMP

static inline void xnsched_set_resched(struct xnsched *sched)
{
        struct xnsched *current_sched = xnsched_current();

        if (current_sched == sched)
                current_sched->status |= XNRESCHED;
        else if (!xnsched_resched_p(sched)) {
                cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
                sched->status |= XNRESCHED;
                current_sched->status |= XNRESCHED;
        }
}

#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus

static inline int xnsched_supported_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
}

static inline int xnsched_threading_cpu(int cpu)
{
        return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
}

#else /* !CONFIG_SMP */

static inline void xnsched_set_resched(struct xnsched *sched)
{
        xnsched_set_self_resched(sched);
}

#define xnsched_realtime_cpus CPU_MASK_ALL

static inline int xnsched_supported_cpu(int cpu)
{
        return 1;
}

static inline int xnsched_threading_cpu(int cpu)
{
        return 1;
}

#endif /* !CONFIG_SMP */

#define for_each_realtime_cpu(cpu)              \
        for_each_online_cpu(cpu)                \
                if (xnsched_supported_cpu(cpu)) \

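/*
 * Usage sketch (editorial, not part of the original header): iterating
 * over the CPUs Cobalt may schedule real-time threads on, fetching the
 * per-CPU scheduler of each:
 *
 *      int cpu;
 *
 *      for_each_realtime_cpu(cpu) {
 *              struct xnsched *sched = xnsched_struct(cpu);
 *              ...
 *      }
 */
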
int ___xnsched_run(struct xnsched *sched);

void __xnsched_run_handler(void);

static inline int __xnsched_run(struct xnsched *sched)
{
        /*
         * Reschedule if XNRESCHED is pending, but never over an IRQ
         * handler or in the middle of unlocked context switch.
         */
        if (((sched->status|sched->lflags) &
             (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
                return 0;

        return ___xnsched_run(sched);
}

/* The rescheduling procedure. */
static inline int xnsched_run(void)
{
        struct xnsched *sched = xnsched_current();
        /*
         * sched->curr is shared locklessly with ___xnsched_run().
         * READ_ONCE() makes sure the compiler never uses load tearing
         * for reading this pointer piecemeal, so that multiple stores
         * occurring concurrently on remote CPUs never yield a
         * spurious merged value on the local one.
         */
        struct xnthread *curr = READ_ONCE(sched->curr);

        /*
         * If running over the root thread, hard irqs must be off
         * (asserted out of line in ___xnsched_run()).
         */
        return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
}
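
/*
 * Typical calling pattern (editorial sketch based on common usage in
 * the Cobalt core; XNDELAY and xnthread_resume() come from thread.h):
 * operations which may raise XNRESCHED are followed by xnsched_run()
 * to apply the pending scheduling decision.
 *
 *      xnlock_get_irqsave(&nklock, s);
 *      xnthread_resume(thread, XNDELAY);
 *      xnsched_run();
 *      xnlock_put_irqrestore(&nklock, s);
 */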

void xnsched_lock(void);

void xnsched_unlock(void);

static inline int xnsched_interrupt_p(void)
{
        return xnsched_current()->lflags & XNINIRQ;
}

static inline int xnsched_root_p(void)
{
        return xnthread_test_state(xnsched_current_thread(), XNROOT);
}

static inline int xnsched_unblockable_p(void)
{
        return xnsched_interrupt_p() || xnsched_root_p();
}

static inline int xnsched_primary_p(void)
{
        return !xnsched_unblockable_p();
}

#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnsched_run()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return sched->status & XNRESCHED;
}

#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

static inline struct xnsched *
xnsched_finish_unlocked_switch(struct xnsched *sched)
{
        XENO_BUG_ON(COBALT, !hard_irqs_disabled());
        return xnsched_current();
}

static inline void xnsched_resched_after_unlocked_switch(void) { }

static inline int
xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
        return 0;
}

#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */

bool xnsched_set_effective_priority(struct xnthread *thread,
                                    int prio);

#include <cobalt/kernel/sched-idle.h>
#include <cobalt/kernel/sched-rt.h>

int xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init_all(void);

void xnsched_destroy_all(void);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
                       struct xnsched_class *sched_class,
                       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
                          struct xnthread *target);

void xnsched_protect_priority(struct xnthread *thread,
                              int prio);

void xnsched_migrate(struct xnthread *thread,
                     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
                             struct xnsched *sched);

/* Rotate a scheduler runqueue. */
static inline void xnsched_rotate(struct xnsched *sched,
                                  struct xnsched_class *sched_class,
                                  const union xnsched_policy_param *sched_param)
{
        sched_class->sched_rotate(sched, sched_param);
}

static inline int xnsched_init_thread(struct xnthread *thread)
{
        int ret = 0;

        xnsched_idle_init_thread(thread);
        xnsched_rt_init_thread(thread);

#ifdef CONFIG_XENO_OPT_SCHED_TP
        ret = xnsched_tp_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
        ret = xnsched_sporadic_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
        ret = xnsched_quota_init_thread(thread);
        if (ret)
                return ret;
#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */

        return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
        return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
        return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnsched *sched)
{
        struct xnthread *curr = sched->curr;
        struct xnsched_class *sched_class = curr->sched_class;
        /*
         * A thread that undergoes round-robin scheduling only
         * consumes its time slice when it runs within its own
         * scheduling class, which excludes temporary PI boosts, and
         * does not hold the scheduler lock.
         */
        if (sched_class == curr->base_class &&
            sched_class->sched_tick &&
            xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
            curr->lock_count == 0)
                sched_class->sched_tick(sched);
}

static inline int xnsched_chkparam(struct xnsched_class *sched_class,
                                   struct xnthread *thread,
                                   const union xnsched_policy_param *p)
{
        if (sched_class->sched_chkparam)
                return sched_class->sched_chkparam(thread, p);

        return 0;
}

static inline int xnsched_declare(struct xnsched_class *sched_class,
                                  struct xnthread *thread,
                                  const union xnsched_policy_param *p)
{
        int ret;

        if (sched_class->sched_declare) {
                ret = sched_class->sched_declare(thread, p);
                if (ret)
                        return ret;
        }
        if (sched_class != thread->base_class)
                sched_class->nthreads++;

        return 0;
}

static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
                                     int prio)
{
        return prio + sched_class->weight;
}
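
/*
 * Editorial note: the weighted priority puts all threads on a single
 * global scale. Class weights are multiples of
 * XNSCHED_CLASS_WEIGHT_FACTOR (see XNSCHED_CLASS_WEIGHT() above), so
 * as long as class-local priorities stay below that factor, a thread
 * from a higher-weight class always outranks any thread from a
 * lower-weight one. For instance, with a factor of 1024, priority 99
 * in a weight-1024 class yields wprio 1123, still below the wprio of
 * 2049 obtained by priority 1 in a weight-2048 class.
 */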

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                sched_class->sched_requeue(thread);
}

static inline
bool xnsched_setparam(struct xnthread *thread,
                      const union xnsched_policy_param *p)
{
        return thread->base_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        thread->sched_class->sched_trackprio(thread, p);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        thread->sched_class->sched_protectprio(thread, prio);
        thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        --sched_class->nthreads;

        if (sched_class->sched_forget)
                sched_class->sched_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->base_class;

        xnthread_set_info(thread, XNKICKED);

        if (sched_class->sched_kick)
                sched_class->sched_kick(thread);

        xnsched_set_resched(thread->sched);
}

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */

static inline void xnsched_enqueue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class != &xnsched_class_idle)
                __xnsched_rt_requeue(thread);
}

static inline bool xnsched_setparam(struct xnthread *thread,
                                    const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->base_class;

        if (sched_class == &xnsched_class_idle)
                return __xnsched_idle_setparam(thread, p);

        return __xnsched_rt_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
                                    union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_getparam(thread, p);
        else
                __xnsched_rt_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
                                     const union xnsched_policy_param *p)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_trackprio(thread, p);
        else
                __xnsched_rt_trackprio(thread, p);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_protectprio(struct xnthread *thread, int prio)
{
        struct xnsched_class *sched_class = thread->sched_class;

        if (sched_class == &xnsched_class_idle)
                __xnsched_idle_protectprio(thread, prio);
        else
                __xnsched_rt_protectprio(thread, prio);

        thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
}

static inline void xnsched_forget(struct xnthread *thread)
{
        --thread->base_class->nthreads;
        __xnsched_rt_forget(thread);
}

static inline void xnsched_kick(struct xnthread *thread)
{
        xnthread_set_info(thread, XNKICKED);
        xnsched_set_resched(thread->sched);
}

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

#endif /* !_COBALT_KERNEL_SCHED_H */