/* Xenomai 3.1 — cobalt/kernel/sched-rt.h */
/*
 * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
 *
 * Xenomai is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
 * or (at your option) any later version.
 *
 * Xenomai is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Xenomai; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#ifndef _COBALT_KERNEL_SCHED_RT_H
#define _COBALT_KERNEL_SCHED_RT_H

/* This header is private to the Cobalt core scheduler. */
#ifndef _COBALT_KERNEL_SCHED_H
#error "please don't include cobalt/kernel/sched-rt.h directly"
#endif

/*
 * Global priority scale for Xenomai's core scheduling class,
 * available to SCHED_COBALT members.
 */
#define XNSCHED_CORE_MIN_PRIO 0
#define XNSCHED_CORE_MAX_PRIO 259
#define XNSCHED_CORE_NR_PRIO \
 (XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1)

/*
 * Priority range for SCHED_FIFO, and all other classes Cobalt
 * implements except SCHED_COBALT.
 */
#define XNSCHED_FIFO_MIN_PRIO 1
#define XNSCHED_FIFO_MAX_PRIO 256

/*
 * Sanity check: the core priority scale must fit within the
 * class-weight spacing and, with the scalable (multi-level queue)
 * scheduler enabled, within the number of MLQ levels.
 */
#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR || \
 (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \
 XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS)
#error "XNSCHED_MLQ_LEVELS is too low"
#endif

/* RT scheduling class descriptor, defined in the class implementation. */
extern struct xnsched_class xnsched_class_rt;
/*
 * Put @thread back on the RT run-queue of its current CPU scheduler.
 * Uses xnsched_addq(), unlike __xnsched_rt_enqueue() which uses
 * xnsched_addq_tail() — presumably inserting ahead of same-priority
 * peers (re-queue after preemption); confirm in the queue code.
 */
static inline void __xnsched_rt_requeue(struct xnthread *thread)
{
 xnsched_addq(&thread->sched->rt.runnable, thread);
}
59 
/*
 * Add @thread to the RT run-queue of its current CPU scheduler,
 * at the tail of its priority group (behind same-priority peers).
 */
static inline void __xnsched_rt_enqueue(struct xnthread *thread)
{
 xnsched_addq_tail(&thread->sched->rt.runnable, thread);
}
64 
/*
 * Remove @thread from the RT run-queue of its current CPU scheduler.
 */
static inline void __xnsched_rt_dequeue(struct xnthread *thread)
{
 xnsched_delq(&thread->sched->rt.runnable, thread);
}
69 
70 static inline void __xnsched_rt_track_weakness(struct xnthread *thread)
71 {
72  /*
73  * We have to track threads exiting weak scheduling, i.e. any
74  * thread leaving the WEAK class code if compiled in, or
75  * assigned a zero priority if weak threads are hosted by the
76  * RT class.
77  *
78  * CAUTION: since we need to check the effective priority
79  * level for determining the weakness state, this can only
80  * apply to non-boosted threads.
81  */
82  if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio)
83  xnthread_clear_state(thread, XNWEAK);
84  else
85  xnthread_set_state(thread, XNWEAK);
86 }
87 
88 static inline bool __xnsched_rt_setparam(struct xnthread *thread,
89  const union xnsched_policy_param *p)
90 {
91  bool ret = xnsched_set_effective_priority(thread, p->rt.prio);
92 
93  if (!xnthread_test_state(thread, XNBOOST))
94  __xnsched_rt_track_weakness(thread);
95 
96  return ret;
97 }
98 
/*
 * Report the RT-class scheduling parameters of @thread into @p:
 * the current (effective) priority.
 */
static inline void __xnsched_rt_getparam(struct xnthread *thread,
 union xnsched_policy_param *p)
{
 p->rt.prio = thread->cprio;
}
104 
105 static inline void __xnsched_rt_trackprio(struct xnthread *thread,
106  const union xnsched_policy_param *p)
107 {
108  if (p)
109  thread->cprio = p->rt.prio; /* Force update. */
110  else {
111  thread->cprio = thread->bprio;
112  /* Leaving PI/PP, so non-boosted by definition. */
113  __xnsched_rt_track_weakness(thread);
114  }
115 }
116 
/*
 * Apply a priority-protection (PP) boost of @prio to @thread.
 */
static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio)
{
 /*
  * The RT class supports the widest priority range from
  * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive,
  * no need to cap the input value which is guaranteed to be in
  * the range [1..XNSCHED_CORE_MAX_PRIO].
  */
 thread->cprio = prio;
}
127 
/*
 * Class handler called when @thread leaves the RT class: nothing to
 * clean up, the RT class keeps no per-thread state of its own.
 */
static inline void __xnsched_rt_forget(struct xnthread *thread)
{
}
131 
/*
 * Per-thread initialization for the RT class: no state to set up,
 * always succeeds.
 */
static inline int xnsched_rt_init_thread(struct xnthread *thread)
{
 return 0;
}
136 
#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
/* Out-of-line version used when extra scheduling classes are compiled in. */
struct xnthread *xnsched_rt_pick(struct xnsched *sched);
#else
/*
 * Pick the next runnable thread from the RT run-queue of @sched,
 * removing it from the queue; NULL-propagation semantics follow
 * xnsched_getq() — confirm in the queue implementation.
 */
static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
{
 return xnsched_getq(&sched->rt.runnable);
}
#endif

/* Clock tick handler for the RT class; defined in the class implementation. */
void xnsched_rt_tick(struct xnsched *sched);

#endif /* !_COBALT_KERNEL_SCHED_RT_H */
/*
 * Doxygen cross-reference residue from the documentation extraction:
 *   XNBOOST — PI/PP boost undergoing (thread.h:42)
 *   XNWEAK  — non real-time shadow, from the WEAK class (thread.h:49)
 *   struct xnsched — scheduling information structure (sched.h:58)
 *   struct xnsched_rt rt — per-CPU RT class state (sched.h:72)
 */