Xenomai 3.1
arith.h
#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
#define _COBALT_UAPI_ASM_GENERIC_ARITH_H

#ifndef xnarch_u64tou32
#define xnarch_u64tou32(ull, h, l) ({ \
        union { \
                unsigned long long _ull; \
                struct endianstruct _s; \
        } _u; \
        _u._ull = (ull); \
        (h) = _u._s._h; \
        (l) = _u._s._l; \
})
#endif /* !xnarch_u64tou32 */

#ifndef xnarch_u64fromu32
#define xnarch_u64fromu32(h, l) ({ \
        union { \
                unsigned long long _ull; \
                struct endianstruct _s; \
        } _u; \
        _u._s._h = (h); \
        _u._s._l = (l); \
        _u._ull; \
})
#endif /* !xnarch_u64fromu32 */
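
/*
 * xnarch_u64tou32() splits a 64-bit value into its 32-bit halves and
 * xnarch_u64fromu32() rebuilds one from them. struct endianstruct is
 * expected to come from the architecture-specific header, with its
 * _h/_l members ordered to match the CPU endianness, e.g.
 * xnarch_u64fromu32(0x1, 0x2) == 0x100000002ULL.
 */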

#ifndef xnarch_ullmul
static inline __attribute__((__const__)) unsigned long long
xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
{
        return (unsigned long long) m0 * m1;
}
#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
#endif /* !xnarch_ullmul */

#ifndef xnarch_ulldiv
static inline unsigned long long xnarch_generic_ulldiv(unsigned long long ull,
                                                       const unsigned uld,
                                                       unsigned long *const rp)
{
        const unsigned r = do_div(ull, uld);

        if (rp)
                *rp = r;

        return ull;
}
#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
#endif /* !xnarch_ulldiv */
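
/*
 * xnarch_ulldiv() divides a 64-bit value by a 32-bit divisor and
 * optionally stores the remainder. It relies on do_div() following the
 * Linux kernel convention: the 64-bit dividend is divided in place and
 * the remainder is returned, e.g. xnarch_ulldiv(10ULL, 3, &r) == 3
 * with r == 1.
 */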

#ifndef xnarch_uldivrem
#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
#endif /* !xnarch_uldivrem */

#ifndef xnarch_divmod64
static inline unsigned long long
xnarch_generic_divmod64(unsigned long long a,
                        unsigned long long b,
                        unsigned long long *rem)
{
        unsigned long long q;
#if defined(__KERNEL__) && BITS_PER_LONG < 64
        unsigned long long
        xnarch_generic_full_divmod64(unsigned long long a,
                                     unsigned long long b,
                                     unsigned long long *rem);
        if (b <= 0xffffffffULL) {
                unsigned long r;
                q = xnarch_ulldiv(a, b, &r);
                if (rem)
                        *rem = r;
        } else {
                if (a < b) {
                        if (rem)
                                *rem = a;
                        return 0;
                }

                return xnarch_generic_full_divmod64(a, b, rem);
        }
#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
        q = a / b;
        if (rem)
                *rem = a % b;
#endif /* !(__KERNEL__ && BITS_PER_LONG < 64) */
        return q;
}
#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
#endif /* !xnarch_divmod64 */
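
/*
 * xnarch_divmod64() is the full 64-by-64 division. In kernel space on
 * 32-bit architectures it uses xnarch_ulldiv() when the divisor fits
 * in 32 bits and otherwise falls back to the out-of-line
 * xnarch_generic_full_divmod64() (the architecture-independent div64
 * with remainder, defined in arith.c); elsewhere plain C division is
 * used. E.g. xnarch_divmod64(10ULL, 3ULL, &rem) == 3 with rem == 1.
 */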

#ifndef xnarch_imuldiv
static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i,
                                                                    int mult,
                                                                    int div)
{
        /* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */
        const unsigned long long ull = xnarch_ullmul(i, mult);
        return xnarch_uldivrem(ull, div, NULL);
}
#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
#endif /* !xnarch_imuldiv */
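
/*
 * xnarch_imuldiv() computes i * mult / div through a 64-bit
 * intermediate product, so the multiplication cannot overflow; the
 * quotient is truncated, e.g. xnarch_imuldiv(7, 10, 3) == 23.
 */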

#ifndef xnarch_imuldiv_ceil
static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i,
                                                                         int mult,
                                                                         int div)
{
        /* Same as xnarch_generic_imuldiv, rounding up. */
        const unsigned long long ull = xnarch_ullmul(i, mult);
        return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
}
#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
#endif /* !xnarch_imuldiv_ceil */
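
/*
 * xnarch_imuldiv_ceil() rounds the quotient up by adding div - 1 to
 * the product before dividing, e.g. xnarch_imuldiv_ceil(7, 10, 3) == 24.
 */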

/*
 * Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned
 * 32-bit divisor. Building block for llimd. Without the const
 * qualifiers, gcc reloads registers after each call to uldivrem.
 */
static inline unsigned long long
xnarch_generic_div96by32(const unsigned long long h,
                         const unsigned l,
                         const unsigned d,
                         unsigned long *const rp)
{
        unsigned long rh;
        const unsigned qh = xnarch_uldivrem(h, d, &rh);
        const unsigned long long t = xnarch_u64fromu32(rh, l);
        const unsigned ql = xnarch_uldivrem(t, d, rp);

        return xnarch_u64fromu32(qh, ql);
}
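
/*
 * Two-step long division in base 2^32: h / d yields the high 32 bits
 * of the quotient and remainder rh, then ((rh << 32) | l) / d yields
 * the low 32 bits. The caller must ensure the quotient fits in 64 bits
 * (i.e. h / d fits in 32 bits).
 */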

#ifndef xnarch_llimd
static inline __attribute__((__const__))
unsigned long long xnarch_generic_ullimd(const unsigned long long op,
                                         const unsigned m,
                                         const unsigned d)
{
        unsigned int oph, opl, tlh, tll;
        unsigned long long th, tl;

        xnarch_u64tou32(op, oph, opl);
        tl = xnarch_ullmul(opl, m);
        xnarch_u64tou32(tl, tlh, tll);
        th = xnarch_ullmul(oph, m);
        th += tlh;

        return xnarch_generic_div96by32(th, tll, d, NULL);
}

static inline __attribute__((__const__)) long long
xnarch_generic_llimd(long long op, unsigned m, unsigned d)
{
        long long ret;
        int sign = 0;

        if (op < 0LL) {
                sign = 1;
                op = -op;
        }
        ret = xnarch_generic_ullimd(op, m, d);

        return sign ? -ret : ret;
}
#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
#endif /* !xnarch_llimd */
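
/*
 * xnarch_llimd() scales a signed 64-bit value by the m/d ratio using a
 * 96-bit intermediate product so the multiplication cannot overflow;
 * negative operands are handled by negating before and after. It is
 * typically used to convert values between time bases, e.g.
 * xnarch_llimd(-7LL, 10, 3) == -23.
 */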

#ifndef _xnarch_u96shift
#define xnarch_u96shift(h, m, l, s) ({ \
        unsigned int _l = (l); \
        unsigned int _m = (m); \
        unsigned int _s = (s); \
        _l >>= _s; \
        _l |= (_m << (32 - _s)); \
        _m >>= _s; \
        _m |= ((h) << (32 - _s)); \
        xnarch_u64fromu32(_m, _l); \
})
#endif /* !xnarch_u96shift */
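
/*
 * xnarch_u96shift() shifts the 96-bit value (h << 64) + (m << 32) + l
 * right by s bits and returns the low 64 bits of the result; s must be
 * in the 1..31 range, since the (32 - s) shifts are undefined for
 * s == 0 or s == 32.
 */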

static inline long long xnarch_llmi(int i, int j)
{
        /* Fast 32x32->64 signed multiplication */
        return (long long) i * j;
}

#ifndef xnarch_llmulshft
/* Fast scaled-math-based replacement for long long multiply-divide */
static inline long long
xnarch_generic_llmulshft(const long long op,
                         const unsigned m,
                         const unsigned s)
{
        unsigned int oph, opl, tlh, tll, thh, thl;
        unsigned long long th, tl;

        xnarch_u64tou32(op, oph, opl);
        tl = xnarch_ullmul(opl, m);
        xnarch_u64tou32(tl, tlh, tll);
        th = xnarch_llmi(oph, m);
        th += tlh;
        xnarch_u64tou32(th, thh, thl);

        return xnarch_u96shift(thh, thl, tll, s);
}
#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
#endif /* !xnarch_llmulshft */

#ifdef XNARCH_HAVE_NODIV_LLIMD

/* Representation of a 32-bit fraction. */
struct xnarch_u32frac {
        unsigned long long frac;
        unsigned integ;
};

static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
                                       const unsigned m,
                                       const unsigned d)
{
        /*
         * Prevent compiler optimizations that would kick in when d is
         * known at compile time. The performance of this function is
         * not critical since it is only called at init time.
         */
        volatile unsigned vol_d = d;
        f->integ = m / d;
        f->frac = xnarch_generic_div96by32
                (xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
}
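
/*
 * xnarch_init_u32frac() precomputes the integer part (m / d) and the
 * fractional part ((m % d) scaled by 2^64, then divided by d) of the
 * m/d ratio, so the nodiv helpers below can scale by m/d using only
 * multiplications and shifts.
 */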

#ifndef xnarch_nodiv_imuldiv
static inline __attribute__((__const__)) unsigned
xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
{
        return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
}
#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
#endif /* xnarch_nodiv_imuldiv */

#ifndef xnarch_nodiv_imuldiv_ceil
static inline __attribute__((__const__)) unsigned
xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
{
        unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
        return (full >> 32) + f.integ * op;
}
#define xnarch_nodiv_imuldiv_ceil(op, f) \
        xnarch_generic_nodiv_imuldiv_ceil((op),(f))
#endif /* xnarch_nodiv_imuldiv_ceil */
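
/*
 * Both 32-bit variants keep only the upper 32 bits of the precomputed
 * fraction, approximating op * m / d as f.integ * op plus the high
 * word of op * (f.frac >> 32); the _ceil variant adds 0xffffffff to
 * the product before taking the high word, which rounds upwards.
 */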

#ifndef xnarch_nodiv_ullimd

#ifndef xnarch_add96and64
#error "xnarch_add96and64 must be implemented."
#endif

static inline __attribute__((__const__)) unsigned long long
xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
{
        /* Compute high 64 bits of multiplication 64 bits x 64 bits. */
        register unsigned long long t0, t1, t2, t3;
        register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;

        xnarch_u64tou32(op, oph, opl);
        xnarch_u64tou32(m, mh, ml);
        t0 = xnarch_ullmul(opl, ml);
        xnarch_u64tou32(t0, t0h, t0l);
        t3 = xnarch_ullmul(oph, mh);
        xnarch_u64tou32(t3, t3h, t3l);
        xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
        t1 = xnarch_ullmul(oph, ml);
        xnarch_u64tou32(t1, t1h, t1l);
        xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
        t2 = xnarch_ullmul(opl, mh);
        xnarch_u64tou32(t2, t2h, t2l);
        xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);

        return xnarch_u64fromu32(t3h, t3l);
}
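
/*
 * Schoolbook 64 x 64 -> 128-bit multiplication keeping only the upper
 * 64 bits: the four 32 x 32 partial products are accumulated into a
 * 96-bit value held in (t3h, t3l, t0h) via the architecture-provided
 * xnarch_add96and64(), with bit 31 of the lowest partial product used
 * as a rounding bit.
 */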

static inline unsigned long long
xnarch_generic_nodiv_ullimd(const unsigned long long op,
                            const unsigned long long frac,
                            unsigned int integ)
{
        return xnarch_mul64by64_high(op, frac) + integ * op;
}
#define xnarch_nodiv_ullimd(op, f, i) xnarch_generic_nodiv_ullimd((op),(f), (i))
#endif /* !xnarch_nodiv_ullimd */
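
/*
 * xnarch_nodiv_ullimd() evaluates op * m / d as
 * mul64by64_high(op, frac) + integ * op, with (frac, integ) precomputed
 * by xnarch_init_u32frac(), thereby avoiding any division on the hot
 * path.
 */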

#ifndef xnarch_nodiv_llimd
static inline __attribute__((__const__)) long long
xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
                           unsigned int integ)
{
        long long ret;
        int sign = 0;

        if (op < 0LL) {
                sign = 1;
                op = -op;
        }
        ret = xnarch_nodiv_ullimd(op, frac, integ);

        return sign ? -ret : ret;
}
#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
#endif /* !xnarch_nodiv_llimd */

#endif /* XNARCH_HAVE_NODIV_LLIMD */
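
/*
 * Usage sketch (illustrative only), assuming the architecture defines
 * XNARCH_HAVE_NODIV_LLIMD: precompute the m/d ratio once, then scale
 * signed 64-bit values without dividing on the hot path.
 *
 *      struct xnarch_u32frac f;
 *
 *      xnarch_init_u32frac(&f, m, d);
 *      ...
 *      long long scaled = xnarch_nodiv_llimd(value, f.frac, f.integ);
 */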

static inline void xnarch_init_llmulshft(const unsigned m_in,
                                         const unsigned d_in,
                                         unsigned *m_out,
                                         unsigned *s_out)
{
        /*
         * Prevent compiler optimizations that would kick in when d is
         * known at compile time. The performance of this function is
         * not critical since it is only called at init time.
         */
        volatile unsigned int vol_d = d_in;
        unsigned long long mult;

        *s_out = 31;
        while (1) {
                mult = ((unsigned long long)m_in) << *s_out;
                do_div(mult, vol_d);
                if (mult <= 0x7FFFFFFF)
                        break;
                (*s_out)--;
        }
        *m_out = (unsigned int)mult;
}
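
/*
 * Usage sketch (illustrative only): turn the m/d ratio into a
 * multiplier/shift pair once, then scale with xnarch_llmulshft() on
 * the hot path.
 *
 *      unsigned mul, shft;
 *
 *      xnarch_init_llmulshft(m, d, &mul, &shft);
 *      ...
 *      long long scaled = xnarch_llmulshft(value, mul, shft);
 */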

#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
#define xnarch_ulmod(ull, d)         ({ unsigned long _rem; \
                                        xnarch_uldivrem(ull,d,&_rem); _rem; })

#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
#define xnarch_mod64(a,b)            ({ unsigned long long _rem; \
                                        xnarch_divmod64((a),(b),&_rem); _rem; })

#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */