Xenomai 3.1
arith.h
#ifndef _COBALT_X86_ASM_UAPI_ARITH_H
#define _COBALT_X86_ASM_UAPI_ARITH_H

#include <asm/xenomai/uapi/features.h>

#ifdef __i386__

/* Split a 64-bit value into its 32-bit high and low words. */
#define xnarch_u64tou32(ull, h, l) ({           \
        unsigned long long _ull = (ull);        \
        (l) = _ull & 0xffffffff;                \
        (h) = _ull >> 32;                       \
})

/* Rebuild a 64-bit value from its 32-bit high and low words. */
#define xnarch_u64fromu32(h, l) ({              \
        unsigned long long _ull;                \
        asm ( "": "=A"(_ull) : "d"(h), "a"(l)); \
        _ull;                                   \
})
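
/*
 * Illustrative sketch (not part of the original header): splitting a 64-bit
 * value into its 32-bit halves with xnarch_u64tou32() and recombining them
 * with xnarch_u64fromu32().  The helper name is hypothetical.
 */
static inline unsigned long long __example_split_join(unsigned long long ull)
{
        unsigned long h, l;

        xnarch_u64tou32(ull, h, l);     /* h = bits 63..32, l = bits 31..0 */
        return xnarch_u64fromu32(h, l); /* reassembled value equals ull */
}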

/* Const helper for xnarch_uldivrem, so that the compiler can eliminate
   multiple calls with the same arguments, at no additional cost. */
static inline __attribute__((__const__)) unsigned long long
__mach_x86_32_uldivrem(const unsigned long long ull, const unsigned long d)
{
        unsigned long long ret;
        __asm__ ("divl %1" : "=A,A"(ret) : "r,?m"(d), "A,A"(ull));
        /* Exception if the quotient does not fit on unsigned long. */
        return ret;
}

/* Fast long long division: when the quotient and remainder fit on 32 bits. */
static inline unsigned long mach_x86_32_uldivrem(unsigned long long ull,
                                                 const unsigned d,
                                                 unsigned long *const rp)
{
        unsigned long q, r;
        ull = __mach_x86_32_uldivrem(ull, d);
        __asm__ ( "": "=d"(r), "=a"(q) : "A"(ull));
        if (rp)
                *rp = r;
        return q;
}
#define xnarch_uldivrem(ull, d, rp) mach_x86_32_uldivrem((ull),(d),(rp))
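
/*
 * Illustrative sketch (not part of the original header): fast 64-by-32
 * division via xnarch_uldivrem(), valid only when the quotient fits on
 * 32 bits (the CPU raises a divide fault otherwise).  The helper name is
 * hypothetical.
 */
static inline unsigned long __example_uldivrem_ceil(unsigned long long ull,
                                                    unsigned d)
{
        unsigned long rem;
        unsigned long q = xnarch_uldivrem(ull, d, &rem);

        return rem ? q + 1 : q;         /* round the quotient up */
}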

/* Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned 32-bit
   divisor. Building block for ulldiv. */
static inline unsigned long long mach_x86_32_div96by32(const unsigned long long h,
                                                       const unsigned long l,
                                                       const unsigned long d,
                                                       unsigned long *const rp)
{
        unsigned long rh;
        const unsigned long qh = xnarch_uldivrem(h, d, &rh);
        const unsigned long long t = xnarch_u64fromu32(rh, l);
        const unsigned long ql = xnarch_uldivrem(t, d, rp);

        return xnarch_u64fromu32(qh, ql);
}

/* Slow long long division. Uses xnarch_uldivrem, hence has the same property:
   the compiler removes redundant calls. */
static inline unsigned long long
mach_x86_32_ulldiv(const unsigned long long ull,
                   const unsigned d,
                   unsigned long *const rp)
{
        unsigned long h, l;
        xnarch_u64tou32(ull, h, l);
        return mach_x86_32_div96by32(h, l, d, rp);
}
#define xnarch_ulldiv(ull,d,rp) mach_x86_32_ulldiv((ull),(d),(rp))
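
/*
 * Illustrative sketch (not part of the original header): xnarch_ulldiv()
 * divides the high word first, then the (remainder:low) pair, so the
 * quotient may use all 64 bits, unlike xnarch_uldivrem().  The helper name
 * is hypothetical.
 */
static inline unsigned long long __example_ulldiv(unsigned long long ull,
                                                  unsigned d)
{
        unsigned long rem;
        unsigned long long q = xnarch_ulldiv(ull, d, &rem);

        /* q == ull / d and rem == ull % d, with no 32-bit quotient limit. */
        return q;
}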

/* Fast scaled-math-based replacement for long long multiply-divide */
#define xnarch_llmulshft(ll, m, s)                                      \
({                                                                      \
        long long __ret;                                                \
        unsigned __lo, __hi;                                            \
                                                                        \
        __asm__ (                                                       \
                /* HI = HIWORD(ll) * m */                               \
                "mov %%eax,%%ecx\n\t"                                   \
                "mov %%edx,%%eax\n\t"                                   \
                "imull %[__m]\n\t"                                      \
                "mov %%eax,%[__lo]\n\t"                                 \
                "mov %%edx,%[__hi]\n\t"                                 \
                                                                        \
                /* LO = LOWORD(ll) * m */                               \
                "mov %%ecx,%%eax\n\t"                                   \
                "mull %[__m]\n\t"                                       \
                                                                        \
                /* ret = (HI << 32) + LO */                             \
                "add %[__lo],%%edx\n\t"                                 \
                "adc $0,%[__hi]\n\t"                                    \
                                                                        \
                /* ret = ret >> s */                                    \
                "mov %[__s],%%ecx\n\t"                                  \
                "shrd %%cl,%%edx,%%eax\n\t"                             \
                "shrd %%cl,%[__hi],%%edx\n\t"                           \
                : "=A" (__ret), [__lo] "=&r" (__lo), [__hi] "=&r" (__hi) \
                : "A" (ll), [__m] "m" (m), [__s] "m" (s)                \
                : "ecx");                                               \
        __ret;                                                          \
})
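
/*
 * Illustrative sketch (not part of the original header): xnarch_llmulshft()
 * computes (ll * m) >> s, i.e. ll scaled by the ratio m / 2^s.  A ratio such
 * as 3/4 can be pre-encoded as m = 0x60000000, s = 31, since
 * 0x60000000 / 2^31 == 0.75.  In practice the (m, s) pair is typically
 * produced once by xnarch_init_llmulshft() from the generic arith.h pulled
 * in below.  The helper name is hypothetical.
 */
static inline long long __example_scale_3_4(long long ticks)
{
        unsigned mul = 0x60000000, shft = 31;

        return xnarch_llmulshft(ticks, mul, shft);      /* ~ ticks * 3 / 4 */
}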

static inline __attribute__((const)) unsigned long long
mach_x86_32_nodiv_ullimd(const unsigned long long op,
                         const unsigned long long frac,
                         unsigned rhs_integ)
{
        register unsigned rl __asm__("ecx");
        register unsigned rm __asm__("esi");
        register unsigned rh __asm__("edi");
        unsigned fracl, frach, opl, oph;
        volatile unsigned integ = rhs_integ;
        register unsigned long long t;

        xnarch_u64tou32(op, oph, opl);
        xnarch_u64tou32(frac, frach, fracl);

        __asm__ ("mov %[oph], %%eax\n\t"
                 "mull %[frach]\n\t"
                 "mov %%eax, %[rm]\n\t"
                 "mov %%edx, %[rh]\n\t"
                 "mov %[opl], %%eax\n\t"
                 "mull %[fracl]\n\t"
                 "mov %%edx, %[rl]\n\t"
                 "shl $1, %%eax\n\t"
                 "adc $0, %[rl]\n\t"
                 "adc $0, %[rm]\n\t"
                 "adc $0, %[rh]\n\t"
                 "mov %[oph], %%eax\n\t"
                 "mull %[fracl]\n\t"
                 "add %%eax, %[rl]\n\t"
                 "adc %%edx, %[rm]\n\t"
                 "adc $0, %[rh]\n\t"
                 "mov %[opl], %%eax\n\t"
                 "mull %[frach]\n\t"
                 "add %%eax, %[rl]\n\t"
                 "adc %%edx, %[rm]\n\t"
                 "adc $0, %[rh]\n\t"
                 "mov %[opl], %%eax\n\t"
                 "mull %[integ]\n\t"
                 "add %[rm], %%eax\n\t"
                 "adc %%edx, %[rh]\n\t"
                 "mov %[oph], %%edx\n\t"
                 "imul %[integ], %%edx\n\t"
                 "add %[rh], %%edx\n\t"
                 : [rl]"=&c"(rl), [rm]"=&S"(rm), [rh]"=&D"(rh), "=&A"(t)
                 : [opl]"m"(opl), [oph]"m"(oph),
                   [fracl]"m"(fracl), [frach]"m"(frach), [integ]"m"(integ)
                 : "cc");

        return t;
}

#define xnarch_nodiv_ullimd(op, frac, integ) \
        mach_x86_32_nodiv_ullimd((op), (frac), (integ))
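
/*
 * Illustrative sketch (not part of the original header): the no-division
 * scaled multiply computes roughly op * integ + ((op * frac) >> 64), i.e.
 * op times a ratio whose integer part is integ and whose fractional part
 * is frac in 0.64 fixed point.  In practice the (frac, integ) pair is
 * normally precomputed once, e.g. by xnarch_init_u32frac() from the generic
 * arith.h pulled in below.  The helper name is hypothetical.
 */
static inline unsigned long long __example_scale_2_5(unsigned long long op)
{
        /* integ = 2, frac = 0.5 in 0.64 fixed point, hence ~ op * 2.5 */
        return xnarch_nodiv_ullimd(op, 0x8000000000000000ULL, 2);
}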

#else /* x86_64 */

/* 64-bit multiply-divide: op * m / d with a 128-bit intermediate product. */
static inline __attribute__((__const__)) long long
mach_x86_64_llimd (long long op, unsigned m, unsigned d)
{
        long long result;

        __asm__ (
                "imul %[m]\n\t"
                "idiv %[d]\n\t"
                : "=a" (result)
                : "a" (op), [m] "r" ((unsigned long long)m),
                  [d] "r" ((unsigned long long)d)
                : "rdx");

        return result;
}
#define xnarch_llimd(ll,m,d) mach_x86_64_llimd((ll),(m),(d))
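
/*
 * Illustrative sketch (not part of the original header): xnarch_llimd()
 * computes op * m / d through a 128-bit intermediate product, so the usual
 * "multiply first, overflow" problem is avoided.  The helper name and the
 * 33 MHz clock are hypothetical.
 */
static inline long long __example_ns_to_ticks(long long ns)
{
        /* nanoseconds to ticks of a 33 MHz clock: ns * 33000000 / 1000000000,
           reduced to ns * 33 / 1000 */
        return xnarch_llimd(ns, 33, 1000);
}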

/* Scaled math: (op * m) >> s using a 128-bit intermediate product. */
static inline __attribute__((__const__)) long long
mach_x86_64_llmulshft(long long op, unsigned m, unsigned s)
{
        long long result;

        __asm__ (
                "imulq %[m]\n\t"
                "shrd %%cl,%%rdx,%%rax\n\t"
                : "=a,a" (result)
                : "a,a" (op), [m] "m,r" ((unsigned long long)m),
                  "c,c" (s)
                : "rdx");

        return result;
}
#define xnarch_llmulshft(op, m, s) mach_x86_64_llmulshft((op), (m), (s))

/* No-division scaled multiply, 64-bit variant: op * integ + ((op * frac) >> 64). */
static inline __attribute__((__const__)) unsigned long long
mach_x86_64_nodiv_ullimd(unsigned long long op,
                         unsigned long long frac, unsigned rhs_integ)
{
        register unsigned long long rl __asm__("rax") = frac;
        register unsigned long long rh __asm__("rdx");
        register unsigned long long integ __asm__("rsi") = rhs_integ;
        register unsigned long long t __asm__("r8") = 0x80000000ULL;

        __asm__ ("mulq %[op]\n\t"
                 "addq %[t], %[rl]\n\t"
                 "adcq $0, %[rh]\n\t"
                 "imulq %[op], %[integ]\n\t"
                 "leaq (%[integ], %[rh], 1),%[rl]":
                 [rh]"=&d"(rh), [rl]"+&a"(rl), [integ]"+S"(integ):
                 [op]"D"(op), [t]"r"(t): "cc");

        return rl;
}

#define xnarch_nodiv_ullimd(op, frac, integ) \
        mach_x86_64_nodiv_ullimd((op), (frac), (integ))

#endif /* x86_64 */

#include <cobalt/uapi/asm-generic/arith.h>

#endif /* _COBALT_X86_ASM_UAPI_ARITH_H */