#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
#define _COBALT_UAPI_ASM_GENERIC_ARITH_H

#ifndef xnarch_u64tou32
#define xnarch_u64tou32(ull, h, l) ({					\
	union { unsigned long long _ull; struct endianstruct _s; } _u;	\
	_u._ull = (ull);						\
	(h) = _u._s._h;							\
	(l) = _u._s._l;							\
})
#endif /* !xnarch_u64tou32 */

35#ifndef xnarch_u64fromu32
36#define xnarch_u64fromu32(h, l) ({ \
38 unsigned long long _ull; \
39 struct endianstruct _s; \
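/*
 * Note on the two helpers above: they assume struct endianstruct is
 * declared before this point (it is not part of this excerpt) with two
 * 32-bit members, referred to here as _h and _l, ordered to match the
 * platform endianness, so splitting and joining a 64-bit value is a
 * plain reinterpretation through the union rather than shift/mask work.
 */
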
#ifndef xnarch_ullmul
static inline __attribute__((__const__)) unsigned long long
xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
{
	return (unsigned long long) m0 * m1;
}
#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
#endif /* !xnarch_ullmul */

#ifndef xnarch_ulldiv
static inline unsigned long long xnarch_generic_ulldiv(unsigned long long ull,
							const unsigned uld,
							unsigned long *const rp)
{
	const unsigned r = do_div(ull, uld);
	if (rp)
		*rp = r;
	return ull;
}
#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
#endif /* !xnarch_ulldiv */

#ifndef xnarch_uldivrem
#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
#endif /* !xnarch_uldivrem */

#ifndef xnarch_divmod64
#if defined(__KERNEL__) && BITS_PER_LONG < 64
unsigned long long xnarch_generic_full_divmod64(unsigned long long a,
		unsigned long long b, unsigned long long *rem);
#define IMPLEMENT_GENERIC_FULL_DIVMOD64
#endif /* __KERNEL__ && BITS_PER_LONG < 64 */
static inline unsigned long long
xnarch_generic_divmod64(unsigned long long a, unsigned long long b,
			unsigned long long *rem)
{
	unsigned long long q;
#if defined(__KERNEL__) && BITS_PER_LONG < 64
	if (b <= 0xffffffffULL) {
		unsigned long r;
		q = xnarch_ulldiv(a, b, &r);
		if (rem)
			*rem = r;
	} else {
		if (a < b) {
			if (rem)
				*rem = a;
			return 0;
		}
		return xnarch_generic_full_divmod64(a, b, rem);
	}
#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
	q = a / b;
	if (rem)
		*rem = a % b;
#endif /* !(__KERNEL__ && BITS_PER_LONG < 64) */
	return q;
}
#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
#endif /* !xnarch_divmod64 */

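/*
 * Illustrative use of xnarch_divmod64() (the values are examples, not
 * taken from this file): split a 64-bit nanosecond count into seconds
 * and the nanosecond remainder.
 *
 *	unsigned long long rem;
 *	unsigned long long sec = xnarch_divmod64(ns, 1000000000ULL, &rem);
 */
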
#ifndef xnarch_imuldiv
static inline __attribute__((__const__)) int
xnarch_generic_imuldiv(int i, int mult, int div)
{
	/* Computes (unsigned long long)i * mult / div with a 64-bit product. */
	const unsigned long long ull = xnarch_ullmul(i, mult);
	return xnarch_uldivrem(ull, div, NULL);
}
#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
#endif /* !xnarch_imuldiv */

#ifndef xnarch_imuldiv_ceil
static inline __attribute__((__const__)) int
xnarch_generic_imuldiv_ceil(int i, int mult, int div)
{
	/* Same as xnarch_generic_imuldiv(), but rounds the result up. */
	const unsigned long long ull = xnarch_ullmul(i, mult);
	return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
}
#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
#endif /* !xnarch_imuldiv_ceil */

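/*
 * Worked example of the two rounding modes (values chosen purely for
 * illustration): with i = 3, mult = 2, div = 4 the product is 6, so
 * xnarch_imuldiv() returns 6 / 4 = 1 (round down), while
 * xnarch_imuldiv_ceil() returns (6 + 4 - 1) / 4 = 2 (round up).
 */
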
/*
 * Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned
 * 32-bit divisor: the building block for the llimd helpers below.
 */
static inline unsigned long long
xnarch_generic_div96by32(const unsigned long long h,
			 const unsigned l,
			 const unsigned d,
			 unsigned long *const rp)
{
	unsigned long rh;
	const unsigned qh = xnarch_uldivrem(h, d, &rh);
	const unsigned long long t = xnarch_u64fromu32(rh, l);
	const unsigned ql = xnarch_uldivrem(t, d, rp);

	return xnarch_u64fromu32(qh, ql);
}

#ifndef xnarch_llimd
static inline __attribute__((__const__)) unsigned long long
xnarch_generic_ullimd(const unsigned long long op,
		      const unsigned m,
		      const unsigned d)
{
	unsigned int oph, opl, tlh, tll;
	unsigned long long th, tl;

	xnarch_u64tou32(op, oph, opl);
	tl = xnarch_ullmul(opl, m);
	xnarch_u64tou32(tl, tlh, tll);
	th = xnarch_ullmul(oph, m);
	th += tlh;

	return xnarch_generic_div96by32(th, tll, d, NULL);
}

static inline __attribute__((__const__)) long long
xnarch_generic_llimd (long long op, unsigned m, unsigned d)
{
	long long ret;
	int sign = 0;

	if (op < 0LL) {
		sign = 1;
		op = -op;
	}
	ret = xnarch_generic_ullimd(op, m, d);

	return sign ? -ret : ret;
}
#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
#endif /* !xnarch_llimd */

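/*
 * Typical use of llimd ("multiply a long long by an m/d ratio"): scale
 * a 64-bit count without overflowing the intermediate product, e.g.
 * converting timer ticks to nanoseconds. The tick frequency below is a
 * made-up example value, not one taken from this file.
 *
 *	ns = xnarch_llimd(ticks, 1000000000, 19200000);
 */
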
#ifndef xnarch_u96shift
#define xnarch_u96shift(h, m, l, s) ({		\
	unsigned int _l = (l);			\
	unsigned int _m = (m);			\
	unsigned int _s = (s);			\
	_l >>= _s;				\
	_l |= (_m << (32 - _s));		\
	_m >>= _s;				\
	_m |= ((h) << (32 - _s));		\
	xnarch_u64fromu32(_m, _l);		\
})
#endif /* !xnarch_u96shift */

static inline long long xnarch_llmi(int i, int j)
{
	/* Fast 32x32 -> 64-bit signed multiplication. */
	return (long long) i * j;
}

#ifndef xnarch_llmulshft
/* Fast scaled-math-based replacement for long long multiply-divide. */
static inline long long
xnarch_generic_llmulshft(const long long op,
			 const unsigned m,
			 const unsigned s)
{
	unsigned int oph, opl, tlh, tll, thh, thl;
	unsigned long long th, tl;

	xnarch_u64tou32(op, oph, opl);
	tl = xnarch_ullmul(opl, m);
	xnarch_u64tou32(tl, tlh, tll);
	th = xnarch_llmi(oph, m);
	th += tlh;
	xnarch_u64tou32(th, thh, thl);

	return xnarch_u96shift(thh, thl, tll, s);
}
#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
#endif /* !xnarch_llmulshft */

#ifdef XNARCH_HAVE_NODIV_LLIMD

/* Representation of a fraction of two 32-bit values. */
struct xnarch_u32frac {
	unsigned long long frac;	/* Fractional part. */
	unsigned integ;			/* Integer part. */
};

static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
				       const unsigned m, const unsigned d)
{
	/*
	 * Volatile divisor: keep the compiler from folding the division
	 * or flagging a division by zero it infers from inlined callers.
	 */
	volatile unsigned vol_d = d;
	f->integ = m / d;
	f->frac = xnarch_generic_div96by32
		(xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
}

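/*
 * In this representation f.integ = m / d and f.frac is the remainder
 * scaled to a 64-bit fixed-point fraction, i.e. 2^64 * (m % d) / d as
 * computed by div96by32 above. The nodiv helpers below then evaluate
 * op * m / d as op * f.integ plus the high half of op * f.frac,
 * trading the runtime division for multiplications.
 */
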
#ifndef xnarch_nodiv_imuldiv
static inline __attribute__((__const__)) unsigned
xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
{
	return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
}
#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
#endif /* !xnarch_nodiv_imuldiv */

#ifndef xnarch_nodiv_imuldiv_ceil
static inline __attribute__((__const__)) unsigned
xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
{
	unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
	return (full >> 32) + f.integ * op;
}
#define xnarch_nodiv_imuldiv_ceil(op, f) \
	xnarch_generic_nodiv_imuldiv_ceil((op),(f))
#endif /* !xnarch_nodiv_imuldiv_ceil */

#ifndef xnarch_nodiv_ullimd

#ifndef xnarch_add96and64
#error "xnarch_add96and64 must be implemented."
#endif /* !xnarch_add96and64 */

static inline __attribute__((__const__)) unsigned long long
xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
{
	/* Compute the high 64 bits of a 64-bit x 64-bit multiplication. */
	register unsigned long long t0, t1, t2, t3;
	register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;

	xnarch_u64tou32(op, oph, opl);
	xnarch_u64tou32(m, mh, ml);
	t0 = xnarch_ullmul(opl, ml);
	xnarch_u64tou32(t0, t0h, t0l);
	t3 = xnarch_ullmul(oph, mh);
	xnarch_u64tou32(t3, t3h, t3l);
	xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
	t1 = xnarch_ullmul(oph, ml);
	xnarch_u64tou32(t1, t1h, t1l);
	xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
	t2 = xnarch_ullmul(opl, mh);
	xnarch_u64tou32(t2, t2h, t2l);
	xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);

	return xnarch_u64fromu32(t3h, t3l);
}

static inline unsigned long long
xnarch_generic_nodiv_ullimd(const unsigned long long op,
			    const unsigned long long frac,
			    const unsigned integ)
{
	return xnarch_mul64by64_high(op, frac) + integ * op;
}
#define xnarch_nodiv_ullimd(op, f, i) xnarch_generic_nodiv_ullimd((op),(f), (i))
#endif /* !xnarch_nodiv_ullimd */

#ifndef xnarch_nodiv_llimd
static inline __attribute__((__const__)) long long
xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
			   unsigned integ)
{
	long long ret;
	int sign = 0;

	if (op < 0LL) {
		sign = 1;
		op = -op;
	}
	ret = xnarch_nodiv_ullimd(op, frac, integ);
	return sign ? -ret : ret;
}
#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
#endif /* !xnarch_nodiv_llimd */

#endif /* XNARCH_HAVE_NODIV_LLIMD */

static inline void xnarch_init_llmulshft(const unsigned m_in,
					 const unsigned d_in,
					 unsigned *m_out, unsigned *s_out)
{
	/* Volatile divisor: keep the compiler from folding the division. */
	volatile unsigned int vol_d = d_in;
	unsigned long long mult;

	*s_out = 31;
	while (1) {
		mult = ((unsigned long long)m_in) << *s_out;
		do_div(mult, vol_d);
		if (mult <= 0x7FFFFFFF)
			break;
		(*s_out)--;
	}
	*m_out = (unsigned int)mult;
}

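/*
 * Illustrative pairing of the two scaled-math helpers (the constants
 * are made-up example values): precompute a (multiplier, shift) pair
 * once, then scale on the hot path with a multiply and a shift instead
 * of a division.
 *
 *	unsigned m, s;
 *	xnarch_init_llmulshft(1000000000, 19200000, &m, &s);
 *	ns = xnarch_llmulshft(ticks, m, s);
 */
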
#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
#define xnarch_ulmod(ull, d)         ({ unsigned long _rem;	\
					xnarch_uldivrem(ull,d,&_rem); _rem; })

#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
#define xnarch_mod64(a,b)            ({ unsigned long long _rem;	\
					xnarch_divmod64((a),(b),&_rem); _rem; })

#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */