Commit d5e181f7 authored by Jeremy Fitzhardinge's avatar Jeremy Fitzhardinge Committed by Ingo Molnar

add an inlined version of iter_div_u64_rem

iter_div_u64_rem is used in the x86-64 vdso, which cannot call other
kernel code.  For this case, provide the always_inlined version,
__iter_div_u64_rem.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent f595ec96
...@@ -83,4 +83,23 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
/*
 * Iterative 64-by-32 division: quotient is returned, remainder is stored
 * through *remainder.  Always inlined so callers such as the x86-64 vdso,
 * which must not call out into normal kernel code, get a self-contained
 * copy with no external references.
 */
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 quot = 0;

	for (; dividend >= divisor; quot++) {
		/*
		 * The empty asm with a "+rm" constraint makes 'dividend'
		 * opaque to the compiler each iteration, so it cannot
		 * collapse this loop into a modulo operation (which could
		 * become an out-of-line library call).
		 */
		asm("" : "+rm"(dividend));
		dividend -= divisor;
	}

	*remainder = dividend;

	return quot;
}
#endif /* _LINUX_MATH64_H */
...@@ -105,19 +105,6 @@ EXPORT_SYMBOL(div64_u64);
 */
/*
 * Out-of-line wrapper for ordinary kernel callers; the actual work is done
 * by the always-inlined __iter_div_u64_rem() (shared with the x86-64 vdso,
 * which cannot call this exported symbol).
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment