Merge branch 'core/iter-div' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core/iter-div' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  always_inline timespec_add_ns
  add an inlined version of iter_div_u64_rem
  common implementation of iterative div/mod
commit dc10885d68
4 changed files with 40 additions and 20 deletions
arch/x86/xen/time.c

@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>

@@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
 	if (stolen < 0)
 		stolen = 0;
 
-	ticks = 0;
-	while (stolen >= NS_PER_TICK) {
-		ticks++;
-		stolen -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
 	account_steal_time(NULL, ticks);

@@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
 	if (blocked < 0)
 		blocked = 0;
 
-	ticks = 0;
-	while (blocked >= NS_PER_TICK) {
-		ticks++;
-		blocked -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
 	account_steal_time(idle_task(smp_processor_id()), ticks);
 }

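The two hunks above replace the open-coded tick loops in do_stolen_accounting() with a single call to the new helper; both forms count whole NS_PER_TICK periods and carry the sub-tick residue over to the next accounting pass. A minimal userspace sketch of that equivalence (the NS_PER_TICK value below is an assumption for the demo; the kernel derives the real value from HZ):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000ULL / 250)	/* assumed 250 Hz tick, demo only */

int main(void)
{
	uint64_t stolen = 3 * NS_PER_TICK + 123456;	/* a bit over three ticks */
	uint64_t residual = stolen;
	uint32_t ticks = 0;

	/* The loop removed above: count whole ticks, keep the remainder. */
	while (residual >= NS_PER_TICK) {
		ticks++;
		residual -= NS_PER_TICK;
	}

	/* iter_div_u64_rem(stolen, NS_PER_TICK, &stolen) yields the same pair:
	   quotient as the return value, remainder through the pointer. */
	assert(ticks == stolen / NS_PER_TICK);
	assert(residual == stolen % NS_PER_TICK);
	printf("ticks=%u residual=%llu ns\n", ticks, (unsigned long long)residual);
	return 0;
}
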
include/linux/math64.h

@@ -81,4 +81,25 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 }
 #endif
 
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
 #endif /* _LINUX_MATH64_H */

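Two details of the helper above are worth spelling out: the quotient comes back as the return value and the remainder through the pointer, and the empty asm("" : "+rm"(dividend)) tells the compiler that dividend may have changed, which stops it from collapsing the loop into a 64-bit division or modulo (an expensive libgcc call on 32-bit targets). A standalone userspace sketch, with the kernel's u32/u64 types swapped for stdint ones and the double-underscore name dropped; everything else mirrors the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of __iter_div_u64_rem() from the hunk above. */
static inline uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
					uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		/* The empty asm makes 'dividend' look clobbered, so the
		   compiler cannot rewrite the loop as a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t q = iter_div_u64_rem(2500000123ULL, 1000000000U, &rem);

	/* 2.500000123 seconds worth of nanoseconds: quotient 2, remainder 500000123. */
	printf("q=%u rem=%llu\n", q, (unsigned long long)rem);
	return 0;
}

The out-of-line iter_div_u64_rem() added to lib/div64.c further down simply wraps this inline variant; the inline form exists for callers such as the x86-64 vDSO that cannot call functions in the kernel image.
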
include/linux/time.h

@@ -6,6 +6,7 @@
 #ifdef __KERNEL__
 # include <linux/cache.h>
 # include <linux/seqlock.h>
+# include <linux/math64.h>
 #endif
 
 #ifndef _STRUCT_TIMESPEC

@@ -169,18 +170,13 @@ extern struct timeval ns_to_timeval(const s64 nsec);
  * timespec_add_ns - Adds nanoseconds to a timespec
  * @a: pointer to timespec to be incremented
  * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because its used from the x86-64 vdso,
+ * which cannot call other kernel functions.
  */
-static inline void timespec_add_ns(struct timespec *a, u64 ns)
+static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
 {
-	ns += a->tv_nsec;
-	while(unlikely(ns >= NSEC_PER_SEC)) {
-		/* The following asm() prevents the compiler from
-		 * optimising this loop into a modulo operation. */
-		asm("" : "+r"(ns));
-
-		ns -= NSEC_PER_SEC;
-		a->tv_sec++;
-	}
+	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
 	a->tv_nsec = ns;
 }
 #endif /* __KERNEL__ */

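With the helper in place, timespec_add_ns() no longer needs its own carry loop: the quotient from __iter_div_u64_rem() is the number of whole seconds to add to tv_sec, and the remainder becomes the new tv_nsec. A userspace sketch of that normalisation; the kernel uses the iterative helper to avoid a 64-bit division on the vDSO path, but a plain div/mod produces the same result and keeps the demo short:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

/* Userspace sketch of the rewritten timespec_add_ns(): add the nanoseconds,
   then split the total into whole seconds plus a sub-second remainder. */
static void timespec_add_ns_demo(struct timespec *a, uint64_t ns)
{
	ns += (uint64_t)a->tv_nsec;
	a->tv_sec += ns / NSEC_PER_SEC;
	a->tv_nsec = ns % NSEC_PER_SEC;
}

int main(void)
{
	struct timespec ts = { .tv_sec = 10, .tv_nsec = 900000000 };

	timespec_add_ns_demo(&ts, 2300000000ULL);	/* add 2.3 s worth of ns */
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);	/* 13.200000000 */
	return 0;
}
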
lib/div64.c

@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);

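The comment in this hunk states the constraint on the whole scheme: the loop is linear in the quotient, so it only pays off when the dividend is expected to exceed the divisor by a few multiples at most, as in the tick-accounting and timespec callers above. A small sketch making the iteration count visible (the counter parameter is added for the demo only and is not part of the kernel interface):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the exported wrapper's contract, with an iteration counter bolted
   on so the cost of the repeated subtraction is visible. */
static uint32_t iter_div_u64_rem_counted(uint64_t dividend, uint32_t divisor,
					 uint64_t *remainder, unsigned *iters)
{
	uint32_t ret = 0;

	*iters = 0;
	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
		(*iters)++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	unsigned iters;

	/* Intended use: dividend barely above the divisor, so the loop runs twice. */
	iter_div_u64_rem_counted(2100000000ULL, 1000000000U, &rem, &iters);
	printf("small quotient: %u iterations\n", iters);

	/* Outside the intended use: a large quotient costs that many iterations. */
	iter_div_u64_rem_counted(1000000ULL * 1000000000ULL, 1000000000U, &rem, &iters);
	printf("large quotient: %u iterations\n", iters);
	return 0;
}
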