Merge branch 'core/iter-div' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core/iter-div' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  always_inline timespec_add_ns
  add an inlined version of iter_div_u64_rem
  common implementation of iterative div/mod

 arch/x86/xen/time.c    | 13 +++----------
 include/linux/math64.h | 21 +++++++++++++++++++++
 include/linux/time.h   | 16 ++++++----------
 lib/div64.c            | 10 ++++++++++
 4 files changed, 40 insertions(+), 20 deletions(-)

diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -12,6 +12,7 @@
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
 #include <linux/kernel_stat.h>
+#include <linux/math64.h>
 
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
@@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
 	if (stolen < 0)
 		stolen = 0;
 
-	ticks = 0;
-	while (stolen >= NS_PER_TICK) {
-		ticks++;
-		stolen -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
 	__get_cpu_var(residual_stolen) = stolen;
 	account_steal_time(NULL, ticks);
 
@@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
 	if (blocked < 0)
 		blocked = 0;
 
-	ticks = 0;
-	while (blocked >= NS_PER_TICK) {
-		ticks++;
-		blocked -= NS_PER_TICK;
-	}
+	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
 	__get_cpu_var(residual_blocked) = blocked;
 	account_steal_time(idle_task(smp_processor_id()), ticks);
 }
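[Editor's note: the pattern in these hunks is worth seeing in isolation. The open-coded subtract loops are replaced by iter_div_u64_rem(), which returns the number of whole ticks and writes the sub-tick remainder back through the pointer, so the residue carries over to the next accounting pass instead of being lost. Below is a standalone userspace sketch of that pattern; the "sketch_" names and the HZ=250 tick length are hypothetical, not part of the commit.]

/*
 * Userspace sketch of the stolen-time accounting pattern above.
 * Everything prefixed "sketch_" is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NS_PER_TICK (1000000000u / 250)	/* assumes HZ=250: 4ms/tick */

static uint32_t sketch_iter_div(uint64_t dividend, uint32_t divisor,
				uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	/* residual plays the role of the per-cpu residual_stolen. */
	uint64_t residual = 0;
	uint64_t samples[] = { 9500000, 2600000 };	/* stolen ns per pass */

	for (int i = 0; i < 2; i++) {
		uint64_t stolen = samples[i] + residual;
		uint32_t ticks = sketch_iter_div(stolen, SKETCH_NS_PER_TICK,
						 &residual);

		/* Whole ticks are accounted; the sub-tick rest carries over. */
		printf("pass %d: %u ticks, %llu ns residual\n",
		       i, (unsigned)ticks, (unsigned long long)residual);
	}
	return 0;
}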
diff --git a/include/linux/math64.h b/include/linux/math64.h
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -81,4 +81,25 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
 }
 #endif
 
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	u32 ret = 0;
+
+	while (dividend >= divisor) {
+		/* The following asm() prevents the compiler from
+		   optimising this loop into a modulo operation. */
+		asm("" : "+rm"(dividend));
+
+		dividend -= divisor;
+		ret++;
+	}
+
+	*remainder = dividend;
+
+	return ret;
+}
+
 #endif /* _LINUX_MATH64_H */
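[Editor's note: the helper's contract is: quotient as the return value, remainder through the out-parameter, and a loop that is cheap exactly when the quotient is small. A standalone userspace sketch, with hypothetical "sketch_" names and compiled against libc rather than kernel headers:]

/*
 * Userspace sketch of the new helper's contract (not kernel code).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sketch_iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
					uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		/* Barrier in the spirit of the kernel helper's asm():
		 * keeps the compiler from collapsing the loop into a
		 * real 64-bit divide. */
		__asm__("" : "+r"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;
	return ret;
}

int main(void)
{
	uint64_t rem;
	uint32_t quot;

	/* 2.5 seconds worth of nanoseconds: 2 whole seconds, 0.5s left. */
	quot = sketch_iter_div_u64_rem(2500000000ULL, 1000000000U, &rem);
	assert(quot == 2 && rem == 500000000ULL);

	printf("quot=%u rem=%llu\n", (unsigned)quot, (unsigned long long)rem);
	return 0;
}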
diff --git a/include/linux/time.h b/include/linux/time.h
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -6,6 +6,7 @@
 #ifdef __KERNEL__
 # include <linux/cache.h>
 # include <linux/seqlock.h>
+# include <linux/math64.h>
 #endif
 
 #ifndef _STRUCT_TIMESPEC
@@ -170,18 +171,13 @@
  * timespec_add_ns - Adds nanoseconds to a timespec
  * @a:	pointer to timespec to be incremented
  * @ns:	unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because it's used from the x86-64 vdso,
+ * which cannot call other kernel functions.
  */
-static inline void timespec_add_ns(struct timespec *a, u64 ns)
+static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
 {
-	ns += a->tv_nsec;
-	while(unlikely(ns >= NSEC_PER_SEC)) {
-		/* The following asm() prevents the compiler from
-		 * optimising this loop into a modulo operation. */
-		asm("" : "+r"(ns));
-
-		ns -= NSEC_PER_SEC;
-		a->tv_sec++;
-	}
+	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
 	a->tv_nsec = ns;
 }
 #endif /* __KERNEL__ */
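[Editor's note: the net effect on timespec_add_ns is easiest to see in a small userspace sketch, below. It mirrors the hunk above rather than reusing kernel code; the "sketch_" names are hypothetical, and struct timespec here is the C library's. Whole seconds carry into tv_sec, the remainder lands in tv_nsec.]

/*
 * Userspace sketch of the rewritten timespec_add_ns (not kernel code).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SKETCH_NSEC_PER_SEC 1000000000u

static uint32_t sketch_iter_div(uint64_t dividend, uint32_t divisor,
				uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

static void sketch_timespec_add_ns(struct timespec *a, uint64_t ns)
{
	/* Carry whole seconds into tv_sec, keep the remainder in
	 * tv_nsec, mirroring the __iter_div_u64_rem() call above. */
	a->tv_sec += sketch_iter_div(a->tv_nsec + ns, SKETCH_NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}

int main(void)
{
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 900000000 };

	sketch_timespec_add_ns(&ts, 300000000);	/* crosses a second boundary */
	printf("%ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);	/* 6.200000000 */
	return 0;
}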
diff --git a/lib/div64.c b/lib/div64.c
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
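[Editor's note: two copies by design. The out-of-line version here gives general kernel code and modules a plain call target via EXPORT_SYMBOL, while the __always_inline variant in math64.h stays usable from contexts like the x86-64 vdso that cannot call into the kernel proper. A userspace spot check of the helper's semantics, with hypothetical "sketch_" names, might look like:]

/*
 * Sketch: the iterative helper must agree with native / and %.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sketch_iter_div_u64_rem(uint64_t dividend, uint32_t divisor,
					uint64_t *remainder)
{
	uint32_t ret = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		ret++;
	}
	*remainder = dividend;
	return ret;
}

int main(void)
{
	for (uint64_t n = 0; n < 100000; n++) {
		uint64_t rem;
		uint32_t quot = sketch_iter_div_u64_rem(n, 1000, &rem);

		assert(quot == n / 1000 && rem == n % 1000);
	}
	return 0;
}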