Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

remove div_long_long_rem

x86 is currently the only arch that provides an optimized version of
div_long_long_rem, and it has the downside that one has to be very careful
that the divide doesn't overflow.

The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64-bit archs.

There is little incentive to keep this API alive, so this converts the few
users to the new API.

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Roman Zippel and committed by
Linus Torvalds
f8bd2258 6f6d6a1a

+44 -110
+3 -2
arch/mips/kernel/binfmt_elfn32.c
··· 54 54 #include <linux/module.h> 55 55 #include <linux/elfcore.h> 56 56 #include <linux/compat.h> 57 + #include <linux/math64.h> 57 58 58 59 #define elf_prstatus elf_prstatus32 59 60 struct elf_prstatus32 ··· 103 102 * one divide. 104 103 */ 105 104 u64 nsec = (u64)jiffies * TICK_NSEC; 106 - long rem; 107 - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); 105 + u32 rem; 106 + value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); 108 107 value->tv_usec = rem / NSEC_PER_USEC; 109 108 } 110 109
+3 -2
arch/mips/kernel/binfmt_elfo32.c
··· 56 56 #include <linux/module.h> 57 57 #include <linux/elfcore.h> 58 58 #include <linux/compat.h> 59 + #include <linux/math64.h> 59 60 60 61 #define elf_prstatus elf_prstatus32 61 62 struct elf_prstatus32 ··· 105 104 * one divide. 106 105 */ 107 106 u64 nsec = (u64)jiffies * TICK_NSEC; 108 - long rem; 109 - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); 107 + u32 rem; 108 + value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); 110 109 value->tv_usec = rem / NSEC_PER_USEC; 111 110 } 112 111
+11 -13
drivers/char/mmtimer.c
··· 30 30 #include <linux/miscdevice.h> 31 31 #include <linux/posix-timers.h> 32 32 #include <linux/interrupt.h> 33 + #include <linux/time.h> 34 + #include <linux/math64.h> 33 35 34 36 #include <asm/uaccess.h> 35 37 #include <asm/sn/addrs.h> ··· 474 472 475 473 nsec = rtc_time() * sgi_clock_period 476 474 + sgi_clock_offset.tv_nsec; 477 - tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec) 478 - + sgi_clock_offset.tv_sec; 475 + *tp = ns_to_timespec(nsec); 476 + tp->tv_sec += sgi_clock_offset.tv_sec; 479 477 return 0; 480 478 }; 481 479 ··· 483 481 { 484 482 485 483 u64 nsec; 486 - u64 rem; 484 + u32 rem; 487 485 488 486 nsec = rtc_time() * sgi_clock_period; 489 487 490 - sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem); 488 + sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem); 491 489 492 490 if (rem <= tp->tv_nsec) 493 491 sgi_clock_offset.tv_nsec = tp->tv_sec - rem; ··· 646 644 return 0; 647 645 } 648 646 649 - #define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC) 650 - #define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec) 651 - 652 647 /* Assumption: it_lock is already held with irq's disabled */ 653 648 static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) 654 649 { ··· 658 659 return; 659 660 } 660 661 661 - ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period); 662 - ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period); 663 - return; 662 + cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period); 663 + cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period); 664 664 } 665 665 666 666 ··· 677 679 sgi_timer_get(timr, old_setting); 678 680 679 681 sgi_timer_del(timr); 680 - when = timespec_to_ns(new_setting->it_value); 681 - period = 
timespec_to_ns(new_setting->it_interval); 682 + when = timespec_to_ns(&new_setting->it_value); 683 + period = timespec_to_ns(&new_setting->it_interval); 682 684 683 685 if (when == 0) 684 686 /* Clear timer */ ··· 693 695 unsigned long now; 694 696 695 697 getnstimeofday(&n); 696 - now = timespec_to_ns(n); 698 + now = timespec_to_ns(&n); 697 699 if (when > now) 698 700 when -= now; 699 701 else
-18
include/asm-x86/div64.h
··· 33 33 __mod; \ 34 34 }) 35 35 36 - /* 37 - * (long)X = ((long long)divs) / (long)div 38 - * (long)rem = ((long long)divs) % (long)div 39 - * 40 - * Warning, this will do an exception if X overflows. 41 - */ 42 - #define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c) 43 - 44 - static inline long div_ll_X_l_rem(long long divs, long div, long *rem) 45 - { 46 - long dum2; 47 - asm("divl %2":"=a"(dum2), "=d"(*rem) 48 - : "rm"(div), "A"(divs)); 49 - 50 - return dum2; 51 - 52 - } 53 - 54 36 static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) 55 37 { 56 38 union {
-49
include/linux/calc64.h
··· 1 - #ifndef _LINUX_CALC64_H 2 - #define _LINUX_CALC64_H 3 - 4 - #include <linux/types.h> 5 - #include <asm/div64.h> 6 - 7 - /* 8 - * This is a generic macro which is used when the architecture 9 - * specific div64.h does not provide a optimized one. 10 - * 11 - * The 64bit dividend is divided by the divisor (data type long), the 12 - * result is returned and the remainder stored in the variable 13 - * referenced by remainder (data type long *). In contrast to the 14 - * do_div macro the dividend is kept intact. 15 - */ 16 - #ifndef div_long_long_rem 17 - #define div_long_long_rem(dividend, divisor, remainder) \ 18 - do_div_llr((dividend), divisor, remainder) 19 - 20 - static inline unsigned long do_div_llr(const long long dividend, 21 - const long divisor, long *remainder) 22 - { 23 - u64 result = dividend; 24 - 25 - *(remainder) = do_div(result, divisor); 26 - return (unsigned long) result; 27 - } 28 - #endif 29 - 30 - /* 31 - * Sign aware variation of the above. On some architectures a 32 - * negative dividend leads to an divide overflow exception, which 33 - * is avoided by the sign check. 34 - */ 35 - static inline long div_long_long_rem_signed(const long long dividend, 36 - const long divisor, long *remainder) 37 - { 38 - long res; 39 - 40 - if (unlikely(dividend < 0)) { 41 - res = -div_long_long_rem(-dividend, divisor, remainder); 42 - *remainder = -(*remainder); 43 - } else 44 - res = div_long_long_rem(dividend, divisor, remainder); 45 - 46 - return res; 47 - } 48 - 49 - #endif
+1 -1
include/linux/jiffies.h
··· 1 1 #ifndef _LINUX_JIFFIES_H 2 2 #define _LINUX_JIFFIES_H 3 3 4 - #include <linux/calc64.h> 4 + #include <linux/math64.h> 5 5 #include <linux/kernel.h> 6 6 #include <linux/types.h> 7 7 #include <linux/time.h>
+5 -6
kernel/posix-cpu-timers.c
··· 4 4 5 5 #include <linux/sched.h> 6 6 #include <linux/posix-timers.h> 7 - #include <asm/uaccess.h> 8 7 #include <linux/errno.h> 8 + #include <linux/math64.h> 9 + #include <asm/uaccess.h> 9 10 10 11 static int check_clock(const clockid_t which_clock) 11 12 { ··· 48 47 union cpu_time_count cpu, 49 48 struct timespec *tp) 50 49 { 51 - if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { 52 - tp->tv_sec = div_long_long_rem(cpu.sched, 53 - NSEC_PER_SEC, &tp->tv_nsec); 54 - } else { 50 + if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) 51 + *tp = ns_to_timespec(cpu.sched); 52 + else 55 53 cputime_to_timespec(cpu.cpu, tp); 56 - } 57 54 } 58 55 59 56 static inline int cpu_time_before(const clockid_t which_clock,
+15 -10
kernel/time.c
··· 392 392 struct timespec ns_to_timespec(const s64 nsec) 393 393 { 394 394 struct timespec ts; 395 + s32 rem; 395 396 396 397 if (!nsec) 397 398 return (struct timespec) {0, 0}; 398 399 399 - ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec); 400 - if (unlikely(nsec < 0)) 401 - set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec); 400 + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); 401 + if (unlikely(rem < 0)) { 402 + ts.tv_sec--; 403 + rem += NSEC_PER_SEC; 404 + } 405 + ts.tv_nsec = rem; 402 406 403 407 return ts; 404 408 } ··· 532 528 * Convert jiffies to nanoseconds and separate with 533 529 * one divide. 534 530 */ 535 - u64 nsec = (u64)jiffies * TICK_NSEC; 536 - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec); 531 + u32 rem; 532 + value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, 533 + NSEC_PER_SEC, &rem); 534 + value->tv_nsec = rem; 537 535 } 538 536 EXPORT_SYMBOL(jiffies_to_timespec); 539 537 ··· 573 567 * Convert jiffies to nanoseconds and separate with 574 568 * one divide. 575 569 */ 576 - u64 nsec = (u64)jiffies * TICK_NSEC; 577 - long tv_usec; 570 + u32 rem; 578 571 579 - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec); 580 - tv_usec /= NSEC_PER_USEC; 581 - value->tv_usec = tv_usec; 572 + value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, 573 + NSEC_PER_SEC, &rem); 574 + value->tv_usec = rem / NSEC_PER_USEC; 582 575 } 583 576 EXPORT_SYMBOL(jiffies_to_timeval); 584 577
+2 -4
kernel/time/ntp.c
··· 234 234 */ 235 235 int do_adjtimex(struct timex *txc) 236 236 { 237 - long mtemp, save_adjust, rem; 237 + long mtemp, save_adjust; 238 238 s64 freq_adj; 239 239 int result; 240 240 ··· 345 345 freq_adj += time_freq; 346 346 freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); 347 347 time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); 348 - time_offset = div_long_long_rem_signed(time_offset, 349 - NTP_INTERVAL_FREQ, 350 - &rem); 348 + time_offset = div_s64(time_offset, NTP_INTERVAL_FREQ); 351 349 time_offset <<= SHIFT_UPDATE; 352 350 } /* STA_PLL */ 353 351 } /* txc->modes & ADJ_OFFSET */
+4 -5
mm/slub.c
··· 22 22 #include <linux/debugobjects.h> 23 23 #include <linux/kallsyms.h> 24 24 #include <linux/memory.h> 25 + #include <linux/math64.h> 25 26 26 27 /* 27 28 * Lock order: ··· 3622 3621 len += sprintf(buf + len, "<not-available>"); 3623 3622 3624 3623 if (l->sum_time != l->min_time) { 3625 - unsigned long remainder; 3626 - 3627 3624 len += sprintf(buf + len, " age=%ld/%ld/%ld", 3628 - l->min_time, 3629 - div_long_long_rem(l->sum_time, l->count, &remainder), 3630 - l->max_time); 3625 + l->min_time, 3626 + (long)div_u64(l->sum_time, l->count), 3627 + l->max_time); 3631 3628 } else 3632 3629 len += sprintf(buf + len, " age=%ld", 3633 3630 l->min_time);