Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

+42 -396
-5
arch/i386/Kconfig
@@ -908,11 +908,6 @@
 	  The default yes will allow the kernel to do irq load balancing.
 	  Saying no will keep the kernel from doing irq load balancing.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT) && X86_CMPXCHG
-	default y
-
 # turning this on wastes a bunch of space.
 # Summit needs it only when NUMA is on
 config BOOT_IOREMAP
-1
arch/i386/lib/Makefile
@@ -7,4 +7,3 @@
 	bitops.o
 
 lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-42
arch/i386/lib/dec_and_lock.c
@@ -1,42 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-5
arch/ia64/Kconfig
@@ -298,11 +298,6 @@
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default y
-
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
-1
arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
-42
arch/ia64/lib/dec_and_lock.c
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-	int old, new;
-
-	do {
-		old = atomic_read(refcount);
-		new = old - 1;
-
-		if (unlikely (old == 1)) {
-			/* oops, we may be decrementing to zero, do it the slow way... */
-			spin_lock(lock);
-			if (atomic_dec_and_test(refcount))
-				return 1;
-			spin_unlock(lock);
-			return 0;
-		}
-	} while (cmpxchg(&refcount->counter, old, new) != old);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-5
arch/m32r/Kconfig
@@ -220,11 +220,6 @@
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default n
-
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
-4
arch/mips/Kconfig
@@ -1009,10 +1009,6 @@
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 #
 # Select some configuration options automatically based on user selections.
 #
+1 -1
arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y	+= csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
+lib-y	+= csum_partial_copy.o memcpy.o promlib.o \
 	   strlen_user.o strncpy_user.o strnlen_user.o
 
 obj-y	+= iomap.o
-47
arch/mips/lib/dec_and_lock.c
@@ -1,47 +0,0 @@
-/*
- * MIPS version of atomic_dec_and_lock() using cmpxchg
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-4
arch/ppc/Kconfig
@@ -26,10 +26,6 @@
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config PPC
 	bool
 	default y
+1 -1
arch/ppc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc-specific library files..
 #
 
-obj-y	:= checksum.o string.o strcase.o dec_and_lock.o div64.o
+obj-y	:= checksum.o string.o strcase.o div64.o
 
 obj-$(CONFIG_8xx)	+= rheap.o
 obj-$(CONFIG_CPM2)	+= rheap.o
-38
arch/ppc/lib/dec_and_lock.c
@@ -1,38 +0,0 @@
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-4
arch/ppc64/Kconfig
@@ -28,10 +28,6 @@
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config EARLY_PRINTK
 	bool
 	default y
+1 -1
arch/ppc64/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ppc64-specific library files..
 #
 
-lib-y	:= checksum.o dec_and_lock.o string.o strcase.o
+lib-y	:= checksum.o string.o strcase.o
 lib-y	+= copypage.o memcpy.o copyuser.o usercopy.o
 
 # Lock primitives are defined as no-ops in include/linux/spinlock.h
-47
arch/ppc64/lib/dec_and_lock.c
@@ -1,47 +0,0 @@
-/*
- * ppc64 version of atomic_dec_and_lock() using cmpxchg
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
-/*
- * This is an implementation of the notion of "decrement a
- * reference count, and return locked if it decremented to zero".
- *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-8
arch/sparc64/Kconfig.debug
@@ -33,14 +33,6 @@
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
-
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
-3
arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
-2
arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
-80
arch/sparc64/lib/dec_and_lock.S
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- *                 using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
-	.text
-	.align	64
-
-	/* CAS basically works like this:
-	 *
-	 * void CAS(MEM, REG1, REG2)
-	 * {
-	 *   START_ATOMIC();
-	 *   if (*(MEM) == REG1) {
-	 *     TMP = *(MEM);
-	 *     *(MEM) = REG2;
-	 *     REG2 = TMP;
-	 *   } else
-	 *     REG2 = *(MEM);
-	 *   END_ATOMIC();
-	 * }
-	 */
-
-	.globl	_atomic_dec_and_lock
-_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
-loop1:	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, start_to_zero
-	 nop
-nzero:	cas	[%o0], %g2, %g7
-	cmp	%g2, %g7
-	bne,pn	%icc, loop1
-	 mov	0, %g1
-
-out:
-	membar	#StoreLoad | #StoreStore
-	retl
-	 mov	%g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	add	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
-	ldstub	[%o1], %g3
-	membar	#StoreLoad | #StoreStore
-	brnz,pn	%g3, spin_on_lock
-	 nop
-loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
-	cmp	%g2, %g7
-
-	be,pt	%icc, out
-	 mov	1, %g1
-	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, loop2
-	 nop
-	membar	#StoreStore | #LoadStore
-	stb	%g0, [%o1]
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	sub	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-
-	b,pt	%xcc, nzero
-	 nop
-spin_on_lock:
-	ldub	[%o1], %g3
-	membar	#LoadLoad
-	brnz,pt	%g3, spin_on_lock
-	 nop
-	ba,pt	%xcc, to_zero
-	 nop
-	nop
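For readers who don't read SPARC assembly: the deleted routine implements the same contract as the generic C version that replaces it. The following is a hand-written C sketch, not a line-for-line translation — the real code also bumps the preempt count around the locked section and issues explicit membar barriers, and the function name here is invented:

static int dec_and_lock_sketch(atomic_t *v, spinlock_t *lock)
{
	for (;;) {
		int old = atomic_read(v);

		if (old != 1) {
			/* fast path: decrement with cas, no lock taken */
			if (cmpxchg(&v->counter, old, old - 1) == old)
				return 0;
			continue;	/* lost the race, reread and retry */
		}

		/* count would hit zero: take the lock first (the
		 * ldstub spin loop in the assembly), then retry */
		spin_lock(lock);
		if (cmpxchg(&v->counter, 1, 0) == 1)
			return 1;	/* zero; caller holds the lock */
		spin_unlock(lock);	/* count was bumped meanwhile */
	}
}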
-5
arch/x86_64/Kconfig
@@ -277,11 +277,6 @@
 config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool y
 
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-256)"
 	range 2 256
-4
arch/x86_64/kernel/x8664_ksyms.c
@@ -178,10 +178,6 @@
 
 EXPORT_SYMBOL(empty_zero_page);
 
-#ifdef CONFIG_HAVE_DEC_LOCK
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
-
 EXPORT_SYMBOL(die_chain);
 EXPORT_SYMBOL(register_die_notifier);
-2
arch/x86_64/lib/Makefile
@@ -10,5 +10,3 @@
 	usercopy.o getuser.o putuser.o \
 	thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
-
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-40
arch/x86_64/lib/dec_and_lock.c
@@ -1,40 +0,0 @@
-/*
- * x86 version of "atomic_dec_and_lock()" using
- * the atomic "cmpxchg" instruction.
- *
- * (For CPU's lacking cmpxchg, we use the slow
- * generic version, and this one never even gets
- * compiled).
- */
-
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-repeat:
-	counter = atomic_read(atomic);
-	newcount = counter-1;
-
-	if (!newcount)
-		goto slow_path;
-
-	asm volatile("lock; cmpxchgl %1,%2"
-		:"=a" (newcount)
-		:"r" (newcount), "m" (atomic->counter), "0" (counter));
-
-	/* If the above failed, "eax" will have changed */
-	if (newcount != counter)
-		goto repeat;
-	return 0;
-
-slow_path:
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-4
arch/xtensa/Kconfig
@@ -26,10 +26,6 @@
 	bool
 	default y
 
-config HAVE_DEC_LOCK
-	bool
-	default y
-
 config GENERIC_HARDIRQS
 	bool
 	default y
+4
fs/compat.c
@@ -44,5 +44,7 @@
 #include <linux/nfsd/syscall.h>
 #include <linux/personality.h>
 #include <linux/rwsem.h>
+#include <linux/acct.h>
+#include <linux/mm.h>
 
 #include <net/sock.h>		/* siocdevprivate_ioctl */
@@ -1487,6 +1489,8 @@
 
 	/* execve success */
 	security_bprm_free(bprm);
+	acct_update_integrals(current);
+	update_mem_hiwater(current);
 	kfree(bprm);
 	return retval;
 }
+35
lib/dec_and_lock.c
@@ -1,7 +1,41 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
+#include <asm/system.h>
 
+#ifdef __HAVE_ARCH_CMPXCHG
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ */
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	int counter;
+	int newcount;
+
+	for (;;) {
+		counter = atomic_read(atomic);
+		newcount = counter - 1;
+		if (!newcount)
+			break;		/* do it the slow way */
+
+		newcount = cmpxchg(&atomic->counter, counter, newcount);
+		if (newcount == counter)
+			return 0;
+	}
+
+	spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	spin_unlock(lock);
+	return 0;
+}
+#else
 /*
  * This is an architecture-neutral, but slow,
  * implementation of the notion of "decrement
@@ -33,5 +67,6 @@
 	spin_unlock(lock);
 	return 0;
 }
+#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
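With the per-architecture variants gone, every port now builds this one file (taking the cmpxchg fast path where __HAVE_ARCH_CMPXCHG is defined), and callers are unchanged: atomic_dec_and_lock() returns 1 with the lock held only when the count hits zero, so unlink-and-free cannot race with a lookup that takes a new reference under the same lock. A sketch of the usual caller pattern — the object type, lock, and function names below are invented for illustration:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>

/* Illustrative types; not part of this patch. */
struct my_obj {
	atomic_t		refcount;
	struct list_head	node;
};

static DEFINE_SPINLOCK(my_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Returns 1 with my_list_lock held only when the count
	 * reached zero; otherwise the count was just decremented
	 * and no lock was taken. */
	if (atomic_dec_and_lock(&obj->refcount, &my_list_lock)) {
		list_del(&obj->node);
		spin_unlock(&my_list_lock);
		kfree(obj);
	}
}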