Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
gcc-4.6: kernel/*: Fix unused but set warnings
mutex: Fix annotations to include it in kernel-locking docbook
pid: make setpgid() system call use RCU read-side critical section
MAINTAINERS: Add RCU's public git tree

+30 -29
+6
Documentation/DocBook/kernel-locking.tmpl
···
  </sect1>
  </chapter>

+ <chapter id="apiref">
+  <title>Mutex API reference</title>
+ !Iinclude/linux/mutex.h
+ !Ekernel/mutex.c
+ </chapter>
+
  <chapter id="references">
  <title>Further reading</title>

+2 -1
Documentation/mutex-design.txt
···
  mutex semantics are sufficient for your code, then there are a couple
  of advantages of mutexes:

-  - 'struct mutex' is smaller on most architectures: .e.g on x86,
+  - 'struct mutex' is smaller on most architectures: E.g. on x86,
     'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
     A smaller structure size means less RAM footprint, and better
     CPU-cache utilization.
···
  void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
  int mutex_lock_interruptible_nested(struct mutex *lock,
                                      unsigned int subclass);
+ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
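The atomic_dec_and_mutex_lock() prototype added above is the usual "drop a reference and take the lock only if the count hit zero" helper. A minimal usage sketch follows; the widget/pool structures and function names are hypothetical and not part of this patch:

#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

struct widget_pool {                    /* hypothetical container */
        struct mutex lock;              /* protects the widgets list */
        struct list_head widgets;
};

struct widget {                         /* hypothetical refcounted object */
        atomic_t refcount;
        struct list_head node;
        struct widget_pool *pool;
};

static void widget_put(struct widget *w)
{
        /*
         * atomic_dec_and_mutex_lock() returns nonzero only when the
         * decrement reaches zero, and in that case the mutex is already
         * held on return; otherwise the lock is never taken.
         */
        if (atomic_dec_and_mutex_lock(&w->refcount, &w->pool->lock)) {
                list_del(&w->node);
                mutex_unlock(&w->pool->lock);
                kfree(w);
        }
}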
+2
MAINTAINERS
···
  M: Josh Triplett <josh@freedesktop.org>
  M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
  S: Supported
+ T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
  F: Documentation/RCU/torture.txt
  F: kernel/rcutorture.c
···
  M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
  W: http://www.rdrop.com/users/paulmck/rclock/
  S: Supported
+ T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
  F: Documentation/RCU/
  F: include/linux/rcu*
  F: include/linux/srcu*
+8
include/linux/mutex.h
···
  # include <linux/mutex-debug.h>
  #else
  # define __DEBUG_MUTEX_INITIALIZER(lockname)
+ /**
+  * mutex_init - initialize the mutex
+  * @mutex: the mutex to be initialized
+  *
+  * Initialize the mutex to unlocked state.
+  *
+  * It is not allowed to initialize an already locked mutex.
+  */
  # define mutex_init(mutex) \
  do { \
  	static struct lock_class_key __key; \
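The kernel-doc block added above documents mutex_init() in the non-debug case and restates the rule that only an unlocked mutex may be initialized. A minimal sketch of both initialization styles; the cache structure and function names are made up for this example:

#include <linux/mutex.h>

static DEFINE_MUTEX(global_lock);       /* statically initialized, unlocked */

struct cache {                          /* hypothetical structure */
        struct mutex lock;
        int count;
};

static void cache_init(struct cache *c)
{
        mutex_init(&c->lock);           /* dynamic init; mutex starts unlocked */
        c->count = 0;
}

static void cache_bump(struct cache *c)
{
        mutex_lock(&c->lock);
        c->count++;
        mutex_unlock(&c->lock);
}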
-2
kernel/debug/kdb/kdb_bp.c
···
  int i, bpno;
  kdb_bp_t *bp, *bp_check;
  int diag;
- int free;
  char *symname = NULL;
  long offset = 0ul;
  int nextarg;
···
  /*
   * Find an empty bp structure to allocate
   */
- free = KDB_MAXBPT;
  for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
  	if (bp->bp_free)
  		break;
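This hunk, together with the hrtimer, sched_fair, sysctl and ring_buffer hunks below, silences gcc 4.6's new -Wunused-but-set-variable warning by deleting assignments whose results are never read. A contrived sketch of the pattern the compiler complains about; do_work() and example() are placeholders, not kernel functions:

static int do_work(void)                /* placeholder for illustration */
{
        return 42;
}

static void example(void)
{
        int ret;                        /* set below but never read ... */

        ret = do_work();                /* ... gcc 4.6 warns: "variable
                                         * 'ret' set but not used" */
}

/* The fix is either to use the value (return or check it) or, as in
 * these hunks, to delete the variable and keep just the call when its
 * side effects are still wanted. */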
+1 -2
kernel/hrtimer.c
···
   */
  ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
  {
- 	struct hrtimer_clock_base *base;
  	unsigned long flags;
  	ktime_t rem;

- 	base = lock_hrtimer_base(timer, &flags);
+ 	lock_hrtimer_base(timer, &flags);
  	rem = hrtimer_expires_remaining(timer);
  	unlock_hrtimer_base(timer, &flags);

+7 -16
kernel/mutex.c
···
  # include <asm/mutex.h>
  #endif

- /***
-  * mutex_init - initialize the mutex
-  * @lock: the mutex to be initialized
-  * @key: the lock_class_key for the class; used by mutex lock debugging
-  *
-  * Initialize the mutex to unlocked state.
-  *
-  * It is not allowed to initialize an already locked mutex.
-  */
  void
  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
  {
···
  static __used noinline void __sched
  __mutex_lock_slowpath(atomic_t *lock_count);

- /***
+ /**
   * mutex_lock - acquire the mutex
   * @lock: the mutex to be acquired
   *
···

  static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

- /***
+ /**
   * mutex_unlock - release the mutex
   * @lock: the mutex to be released
   *
···
  static noinline int __sched
  __mutex_lock_interruptible_slowpath(atomic_t *lock_count);

- /***
-  * mutex_lock_interruptible - acquire the mutex, interruptable
+ /**
+  * mutex_lock_interruptible - acquire the mutex, interruptible
   * @lock: the mutex to be acquired
   *
   * Lock the mutex like mutex_lock(), and return 0 if the mutex has
···
  	return prev == 1;
  }

- /***
-  * mutex_trylock - try acquire the mutex, without waiting
+ /**
+  * mutex_trylock - try to acquire the mutex, without waiting
   * @lock: the mutex to be acquired
   *
   * Try to acquire the mutex atomically. Returns 1 if the mutex
   * has been acquired successfully, and 0 on contention.
   *
   * NOTE: this function follows the spin_trylock() convention, so
-  * it is negated to the down_trylock() return values! Be careful
+  * it is negated from the down_trylock() return values! Be careful
   * about this when converting semaphore users to mutexes.
   *
   * This function must not be used in interrupt context. The
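The reworded mutex_trylock() comment is worth spelling out, since the return convention really is the opposite of down_trylock(). A small hedged comparison sketch; the lock names and the wrapper function are arbitrary:

#include <linux/mutex.h>
#include <linux/semaphore.h>

static DEFINE_MUTEX(m);
static struct semaphore s;

static void trylock_conventions(void)
{
        sema_init(&s, 1);               /* binary semaphore for comparison */

        /* mutex_trylock(): a nonzero return means the lock WAS acquired. */
        if (mutex_trylock(&m)) {
                /* ... critical section ... */
                mutex_unlock(&m);
        }

        /* down_trylock(): a zero return means the semaphore WAS acquired. */
        if (down_trylock(&s) == 0) {
                /* ... critical section ... */
                up(&s);
        }
}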
+1 -2
kernel/sched_fair.c
···
  find_idlest_group(struct sched_domain *sd, struct task_struct *p,
  		  int this_cpu, int load_idx)
  {
- 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+ 	struct sched_group *idlest = NULL, *group = sd->groups;
  	unsigned long min_load = ULONG_MAX, this_load = 0;
  	int imbalance = 100 + (sd->imbalance_pct-100)/2;
···

  	if (local_group) {
  		this_load = avg_load;
- 		this = group;
  	} else if (avg_load < min_load) {
  		min_load = avg_load;
  		idlest = group;
+2
kernel/sys.c
···
  		pgid = pid;
  	if (pgid < 0)
  		return -EINVAL;
+ 	rcu_read_lock();

  	/* From this point forward we keep holding onto the tasklist lock
  	 * so that our parent does not change from under us. -DaveM
···
  out:
  	/* All paths lead to here, thus we are safe. -DaveM */
  	write_unlock_irq(&tasklist_lock);
+ 	rcu_read_unlock();
  	return err;
  }
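For reference, the pattern this change relies on: pid-to-task lookups such as find_task_by_vpid() must run inside an RCU read-side critical section, and a reference must be taken before that section ends if the task is used afterwards. A minimal sketch under those assumptions; the helper name is hypothetical, not part of this patch:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical helper, not part of this patch. */
static struct task_struct *grab_task_by_vpid(pid_t nr)
{
        struct task_struct *p;

        rcu_read_lock();                /* pins the pid/task structures */
        p = find_task_by_vpid(nr);      /* only valid inside the RCU section */
        if (p)
                get_task_struct(p);     /* hold a reference before leaving */
        rcu_read_unlock();

        return p;                       /* caller drops it with put_task_struct() */
}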
+1 -4
kernel/sysctl.c
···
  {
  	sysctl_set_parent(NULL, root_table);
  #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
- 	{
- 		int err;
- 		err = sysctl_check_table(current->nsproxy, root_table);
- 	}
+ 	sysctl_check_table(current->nsproxy, root_table);
  #endif
  	return 0;
  }
-2
kernel/trace/ring_buffer.c
···

  static void rb_advance_iter(struct ring_buffer_iter *iter)
  {
- 	struct ring_buffer *buffer;
  	struct ring_buffer_per_cpu *cpu_buffer;
  	struct ring_buffer_event *event;
  	unsigned length;

  	cpu_buffer = iter->cpu_buffer;
- 	buffer = cpu_buffer->buffer;

  	/*
  	 * Check if we are at the end of the buffer.