Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
"Two futex fixes: an input-parameter robustness fix, and futex race
fixes"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
futex: Prevent overflow by strengthening input validation
futex: Avoid violating the 10th rule of futex

+90 -23
+70 -16
kernel/futex.c
··· 1878 1878 struct futex_q *this, *next; 1879 1879 DEFINE_WAKE_Q(wake_q); 1880 1880 1881 + if (nr_wake < 0 || nr_requeue < 0) 1882 + return -EINVAL; 1883 + 1881 1884 /* 1882 1885 * When PI not supported: return -ENOSYS if requeue_pi is true, 1883 1886 * consequently the compiler knows requeue_pi is always false past ··· 2297 2294 spin_unlock(q->lock_ptr); 2298 2295 } 2299 2296 2300 - /* 2301 - * Fixup the pi_state owner with the new owner. 2302 - * 2303 - * Must be called with hash bucket lock held and mm->sem held for non 2304 - * private futexes. 2305 - */ 2306 2297 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, 2307 - struct task_struct *newowner) 2298 + struct task_struct *argowner) 2308 2299 { 2309 - u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; 2310 2300 struct futex_pi_state *pi_state = q->pi_state; 2311 2301 u32 uval, uninitialized_var(curval), newval; 2312 - struct task_struct *oldowner; 2302 + struct task_struct *oldowner, *newowner; 2303 + u32 newtid; 2313 2304 int ret; 2305 + 2306 + lockdep_assert_held(q->lock_ptr); 2314 2307 2315 2308 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); 2316 2309 ··· 2316 2317 newtid |= FUTEX_OWNER_DIED; 2317 2318 2318 2319 /* 2319 - * We are here either because we stole the rtmutex from the 2320 - * previous highest priority waiter or we are the highest priority 2321 - * waiter but have failed to get the rtmutex the first time. 2320 + * We are here because either: 2322 2321 * 2323 - * We have to replace the newowner TID in the user space variable. 2322 + * - we stole the lock and pi_state->owner needs updating to reflect 2323 + * that (@argowner == current), 2324 + * 2325 + * or: 2326 + * 2327 + * - someone stole our lock and we need to fix things to point to the 2328 + * new owner (@argowner == NULL). 2329 + * 2330 + * Either way, we have to replace the TID in the user space variable. 2324 2331 * This must be atomic as we have to preserve the owner died bit here. 
2325 2332 * 2326 2333 * Note: We write the user space value _before_ changing the pi_state ··· 2339 2334 * in the PID check in lookup_pi_state. 2340 2335 */ 2341 2336 retry: 2337 + if (!argowner) { 2338 + if (oldowner != current) { 2339 + /* 2340 + * We raced against a concurrent self; things are 2341 + * already fixed up. Nothing to do. 2342 + */ 2343 + ret = 0; 2344 + goto out_unlock; 2345 + } 2346 + 2347 + if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { 2348 + /* We got the lock after all, nothing to fix. */ 2349 + ret = 0; 2350 + goto out_unlock; 2351 + } 2352 + 2353 + /* 2354 + * Since we just failed the trylock; there must be an owner. 2355 + */ 2356 + newowner = rt_mutex_owner(&pi_state->pi_mutex); 2357 + BUG_ON(!newowner); 2358 + } else { 2359 + WARN_ON_ONCE(argowner != current); 2360 + if (oldowner == current) { 2361 + /* 2362 + * We raced against a concurrent self; things are 2363 + * already fixed up. Nothing to do. 2364 + */ 2365 + ret = 0; 2366 + goto out_unlock; 2367 + } 2368 + newowner = argowner; 2369 + } 2370 + 2371 + newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; 2372 + 2342 2373 if (get_futex_value_locked(&uval, uaddr)) 2343 2374 goto handle_fault; 2344 2375 ··· 2475 2434 * Got the lock. We might not be the anticipated owner if we 2476 2435 * did a lock-steal - fix up the PI-state in that case: 2477 2436 * 2478 - * We can safely read pi_state->owner without holding wait_lock 2479 - * because we now own the rt_mutex, only the owner will attempt 2480 - * to change it. 2437 + * Speculative pi_state->owner read (we don't hold wait_lock); 2438 + * since we own the lock pi_state->owner == current is the 2439 + * stable state, anything else needs more attention. 2481 2440 */ 2482 2441 if (q->pi_state->owner != current) 2483 2442 ret = fixup_pi_state_owner(uaddr, q, current); 2443 + goto out; 2444 + } 2445 + 2446 + /* 2447 + * If we didn't get the lock; check if anybody stole it from us. 
In 2448 + * that case, we need to fix up the uval to point to them instead of 2449 + * us, otherwise bad things happen. [10] 2450 + * 2451 + * Another speculative read; pi_state->owner == current is unstable 2452 + * but needs our attention. 2453 + */ 2454 + if (q->pi_state->owner == current) { 2455 + ret = fixup_pi_state_owner(uaddr, q, NULL); 2484 2456 goto out; 2485 2457 } 2486 2458
+19 -7
kernel/locking/rtmutex.c
··· 1290 1290 return ret; 1291 1291 } 1292 1292 1293 + static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) 1294 + { 1295 + int ret = try_to_take_rt_mutex(lock, current, NULL); 1296 + 1297 + /* 1298 + * try_to_take_rt_mutex() sets the lock waiters bit 1299 + * unconditionally. Clean this up. 1300 + */ 1301 + fixup_rt_mutex_waiters(lock); 1302 + 1303 + return ret; 1304 + } 1305 + 1293 1306 /* 1294 1307 * Slow path try-lock function: 1295 1308 */ ··· 1325 1312 */ 1326 1313 raw_spin_lock_irqsave(&lock->wait_lock, flags); 1327 1314 1328 - ret = try_to_take_rt_mutex(lock, current, NULL); 1329 - 1330 - /* 1331 - * try_to_take_rt_mutex() sets the lock waiters bit 1332 - * unconditionally. Clean this up. 1333 - */ 1334 - fixup_rt_mutex_waiters(lock); 1315 + ret = __rt_mutex_slowtrylock(lock); 1335 1316 1336 1317 raw_spin_unlock_irqrestore(&lock->wait_lock, flags); 1337 1318 ··· 1510 1503 int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) 1511 1504 { 1512 1505 return rt_mutex_slowtrylock(lock); 1506 + } 1507 + 1508 + int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) 1509 + { 1510 + return __rt_mutex_slowtrylock(lock); 1513 1511 } 1514 1512 1515 1513 /**
+1
kernel/locking/rtmutex_common.h
··· 148 148 struct rt_mutex_waiter *waiter); 149 149 150 150 extern int rt_mutex_futex_trylock(struct rt_mutex *l); 151 + extern int __rt_mutex_futex_trylock(struct rt_mutex *l); 151 152 152 153 extern void rt_mutex_futex_unlock(struct rt_mutex *lock); 153 154 extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,