···
 #include <linux/syscalls.h>
 #include <linux/time.h>
 #include <linux/rcupdate.h>
+#include <linux/pid_namespace.h>
 
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
···
 	fl->fl_fasync = NULL;
 	fl->fl_owner = NULL;
 	fl->fl_pid = 0;
+	fl->fl_nspid = NULL;
 	fl->fl_file = NULL;
 	fl->fl_flags = 0;
 	fl->fl_type = 0;
···
 {
 	list_add(&fl->fl_link, &file_lock_list);
 
+	fl->fl_nspid = get_pid(task_tgid(current));
+
 	/* insert into file's list */
 	fl->fl_next = *pos;
 	*pos = fl;
···
 
 	if (fl->fl_ops && fl->fl_ops->fl_remove)
 		fl->fl_ops->fl_remove(fl);
+
+	if (fl->fl_nspid) {
+		put_pid(fl->fl_nspid);
+		fl->fl_nspid = NULL;
+	}
 
 	locks_wake_up_blocks(fl);
 	locks_free_lock(fl);
···
 	return (locks_conflict(caller_fl, sys_fl));
 }
 
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
-	int result = 0;
-	DECLARE_WAITQUEUE(wait, current);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(fl_wait, &wait);
-	if (timeout == 0)
-		schedule();
-	else
-		result = schedule_timeout(timeout);
-	if (signal_pending(current))
-		result = -ERESTARTSYS;
-	remove_wait_queue(fl_wait, &wait);
-	__set_current_state(TASK_RUNNING);
-	return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
-	int result;
-	locks_insert_block(blocker, waiter);
-	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
-	__locks_delete_block(waiter);
-	return result;
-}
-
 void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
···
 		if (posix_locks_conflict(fl, cfl))
 			break;
 	}
-	if (cfl)
+	if (cfl) {
 		__locks_copy_lock(fl, cfl);
-	else
+		if (cfl->fl_nspid)
+			fl->fl_pid = pid_nr_ns(cfl->fl_nspid,
+					task_active_pid_ns(current));
+	} else
 		fl->fl_type = F_UNLCK;
 	unlock_kernel();
 	return;
 }
-
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
  *
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
  *
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turns may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
  *
- * However, the failure of this assumption (also possible in the case of
- * multiple tasks sharing the same open file table) also means there's no
- * guarantee that the loop below will terminate.  As a hack, we give up
- * after a few iterations.
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
+ *
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ *
+ * To handle those cases, we just bail out after a few iterations.
  */
 
 #define MAX_DEADLK_ITERATIONS 10
 
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+	struct file_lock *fl;
+
+	list_for_each_entry(fl, &blocked_list, fl_link) {
+		if (posix_same_owner(fl, block_fl))
+			return fl->fl_next;
+	}
+	return NULL;
+}
+
 static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct file_lock *fl;
 	int i = 0;
 
-next_task:
-	if (posix_same_owner(caller_fl, block_fl))
-		return 1;
-	list_for_each_entry(fl, &blocked_list, fl_link) {
-		if (posix_same_owner(fl, block_fl)) {
-			if (i++ > MAX_DEADLK_ITERATIONS)
-				return 0;
-			fl = fl->fl_next;
-			block_fl = fl;
-			goto next_task;
-		}
+	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+		if (i++ > MAX_DEADLK_ITERATIONS)
+			return 0;
+		if (posix_same_owner(caller_fl, block_fl))
+			return 1;
 	}
 	return 0;
 }
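The rewritten posix_locks_deadlock() above simply walks the chain of waiters: starting from the lock we are about to block on, it repeatedly asks what that lock's owner is itself waiting for, and reports a deadlock only if the chain leads back to the caller. Below is a minimal userspace model of that walk; it is an illustrative sketch with made-up owner ids (blocked_edge, would_deadlock), not kernel code.

#include <stdio.h>

#define MAX_DEADLK_ITERATIONS 10

/* "owner .waiter is asleep waiting for a lock held by owner .holder" */
struct blocked_edge {
	int waiter;
	int holder;
};

/* analogue of the global blocked_list in fs/locks.c */
static struct blocked_edge blocked[8];
static int nr_blocked;

/* analogue of what_owner_is_waiting_for(); -1 means "not waiting" */
static int what_owner_is_waiting_for(int owner)
{
	int i;

	for (i = 0; i < nr_blocked; i++)
		if (blocked[i].waiter == owner)
			return blocked[i].holder;
	return -1;
}

/* analogue of posix_locks_deadlock(): would "caller" sleeping on a
 * lock held by "holder" close a cycle of waiters? */
static int would_deadlock(int caller, int holder)
{
	int i = 0;

	while ((holder = what_owner_is_waiting_for(holder)) != -1) {
		if (i++ > MAX_DEADLK_ITERATIONS)
			return 0;	/* give up, as the kernel does */
		if (holder == caller)
			return 1;	/* the chain loops back to us */
	}
	return 0;
}

int main(void)
{
	/* owner 2 already sleeps on a lock held by owner 1 */
	blocked[nr_blocked].waiter = 2;
	blocked[nr_blocked].holder = 1;
	nr_blocked++;

	printf("1 waiting on 2 -> deadlock? %d\n", would_deadlock(1, 2)); /* 1 */
	printf("3 waiting on 1 -> deadlock? %d\n", would_deadlock(3, 1)); /* 0 */
	return 0;
}

The MAX_DEADLK_ITERATIONS cap plays the same role here as in the patch: give up in the cases (broken NFS clients, tasks sharing an open file table) where the one-waiter-per-owner assumption does not hold.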
···
 		if (break_time == 0)
 			break_time++;
 	}
-	error = locks_block_on_timeout(flock, new_fl, break_time);
+	locks_insert_block(flock, new_fl);
+	error = wait_event_interruptible_timeout(new_fl->fl_wait,
+						!new_fl->fl_next, break_time);
+	__locks_delete_block(new_fl);
 	if (error >= 0) {
 		if (error == 0)
 			time_out_leases(inode);
···
 			int id, char *pfx)
 {
 	struct inode *inode = NULL;
+	unsigned int fl_pid;
+
+	if (fl->fl_nspid)
+		fl_pid = pid_nr_ns(fl->fl_nspid, task_active_pid_ns(current));
+	else
+		fl_pid = fl->fl_pid;
 
 	if (fl->fl_file != NULL)
 		inode = fl->fl_file->f_path.dentry->d_inode;
···
 	}
 	if (inode) {
 #ifdef WE_CAN_BREAK_LSLK_NOW
-		seq_printf(f, "%d %s:%ld ", fl->fl_pid,
+		seq_printf(f, "%d %s:%ld ", fl_pid,
 				inode->i_sb->s_id, inode->i_ino);
 #else
 		/* userspace relies on this representation of dev_t ;-( */
-		seq_printf(f, "%d %02x:%02x:%ld ", fl->fl_pid,
+		seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
 				MAJOR(inode->i_sb->s_dev),
 				MINOR(inode->i_sb->s_dev), inode->i_ino);
 #endif
 	} else {
-		seq_printf(f, "%d <none>:0 ", fl->fl_pid);
+		seq_printf(f, "%d <none>:0 ", fl_pid);
 	}
 	if (IS_POSIX(fl)) {
 		if (fl->fl_end == OFFSET_MAX)
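With fl_nspid recorded at locks_insert_lock() time, the pid column that lock_get_status() writes to /proc/locks is now the lock owner's pid as seen from the pid namespace of the task reading the file, falling back to the raw fl_pid when no fl_nspid was recorded. A quick userspace check of the visible effect follows; it is a sketch only, and the scratch file path is an assumption, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0600);	/* assumed path */
	FILE *p;
	int c;

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("lock");
		return 1;
	}

	/* our lock's line in /proc/locks should show this pid,
	 * as seen from the reader's pid namespace */
	printf("holder pid: %d\n", (int)getpid());

	p = fopen("/proc/locks", "r");
	if (p)
		while ((c = fgetc(p)) != EOF)
			putchar(c);
	return 0;
}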
include/linux/fs.h
···
 	struct list_head fl_block;	/* circular list of blocked processes */
 	fl_owner_t fl_owner;
 	unsigned int fl_pid;
+	struct pid *fl_nspid;
 	wait_queue_head_t fl_wait;
 	struct file *fl_file;
 	unsigned char fl_flags;
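For completeness, the posix_test_lock() hunk in fs/locks.c gives the same namespace-aware behaviour to F_GETLK: the l_pid reported for a conflicting lock is the owner's pid as seen by the asking task. A rough self-contained check follows; the file path and the sleep-based synchronisation between parent and child are assumptions for illustration only.

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd = open("/tmp/locktest", O_RDWR | O_CREAT, 0600);	/* assumed path */
	pid_t child;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	child = fork();
	if (child == 0) {
		/* child: grab a write lock on the whole file, then linger */
		if (fcntl(fd, F_SETLK, &fl) < 0)
			_exit(1);
		sleep(2);
		_exit(0);
	}

	sleep(1);	/* crude: give the child time to take the lock */

	/* parent: who would block a write lock of ours? */
	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
		printf("conflict held by pid %d (child is %d)\n",
		       (int)fl.l_pid, (int)child);

	waitpid(child, NULL, 0);
	return 0;
}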