Merge branch 'fixes' of git://git.linux-nfs.org/pub/linux/nfs-2.6

* 'fixes' of git://git.linux-nfs.org/pub/linux/nfs-2.6:
SUNRPC: Fix obvious refcounting bugs in rpc_pipefs.
RPC: Ensure that we disconnect TCP socket when client requests error out
NLM/lockd: remove b_done
NFS: make 2 functions static
NFS: Release dcache_lock in an error path of nfs_path

+75 -62
+3 -9
fs/lockd/svclock.c
···
638         if (task->tk_status < 0) {
639                 /* RPC error: Re-insert for retransmission */
640                 timeout = 10 * HZ;
641 -       } else if (block->b_done) {
642 -               /* Block already removed, kill it for real */
643 -               timeout = 0;
644         } else {
645                 /* Call was successful, now wait for client callback */
646                 timeout = 60 * HZ;
···
706                         break;
707                 if (time_after(block->b_when,jiffies))
708                         break;
709 -               dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
710 -                       block, block->b_when, block->b_done);
711                 kref_get(&block->b_count);
712 -               if (block->b_done)
713 -                       nlmsvc_unlink_block(block);
714 -               else
715 -                       nlmsvc_grant_blocked(block);
716                 nlmsvc_release_block(block);
717         }
718
···
638         if (task->tk_status < 0) {
639                 /* RPC error: Re-insert for retransmission */
640                 timeout = 10 * HZ;
641         } else {
642                 /* Call was successful, now wait for client callback */
643                 timeout = 60 * HZ;
···
709                         break;
710                 if (time_after(block->b_when,jiffies))
711                         break;
712 +               dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
713 +                       block, block->b_when);
714                 kref_get(&block->b_count);
715 +               nlmsvc_grant_blocked(block);
716                 nlmsvc_release_block(block);
717         }
718
+3 -1
fs/nfs/namespace.c
···
51                 namelen = dentry->d_name.len;
52                 buflen -= namelen + 1;
53                 if (buflen < 0)
54 -                       goto Elong;
55                 end -= namelen;
56                 memcpy(end, dentry->d_name.name, namelen);
57                 *--end = '/';
···
68         end -= namelen;
69         memcpy(end, base, namelen);
70         return end;
71 Elong:
72         return ERR_PTR(-ENAMETOOLONG);
73 }
···
51                 namelen = dentry->d_name.len;
52                 buflen -= namelen + 1;
53                 if (buflen < 0)
54 +                       goto Elong_unlock;
55                 end -= namelen;
56                 memcpy(end, dentry->d_name.name, namelen);
57                 *--end = '/';
···
68         end -= namelen;
69         memcpy(end, base, namelen);
70         return end;
71 + Elong_unlock:
72 +       spin_unlock(&dcache_lock);
73 Elong:
74         return ERR_PTR(-ENAMETOOLONG);
75 }
+1 -1
fs/nfs/read.c
···
63         return p;
64 }
65
66 - void nfs_readdata_free(struct nfs_read_data *p)
67 {
68         if (p && (p->pagevec != &p->page_array[0]))
69                 kfree(p->pagevec);
···
63         return p;
64 }
65
66 + static void nfs_readdata_free(struct nfs_read_data *p)
67 {
68         if (p && (p->pagevec != &p->page_array[0]))
69                 kfree(p->pagevec);
+1 -1
fs/nfs/write.c
···
137         return p;
138 }
139
140 - void nfs_writedata_free(struct nfs_write_data *p)
141 {
142         if (p && (p->pagevec != &p->page_array[0]))
143                 kfree(p->pagevec);
···
137         return p;
138 }
139
140 + static void nfs_writedata_free(struct nfs_write_data *p)
141 {
142         if (p && (p->pagevec != &p->page_array[0]))
143                 kfree(p->pagevec);
-1
include/linux/lockd/lockd.h
···
123         unsigned int            b_id;           /* block id */
124         unsigned char           b_queued;       /* re-queued */
125         unsigned char           b_granted;      /* VFS granted lock */
126 -       unsigned char           b_done;         /* callback complete */
127         struct nlm_file *       b_file;         /* file in question */
128 };
129
···
123         unsigned int            b_id;           /* block id */
124         unsigned char           b_queued;       /* re-queued */
125         unsigned char           b_granted;      /* VFS granted lock */
126         struct nlm_file *       b_file;         /* file in question */
127 };
128
+2 -4
include/linux/nfs_fs.h
···
476 }
477
478 /*
479 - * Allocate and free nfs_write_data structures
480  */
481 extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
482 - extern void nfs_writedata_free(struct nfs_write_data *p);
483
484 /*
485  * linux/fs/nfs/read.c
···
490 extern void nfs_readdata_release(void *data);
491
492 /*
493 - * Allocate and free nfs_read_data structures
494  */
495 extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
496 - extern void nfs_readdata_free(struct nfs_read_data *p);
497
498 /*
499  * linux/fs/nfs3proc.c
···
476 }
477
478 /*
479 + * Allocate nfs_write_data structures
480  */
481 extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
482
483 /*
484  * linux/fs/nfs/read.c
···
491 extern void nfs_readdata_release(void *data);
492
493 /*
494 + * Allocate nfs_read_data structures
495  */
496 extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
497
498 /*
499  * linux/fs/nfs3proc.c
+1 -1
include/linux/sunrpc/xprt.h
···
229 int    xprt_reserve_xprt_cong(struct rpc_task *task);
230 int    xprt_prepare_transmit(struct rpc_task *task);
231 void   xprt_transmit(struct rpc_task *task);
232 - void xprt_abort_transmit(struct rpc_task *task);
233 int    xprt_adjust_timeout(struct rpc_rqst *req);
234 void   xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
235 void   xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
···
229 int    xprt_reserve_xprt_cong(struct rpc_task *task);
230 int    xprt_prepare_transmit(struct rpc_task *task);
231 void   xprt_transmit(struct rpc_task *task);
232 + void xprt_end_transmit(struct rpc_task *task);
233 int    xprt_adjust_timeout(struct rpc_rqst *req);
234 void   xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
235 void   xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+29 -23
net/sunrpc/clnt.c
···
921         task->tk_status = xprt_prepare_transmit(task);
922         if (task->tk_status != 0)
923                 return;
924         /* Encode here so that rpcsec_gss can use correct sequence number. */
925         if (rpc_task_need_encode(task)) {
926 -               task->tk_rqstp->rq_bytes_sent = 0;
927                 call_encode(task);
928                 /* Did the encode result in an error condition? */
929                 if (task->tk_status != 0)
930 -                       goto out_nosend;
931         }
932 -       task->tk_action = call_transmit_status;
933         xprt_transmit(task);
934         if (task->tk_status < 0)
935                 return;
936 -       if (!task->tk_msg.rpc_proc->p_decode) {
937 -               task->tk_action = rpc_exit_task;
938 -               rpc_wake_up_task(task);
939 -       }
940 -       return;
941 - out_nosend:
942 -       /* release socket write lock before attempting to handle error */
943 -       xprt_abort_transmit(task);
944         rpc_task_force_reencode(task);
945 }
946
···
1009 }
1010
1011 /*
1012 - * 6a. Handle transmission errors.
1013 - */
1014 - static void
1015 - call_transmit_status(struct rpc_task *task)
1016 - {
1017 -       if (task->tk_status != -EAGAIN)
1018 -               rpc_task_force_reencode(task);
1019 -       call_status(task);
1020 - }
1021 -
1022 - /*
1023 - * 6b. Handle RPC timeout
1024  * We do not release the request slot, so we keep using the
1025  * same XID for all retransmits.
1026  */
···
921         task->tk_status = xprt_prepare_transmit(task);
922         if (task->tk_status != 0)
923                 return;
924 +       task->tk_action = call_transmit_status;
925         /* Encode here so that rpcsec_gss can use correct sequence number. */
926         if (rpc_task_need_encode(task)) {
927 +               BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
928                 call_encode(task);
929                 /* Did the encode result in an error condition? */
930                 if (task->tk_status != 0)
931 +                       return;
932         }
933         xprt_transmit(task);
934         if (task->tk_status < 0)
935                 return;
936 +       /*
937 +        * On success, ensure that we call xprt_end_transmit() before sleeping
938 +        * in order to allow access to the socket to other RPC requests.
939 +        */
940 +       call_transmit_status(task);
941 +       if (task->tk_msg.rpc_proc->p_decode != NULL)
942 +               return;
943 +       task->tk_action = rpc_exit_task;
944 +       rpc_wake_up_task(task);
945 + }
946 +
947 + /*
948 + * 5a. Handle cleanup after a transmission
949 + */
950 + static void
951 + call_transmit_status(struct rpc_task *task)
952 + {
953 +       task->tk_action = call_status;
954 +       /*
955 +        * Special case: if we've been waiting on the socket's write_space()
956 +        * callback, then don't call xprt_end_transmit().
957 +        */
958 +       if (task->tk_status == -EAGAIN)
959 +               return;
960 +       xprt_end_transmit(task);
961         rpc_task_force_reencode(task);
962 }
963
···
992 }
993
994 /*
995 + * 6a. Handle RPC timeout
996  * We do not release the request slot, so we keep using the
997  * same XID for all retransmits.
998  */
+4 -2
net/sunrpc/rpc_pipe.c
···
667                         RPCAUTH_info, RPCAUTH_EOF);
668         if (error)
669                 goto err_depopulate;
670 out:
671         mutex_unlock(&dir->i_mutex);
672         rpc_release_path(&nd);
673 -       return dget(dentry);
674 err_depopulate:
675         rpc_depopulate(dentry);
676         __rpc_rmdir(dir, dentry);
···
732         rpci->flags = flags;
733         rpci->ops = ops;
734         inode_dir_notify(dir, DN_CREATE);
735 out:
736         mutex_unlock(&dir->i_mutex);
737         rpc_release_path(&nd);
738 -       return dget(dentry);
739 err_dput:
740         dput(dentry);
741         dentry = ERR_PTR(-ENOMEM);
···
667                         RPCAUTH_info, RPCAUTH_EOF);
668         if (error)
669                 goto err_depopulate;
670 +       dget(dentry);
671 out:
672         mutex_unlock(&dir->i_mutex);
673         rpc_release_path(&nd);
674 +       return dentry;
675 err_depopulate:
676         rpc_depopulate(dentry);
677         __rpc_rmdir(dir, dentry);
···
731         rpci->flags = flags;
732         rpci->ops = ops;
733         inode_dir_notify(dir, DN_CREATE);
734 +       dget(dentry);
735 out:
736         mutex_unlock(&dir->i_mutex);
737         rpc_release_path(&nd);
738 +       return dentry;
739 err_dput:
740         dput(dentry);
741         dentry = ERR_PTR(-ENOMEM);
+3 -18
net/sunrpc/xprt.c
···
707         return err;
708 }
709
710 - void
711 - xprt_abort_transmit(struct rpc_task *task)
712 {
713 -       struct rpc_xprt *xprt = task->tk_xprt;
714 -
715 -       xprt_release_write(xprt, task);
716 }
717
718 /**
···
758                 task->tk_status = -ENOTCONN;
759         else if (!req->rq_received)
760                 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
761 -
762 -       xprt->ops->release_xprt(xprt, task);
763         spin_unlock_bh(&xprt->transport_lock);
764         return;
765 }
···
767          * schedq, and being picked up by a parallel run of rpciod().
768          */
769         task->tk_status = status;
770 -
771 -       switch (status) {
772 -       case -ECONNREFUSED:
773                 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
774 -       case -EAGAIN:
775 -       case -ENOTCONN:
776 -               return;
777 -       default:
778 -               break;
779 -       }
780 -       xprt_release_write(xprt, task);
781 -       return;
782 }
783
784 static inline void do_xprt_reserve(struct rpc_task *task)
···
707         return err;
708 }
709
710 + void xprt_end_transmit(struct rpc_task *task)
711 {
712 +       xprt_release_write(task->tk_xprt, task);
713 }
714
715 /**
···
761                 task->tk_status = -ENOTCONN;
762         else if (!req->rq_received)
763                 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
764         spin_unlock_bh(&xprt->transport_lock);
765         return;
766 }
···
772          * schedq, and being picked up by a parallel run of rpciod().
773          */
774         task->tk_status = status;
775 +       if (status == -ECONNREFUSED)
776                 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
777 }
778
779 static inline void do_xprt_reserve(struct rpc_task *task)
+28 -1
net/sunrpc/xprtsock.c
···
414 }
415
416 /**
417  * xs_close - close a socket
418  * @xprt: transport
419  *
···
1277
1278 static struct rpc_xprt_ops xs_tcp_ops = {
1279         .reserve_xprt           = xprt_reserve_xprt,
1280 -       .release_xprt           = xprt_release_xprt,
1281         .set_port               = xs_set_port,
1282         .connect                = xs_connect,
1283         .buf_alloc              = rpc_malloc,
···
414 }
415
416 /**
417 + * xs_tcp_release_xprt - clean up after a tcp transmission
418 + * @xprt: transport
419 + * @task: rpc task
420 + *
421 + * This cleans up if an error causes us to abort the transmission of a request.
422 + * In this case, the socket may need to be reset in order to avoid confusing
423 + * the server.
424 + */
425 + static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
426 + {
427 +       struct rpc_rqst *req;
428 +
429 +       if (task != xprt->snd_task)
430 +               return;
431 +       if (task == NULL)
432 +               goto out_release;
433 +       req = task->tk_rqstp;
434 +       if (req->rq_bytes_sent == 0)
435 +               goto out_release;
436 +       if (req->rq_bytes_sent == req->rq_snd_buf.len)
437 +               goto out_release;
438 +       set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
439 + out_release:
440 +       xprt_release_xprt(xprt, task);
441 + }
442 +
443 + /**
444  * xs_close - close a socket
445  * @xprt: transport
446  *
···
1250
1251 static struct rpc_xprt_ops xs_tcp_ops = {
1252         .reserve_xprt           = xprt_reserve_xprt,
1253 +       .release_xprt           = xs_tcp_release_xprt,
1254         .set_port               = xs_set_port,
1255         .connect                = xs_connect,
1256         .buf_alloc              = rpc_malloc,