Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
[AF_RXRPC]: Return the number of bytes buffered in rxrpc_send_data()
[IPVS]: Fix state variable on failure to start ipvs threads
[XFRM]: Fix MTU calculation for non-ESP SAs

+44 -4
+39 -2
net/ipv4/ipvs/ip_vs_sync.c
··· 67 67 struct ip_vs_seq out_seq; /* outgoing seq. struct */ 68 68 }; 69 69 70 + struct ip_vs_sync_thread_data { 71 + struct completion *startup; 72 + int state; 73 + }; 74 + 70 75 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ) 71 76 #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn)) 72 77 #define FULL_CONN_SIZE \ ··· 756 751 mm_segment_t oldmm; 757 752 int state; 758 753 const char *name; 754 + struct ip_vs_sync_thread_data *tinfo = startup; 759 755 760 756 /* increase the module use count */ 761 757 ip_vs_use_count_inc(); ··· 795 789 add_wait_queue(&sync_wait, &wait); 796 790 797 791 set_sync_pid(state, current->pid); 798 - complete((struct completion *)startup); 792 + complete(tinfo->startup); 793 + 794 + /* 795 + * once we call the completion queue above, we should 796 + * null out that reference, since its allocated on the 797 + * stack of the creating kernel thread 798 + */ 799 + tinfo->startup = NULL; 799 800 800 801 /* processing master/backup loop here */ 801 802 if (state == IP_VS_STATE_MASTER) ··· 814 801 remove_wait_queue(&sync_wait, &wait); 815 802 816 803 /* thread exits */ 804 + 805 + /* 806 + * If we weren't explicitly stopped, then we 807 + * exited in error, and should undo our state 808 + */ 809 + if ((!stop_master_sync) && (!stop_backup_sync)) 810 + ip_vs_sync_state -= tinfo->state; 811 + 817 812 set_sync_pid(state, 0); 818 813 IP_VS_INFO("sync thread stopped!\n"); 819 814 ··· 833 812 set_stop_sync(state, 0); 834 813 wake_up(&stop_sync_wait); 835 814 815 + /* 816 + * we need to free the structure that was allocated 817 + * for us in start_sync_thread 818 + */ 819 + kfree(tinfo); 836 820 return 0; 837 821 } 838 822 ··· 864 838 { 865 839 DECLARE_COMPLETION_ONSTACK(startup); 866 840 pid_t pid; 841 + struct ip_vs_sync_thread_data *tinfo; 867 842 868 843 if ((state == IP_VS_STATE_MASTER && sync_master_pid) || 869 844 (state == IP_VS_STATE_BACKUP && sync_backup_pid)) 870 845 return -EEXIST; 846 + 847 + /* 848 + * Note that tinfo will be freed in sync_thread on exit 849 + */ 850 + tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL); 851 + if (!tinfo) 852 + return -ENOMEM; 871 853 872 854 IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid); 873 855 IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n", ··· 892 858 ip_vs_backup_syncid = syncid; 893 859 } 894 860 861 + tinfo->state = state; 862 + tinfo->startup = &startup; 863 + 895 864 repeat: 896 - if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) { 865 + if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) { 897 866 IP_VS_ERR("could not create fork_sync_thread due to %d... " 898 867 "retrying.\n", pid); 899 868 msleep_interruptible(1000);
+4 -1
net/rxrpc/ar-output.c
··· 640 640 goto efault; 641 641 sp->remain -= copy; 642 642 skb->mark += copy; 643 + copied += copy; 643 644 644 645 len -= copy; 645 646 segment -= copy; ··· 710 709 711 710 } while (segment > 0); 712 711 712 + success: 713 + ret = copied; 713 714 out: 714 715 call->tx_pending = skb; 715 716 _leave(" = %d", ret); ··· 728 725 729 726 maybe_error: 730 727 if (copied) 731 - ret = copied; 728 + goto success; 732 729 goto out; 733 730 734 731 efault:
+1 -1
net/xfrm/xfrm_state.c
··· 1729 1729 x->type && x->type->get_mtu) 1730 1730 res = x->type->get_mtu(x, mtu); 1731 1731 else 1732 - res = mtu; 1732 + res = mtu - x->props.header_len; 1733 1733 spin_unlock_bh(&x->lock); 1734 1734 return res; 1735 1735 }