Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mptcp-mem-scheduling'

Mat Martineau says:

====================
mptcp: Updates for mem scheduling and SK_RECLAIM

In the "net: reduce tcp_memory_allocated inflation" series (merge commit
e10b02ee5b6c), Eric Dumazet noted that "Removal of SK_RECLAIM_CHUNK and
SK_RECLAIM_THRESHOLD is left to MPTCP maintainers as a follow up."

Patches 1-3 align MPTCP with the above TCP changes to forward memory
allocation, reclaim, and memory scheduling.

Patch 4 removes the SK_RECLAIM_* macros as Eric requested.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+7 -47
-5
include/net/sock.h
··· 1619 1619 sk->sk_forward_alloc -= size; 1620 1620 } 1621 1621 1622 - /* the following macros control memory reclaiming in mptcp_rmem_uncharge() 1623 - */ 1624 - #define SK_RECLAIM_THRESHOLD (1 << 21) 1625 - #define SK_RECLAIM_CHUNK (1 << 20) 1626 - 1627 1622 static inline void sk_mem_uncharge(struct sock *sk, int size) 1628 1623 { 1629 1624 if (!sk_has_account(sk))
+7 -42
net/mptcp/protocol.c
··· 181 181 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); 182 182 183 183 /* see sk_mem_uncharge() for the rationale behind the following schema */ 184 - if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD)) 185 - __mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK); 184 + if (unlikely(reclaimable >= PAGE_SIZE)) 185 + __mptcp_rmem_reclaim(sk, reclaimable); 186 186 } 187 187 188 188 static void mptcp_rfree(struct sk_buff *skb) ··· 323 323 struct mptcp_sock *msk = mptcp_sk(sk); 324 324 int amt, amount; 325 325 326 - if (size < msk->rmem_fwd_alloc) 326 + if (size <= msk->rmem_fwd_alloc) 327 327 return true; 328 328 329 + size -= msk->rmem_fwd_alloc; 329 330 amt = sk_mem_pages(size); 330 331 amount = amt << PAGE_SHIFT; 331 - msk->rmem_fwd_alloc += amount; 332 - if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) { 333 - if (ssk->sk_forward_alloc < amount) { 334 - msk->rmem_fwd_alloc -= amount; 335 - return false; 336 - } 332 + if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) 333 + return false; 337 334 338 - ssk->sk_forward_alloc -= amount; 339 - } 335 + msk->rmem_fwd_alloc += amount; 340 336 return true; 341 337 } 342 338 ··· 962 966 df->data_seq + df->data_len == msk->write_seq; 963 967 } 964 968 965 - static void __mptcp_mem_reclaim_partial(struct sock *sk) 966 - { 967 - int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk); 968 - 969 - lockdep_assert_held_once(&sk->sk_lock.slock); 970 - 971 - if (reclaimable > (int)PAGE_SIZE) 972 - __mptcp_rmem_reclaim(sk, reclaimable - 1); 973 - 974 - sk_mem_reclaim(sk); 975 - } 976 - 977 - static void mptcp_mem_reclaim_partial(struct sock *sk) 978 - { 979 - mptcp_data_lock(sk); 980 - __mptcp_mem_reclaim_partial(sk); 981 - mptcp_data_unlock(sk); 982 - } 983 - 984 969 static void dfrag_uncharge(struct sock *sk, int len) 985 970 { 986 971 sk_mem_uncharge(sk, len); ··· 981 1004 { 982 1005 struct mptcp_sock *msk = mptcp_sk(sk); 983 1006 struct mptcp_data_frag *dtmp, *dfrag; 984 - bool cleaned = false;
985 1007 u64 snd_una; 986 1008 987 1009 /* on fallback we just need to ignore snd_una, as this is really ··· 1003 1027 } 1004 1028 1005 1029 dfrag_clear(sk, dfrag); 1006 - cleaned = true; 1007 1030 } 1008 1031 1009 1032 dfrag = mptcp_rtx_head(sk); ··· 1024 1049 dfrag->already_sent -= delta; 1025 1050 1026 1051 dfrag_uncharge(sk, delta); 1027 - cleaned = true; 1028 1052 } 1029 1053 1030 1054 /* all retransmitted data acked, recovery completed */ ··· 1031 1057 msk->recovery = false; 1032 1058 1033 1059 out: 1034 - if (cleaned && tcp_under_memory_pressure(sk)) 1035 - __mptcp_mem_reclaim_partial(sk); 1036 - 1037 1060 if (snd_una == READ_ONCE(msk->snd_nxt) && 1038 1061 snd_una == READ_ONCE(msk->write_seq)) { 1039 1062 if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) ··· 1182 1211 { 1183 1212 gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; 1184 1213 1185 - if (unlikely(tcp_under_memory_pressure(sk))) { 1186 - if (data_lock_held) 1187 - __mptcp_mem_reclaim_partial(sk); 1188 - else 1189 - mptcp_mem_reclaim_partial(sk); 1190 - } 1191 1214 return __mptcp_alloc_tx_skb(sk, ssk, gfp); 1192 1215