Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

af_iucv: use paged SKBs for big inbound messages

When an inbound message is bigger than a page, allocate a paged SKB,
and subsequently use IUCV receive primitive with IPBUFLST flag.
This relaxes the pressure to allocate big contiguous kernel buffers.

Signed-off-by: Eugene Crosser <Eugene.Crosser@ru.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eugene Crosser and committed by David S. Miller
a006353a 291759a5

+50 -6
net/iucv/af_iucv.c
··· 1231 1231 return err; 1232 1232 } 1233 1233 1234 + static struct sk_buff *alloc_iucv_recv_skb(unsigned long len) 1235 + { 1236 + size_t headroom, linear; 1237 + struct sk_buff *skb; 1238 + int err; 1239 + 1240 + if (len < PAGE_SIZE) { 1241 + headroom = 0; 1242 + linear = len; 1243 + } else { 1244 + headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1); 1245 + linear = PAGE_SIZE - headroom; 1246 + } 1247 + skb = alloc_skb_with_frags(headroom + linear, len - linear, 1248 + 0, &err, GFP_ATOMIC | GFP_DMA); 1249 + WARN_ONCE(!skb, 1250 + "alloc of recv iucv skb len=%lu failed with errcode=%d\n", 1251 + len, err); 1252 + if (skb) { 1253 + if (headroom) 1254 + skb_reserve(skb, headroom); 1255 + skb_put(skb, linear); 1256 + skb->len = len; 1257 + skb->data_len = len - linear; 1258 + } 1259 + return skb; 1260 + } 1261 + 1234 1262 /* iucv_process_message() - Receive a single outstanding IUCV message 1235 1263 * 1236 1264 * Locking: must be called with message_q.lock held ··· 1283 1255 skb->len = 0; 1284 1256 } 1285 1257 } else { 1286 - rc = pr_iucv->message_receive(path, msg, 1258 + if (skb_is_nonlinear(skb)) { 1259 + struct iucv_array *iba = (struct iucv_array *)skb->head; 1260 + int i; 1261 + 1262 + iba[0].address = (u32)(addr_t)skb->data; 1263 + iba[0].length = (u32)skb_headlen(skb); 1264 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1265 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1266 + 1267 + iba[i + 1].address = 1268 + (u32)(addr_t)skb_frag_address(frag); 1269 + iba[i + 1].length = (u32)skb_frag_size(frag); 1270 + } 1271 + rc = pr_iucv->message_receive(path, msg, 1272 + IUCV_IPBUFLST, 1273 + (void *)iba, len, NULL); 1274 + } else { 1275 + rc = pr_iucv->message_receive(path, msg, 1287 1276 msg->flags & IUCV_IPRMDATA, 1288 1277 skb->data, len, NULL); 1278 + } 1289 1279 if (rc) { 1290 1280 kfree_skb(skb); 1291 1281 return; 1292 1282 } 1293 - skb_reset_transport_header(skb); 1294 - skb_reset_network_header(skb); 1295 - skb->len = len; 1283 + 
WARN_ON_ONCE(skb->len != len); 1296 1284 } 1297 1285 1298 1286 IUCV_SKB_CB(skb)->offset = 0; ··· 1327 1283 struct sock_msg_q *p, *n; 1328 1284 1329 1285 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { 1330 - skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA); 1286 + skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg)); 1331 1287 if (!skb) 1332 1288 break; 1333 1289 iucv_process_message(sk, skb, p->path, &p->msg); ··· 1822 1778 if (len > sk->sk_rcvbuf) 1823 1779 goto save_message; 1824 1780 1825 - skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); 1781 + skb = alloc_iucv_recv_skb(iucv_msg_length(msg)); 1826 1782 if (!skb) 1827 1783 goto save_message; 1828 1784