Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

af_iucv: fix recvmsg by replacing skb_pull() function

When receiving data messages, the "BUG_ON(skb->len < skb->data_len)" in
the skb_pull() function triggers a kernel panic.

Replace the skb_pull logic by a per skb offset as advised by
Eric Dumazet.

Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <blaschka@linux.vnet.ibm.com>
Reviewed-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Ursula Braun and committed by David S. Miller.
f9c41a62 88c5b5ce

+24 -18
+8
include/net/iucv/af_iucv.h
··· 130 130 enum iucv_tx_notify n); 131 131 }; 132 132 133 + struct iucv_skb_cb { 134 + u32 class; /* target class of message */ 135 + u32 tag; /* tag associated with message */ 136 + u32 offset; /* offset for skb receival */ 137 + }; 138 + 139 + #define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0])) 140 + 133 141 /* iucv socket options (SOL_IUCV) */ 134 142 #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ 135 143 #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */
+16 -18
net/iucv/af_iucv.c
··· 49 49 50 50 #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) 51 51 52 - /* macros to set/get socket control buffer at correct offset */ 53 - #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ 54 - #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) 55 - #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ 56 - #define CB_TRGCLS_LEN (TRGCLS_SIZE) 57 - 58 52 #define __iucv_sock_wait(sk, condition, timeo, ret) \ 59 53 do { \ 60 54 DEFINE_WAIT(__wait); \ ··· 1135 1141 1136 1142 /* increment and save iucv message tag for msg_completion cbk */ 1137 1143 txmsg.tag = iucv->send_tag++; 1138 - memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); 1144 + IUCV_SKB_CB(skb)->tag = txmsg.tag; 1139 1145 1140 1146 if (iucv->transport == AF_IUCV_TRANS_HIPER) { 1141 1147 atomic_inc(&iucv->msg_sent); ··· 1218 1224 return -ENOMEM; 1219 1225 1220 1226 /* copy target class to control buffer of new skb */ 1221 - memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); 1227 + IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; 1222 1228 1223 1229 /* copy data fragment */ 1224 1230 memcpy(nskb->data, skb->data + copied, size); ··· 1250 1256 1251 1257 /* store msg target class in the second 4 bytes of skb ctrl buffer */ 1252 1258 /* Note: the first 4 bytes are reserved for msg tag */ 1253 - memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); 1259 + IUCV_SKB_CB(skb)->class = msg->class; 1254 1260 1255 1261 /* check for special IPRM messages (e.g. 
iucv_sock_shutdown) */ 1256 1262 if ((msg->flags & IUCV_IPRMDATA) && len > 7) { ··· 1286 1292 } 1287 1293 } 1288 1294 1295 + IUCV_SKB_CB(skb)->offset = 0; 1289 1296 if (sock_queue_rcv_skb(sk, skb)) 1290 1297 skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); 1291 1298 } ··· 1322 1327 unsigned int copied, rlen; 1323 1328 struct sk_buff *skb, *rskb, *cskb; 1324 1329 int err = 0; 1330 + u32 offset; 1325 1331 1326 1332 msg->msg_namelen = 0; 1327 1333 ··· 1344 1348 return err; 1345 1349 } 1346 1350 1347 - rlen = skb->len; /* real length of skb */ 1351 + offset = IUCV_SKB_CB(skb)->offset; 1352 + rlen = skb->len - offset; /* real length of skb */ 1348 1353 copied = min_t(unsigned int, rlen, len); 1349 1354 if (!rlen) 1350 1355 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; 1351 1356 1352 1357 cskb = skb; 1353 - if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { 1358 + if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { 1354 1359 if (!(flags & MSG_PEEK)) 1355 1360 skb_queue_head(&sk->sk_receive_queue, skb); 1356 1361 return -EFAULT; ··· 1369 1372 * get the trgcls from the control buffer of the skb due to 1370 1373 * fragmentation of original iucv message. 
*/ 1371 1374 err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, 1372 - CB_TRGCLS_LEN, CB_TRGCLS(skb)); 1375 + sizeof(IUCV_SKB_CB(skb)->class), 1376 + (void *)&IUCV_SKB_CB(skb)->class); 1373 1377 if (err) { 1374 1378 if (!(flags & MSG_PEEK)) 1375 1379 skb_queue_head(&sk->sk_receive_queue, skb); ··· 1382 1384 1383 1385 /* SOCK_STREAM: re-queue skb if it contains unreceived data */ 1384 1386 if (sk->sk_type == SOCK_STREAM) { 1385 - skb_pull(skb, copied); 1386 - if (skb->len) { 1387 - skb_queue_head(&sk->sk_receive_queue, skb); 1387 + if (copied < rlen) { 1388 + IUCV_SKB_CB(skb)->offset = offset + copied; 1388 1389 goto done; 1389 1390 } 1390 1391 } ··· 1402 1405 spin_lock_bh(&iucv->message_q.lock); 1403 1406 rskb = skb_dequeue(&iucv->backlog_skb_q); 1404 1407 while (rskb) { 1408 + IUCV_SKB_CB(rskb)->offset = 0; 1405 1409 if (sock_queue_rcv_skb(sk, rskb)) { 1406 1410 skb_queue_head(&iucv->backlog_skb_q, 1407 1411 rskb); ··· 1830 1832 spin_lock_irqsave(&list->lock, flags); 1831 1833 1832 1834 while (list_skb != (struct sk_buff *)list) { 1833 - if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { 1835 + if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { 1834 1836 this = list_skb; 1835 1837 break; 1836 1838 } ··· 2091 2093 skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); 2092 2094 skb_reset_transport_header(skb); 2093 2095 skb_reset_network_header(skb); 2096 + IUCV_SKB_CB(skb)->offset = 0; 2094 2097 spin_lock(&iucv->message_q.lock); 2095 2098 if (skb_queue_empty(&iucv->backlog_skb_q)) { 2096 2099 if (sock_queue_rcv_skb(sk, skb)) { ··· 2196 2197 /* fall through and receive zero length data */ 2197 2198 case 0: 2198 2199 /* plain data frame */ 2199 - memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, 2200 - CB_TRGCLS_LEN); 2200 + IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; 2201 2201 err = afiucv_hs_callback_rx(sk, skb); 2202 2202 break; 2203 2203 default: