Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()

This is part of the grand scheme to eliminate the qlen
member of skb_queue_head, and subsequently remove the
'list' member of sk_buff.

Most users of skb_queue_len() want to know if the queue is
empty or not, and that's trivially done with skb_queue_empty()
which doesn't use the skb_queue_head->qlen member and instead
uses the queue list emptiness as the test.

Signed-off-by: David S. Miller <davem@davemloft.net>

+84 -89
+1 -1
drivers/bluetooth/hci_vhci.c
··· 120 120 121 121 poll_wait(file, &hci_vhci->read_wait, wait); 122 122 123 - if (skb_queue_len(&hci_vhci->readq)) 123 + if (!skb_queue_empty(&hci_vhci->readq)) 124 124 return POLLIN | POLLRDNORM; 125 125 126 126 return POLLOUT | POLLWRNORM;
+2 -1
drivers/isdn/hisax/isdnl1.c
··· 279 279 if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) 280 280 st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); 281 281 if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) { 282 - if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && (!skb_queue_len(&bcs->squeue))) { 282 + if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && 283 + skb_queue_empty(&bcs->squeue)) { 283 284 st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL); 284 285 } 285 286 }
+9 -8
drivers/isdn/hisax/isdnl2.c
··· 108 108 static void 109 109 set_peer_busy(struct Layer2 *l2) { 110 110 test_and_set_bit(FLG_PEER_BUSY, &l2->flag); 111 - if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue)) 111 + if (!skb_queue_empty(&l2->i_queue) || 112 + !skb_queue_empty(&l2->ui_queue)) 112 113 test_and_set_bit(FLG_L2BLOCK, &l2->flag); 113 114 } 114 115 ··· 755 754 st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); 756 755 757 756 if ((ST_L2_7==state) || (ST_L2_8 == state)) 758 - if (skb_queue_len(&st->l2.i_queue) && cansend(st)) 757 + if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) 759 758 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 760 759 } 761 760 ··· 811 810 if (pr != -1) 812 811 st->l2.l2l3(st, pr, NULL); 813 812 814 - if (skb_queue_len(&st->l2.i_queue) && cansend(st)) 813 + if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) 815 814 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 816 815 } 817 816 ··· 1015 1014 if(typ != RR) FsmDelTimer(&st->l2.t203, 9); 1016 1015 restart_t200(st, 12); 1017 1016 } 1018 - if (skb_queue_len(&st->l2.i_queue) && (typ == RR)) 1017 + if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR)) 1019 1018 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 1020 1019 } else 1021 1020 nrerrorrecovery(fi); ··· 1121 1120 return; 1122 1121 } 1123 1122 1124 - if (skb_queue_len(&st->l2.i_queue) && (fi->state == ST_L2_7)) 1123 + if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7)) 1125 1124 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 1126 1125 if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag)) 1127 1126 enquiry_cr(st, RR, RSP, 0); ··· 1139 1138 test_and_set_bit(FLG_L3_INIT, &st->l2.flag); 1140 1139 } else 1141 1140 FsmChangeState(fi, ST_L2_4); 1142 - if (skb_queue_len(&st->l2.ui_queue)) 1141 + if (!skb_queue_empty(&st->l2.ui_queue)) 1143 1142 tx_ui(st); 1144 1143 } 1145 1144 ··· 1302 1301 FsmDelTimer(&st->l2.t203, 13); 1303 1302 FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11); 1304 1303 } 1305 - if (skb_queue_len(&l2->i_queue) && cansend(st)) 1304 + 
if (!skb_queue_empty(&l2->i_queue) && cansend(st)) 1306 1305 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 1307 1306 } 1308 1307 ··· 1348 1347 } 1349 1348 invoke_retransmission(st, nr); 1350 1349 FsmChangeState(fi, ST_L2_7); 1351 - if (skb_queue_len(&l2->i_queue) && cansend(st)) 1350 + if (!skb_queue_empty(&l2->i_queue) && cansend(st)) 1352 1351 st->l2.l2l1(st, PH_PULL | REQUEST, NULL); 1353 1352 } else 1354 1353 nrerrorrecovery(fi);
+1 -1
drivers/isdn/hisax/isdnl3.c
··· 302 302 !test_bit(FLG_PTP, &p->st->l2.flag)) { 303 303 if (p->debug) 304 304 l3_debug(p->st, "release_l3_process: last process"); 305 - if (!skb_queue_len(&p->st->l3.squeue)) { 305 + if (skb_queue_empty(&p->st->l3.squeue)) { 306 306 if (p->debug) 307 307 l3_debug(p->st, "release_l3_process: release link"); 308 308 if (p->st->protocol != ISDN_PTYPE_NI1)
+2 -2
drivers/isdn/i4l/isdn_tty.c
··· 1223 1223 total += c; 1224 1224 } 1225 1225 atomic_dec(&info->xmit_lock); 1226 - if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) { 1226 + if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) { 1227 1227 if (m->mdmreg[REG_DXMT] & BIT_DXMT) { 1228 1228 isdn_tty_senddown(info); 1229 1229 isdn_tty_tint(info); ··· 1284 1284 1285 1285 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars")) 1286 1286 return; 1287 - if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) 1287 + if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) 1288 1288 isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1); 1289 1289 } 1290 1290
+2 -2
drivers/isdn/icn/icn.c
··· 304 304 isdn_ctrl cmd; 305 305 306 306 if (!(card->sndcount[channel] || card->xskb[channel] || 307 - skb_queue_len(&card->spqueue[channel]))) 307 + !skb_queue_empty(&card->spqueue[channel]))) 308 308 return; 309 309 if (icn_trymaplock_channel(card, mch)) { 310 310 while (sbfree && 311 311 (card->sndcount[channel] || 312 - skb_queue_len(&card->spqueue[channel]) || 312 + !skb_queue_empty(&card->spqueue[channel]) || 313 313 card->xskb[channel])) { 314 314 spin_lock_irqsave(&card->lock, flags); 315 315 if (card->xmit_lock[channel]) {
+2 -3
drivers/net/hamradio/scc.c
··· 304 304 scc->tx_buff = NULL; 305 305 } 306 306 307 - while (skb_queue_len(&scc->tx_queue)) 307 + while (!skb_queue_empty(&scc->tx_queue)) 308 308 dev_kfree_skb(skb_dequeue(&scc->tx_queue)); 309 309 310 310 spin_unlock_irqrestore(&scc->lock, flags); ··· 1126 1126 1127 1127 if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */ 1128 1128 { 1129 - if (skb_queue_len(&scc->tx_queue) == 0) /* nothing to send */ 1130 - { 1129 + if (skb_queue_empty(&scc->tx_queue)) { /* nothing to send */ 1131 1130 scc->stat.tx_state = TXS_IDLE; 1132 1131 netif_wake_queue(scc->dev); /* t_maxkeyup locked it. */ 1133 1132 return;
+1 -1
drivers/net/ppp_async.c
··· 364 364 spin_lock_irqsave(&ap->recv_lock, flags); 365 365 ppp_async_input(ap, buf, cflags, count); 366 366 spin_unlock_irqrestore(&ap->recv_lock, flags); 367 - if (skb_queue_len(&ap->rqueue)) 367 + if (!skb_queue_empty(&ap->rqueue)) 368 368 tasklet_schedule(&ap->tsk); 369 369 ap_put(ap); 370 370 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+6 -6
drivers/net/ppp_generic.c
··· 1237 1237 pch = list_entry(list, struct channel, clist); 1238 1238 navail += pch->avail = (pch->chan != NULL); 1239 1239 if (pch->avail) { 1240 - if (skb_queue_len(&pch->file.xq) == 0 1241 - || !pch->had_frag) { 1240 + if (skb_queue_empty(&pch->file.xq) || 1241 + !pch->had_frag) { 1242 1242 pch->avail = 2; 1243 1243 ++nfree; 1244 1244 } ··· 1374 1374 1375 1375 /* try to send it down the channel */ 1376 1376 chan = pch->chan; 1377 - if (skb_queue_len(&pch->file.xq) 1378 - || !chan->ops->start_xmit(chan, frag)) 1377 + if (!skb_queue_empty(&pch->file.xq) || 1378 + !chan->ops->start_xmit(chan, frag)) 1379 1379 skb_queue_tail(&pch->file.xq, frag); 1380 1380 pch->had_frag = 1; 1381 1381 p += flen; ··· 1412 1412 1413 1413 spin_lock_bh(&pch->downl); 1414 1414 if (pch->chan != 0) { 1415 - while (skb_queue_len(&pch->file.xq) > 0) { 1415 + while (!skb_queue_empty(&pch->file.xq)) { 1416 1416 skb = skb_dequeue(&pch->file.xq); 1417 1417 if (!pch->chan->ops->start_xmit(pch->chan, skb)) { 1418 1418 /* put the packet back and try again later */ ··· 1426 1426 } 1427 1427 spin_unlock_bh(&pch->downl); 1428 1428 /* see if there is anything from the attached unit to be sent */ 1429 - if (skb_queue_len(&pch->file.xq) == 0) { 1429 + if (skb_queue_empty(&pch->file.xq)) { 1430 1430 read_lock_bh(&pch->upl); 1431 1431 ppp = pch->ppp; 1432 1432 if (ppp != 0)
+1 -1
drivers/net/ppp_synctty.c
··· 406 406 spin_lock_irqsave(&ap->recv_lock, flags); 407 407 ppp_sync_input(ap, buf, cflags, count); 408 408 spin_unlock_irqrestore(&ap->recv_lock, flags); 409 - if (skb_queue_len(&ap->rqueue)) 409 + if (!skb_queue_empty(&ap->rqueue)) 410 410 tasklet_schedule(&ap->tsk); 411 411 sp_put(ap); 412 412 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
+1 -1
drivers/net/tun.c
··· 215 215 216 216 poll_wait(file, &tun->read_wait, wait); 217 217 218 - if (skb_queue_len(&tun->readq)) 218 + if (!skb_queue_empty(&tun->readq)) 219 219 mask |= POLLIN | POLLRDNORM; 220 220 221 221 return mask;
+2 -2
drivers/net/wireless/airo.c
··· 2374 2374 /* 2375 2375 * Clean out tx queue 2376 2376 */ 2377 - if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) { 2377 + if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) { 2378 2378 struct sk_buff *skb = NULL; 2379 2379 for (;(skb = skb_dequeue(&ai->txq));) 2380 2380 dev_kfree_skb(skb); ··· 3287 3287 if (status & EV_TXEXC) 3288 3288 get_tx_error(apriv, -1); 3289 3289 spin_lock_irqsave(&apriv->aux_lock, flags); 3290 - if (skb_queue_len (&apriv->txq)) { 3290 + if (!skb_queue_empty(&apriv->txq)) { 3291 3291 spin_unlock_irqrestore(&apriv->aux_lock,flags); 3292 3292 mpi_send_packet (dev); 3293 3293 } else {
+2 -2
drivers/s390/net/claw.c
··· 428 428 new_skb = NULL; /* assume no dice */ 429 429 pkt_cnt = 0; 430 430 CLAW_DBF_TEXT(4,trace,"PackSKBe"); 431 - if (skb_queue_len(&p_ch->collect_queue) > 0) { 431 + if (!skb_queue_empty(&p_ch->collect_queue)) { 432 432 /* some data */ 433 433 held_skb = skb_dequeue(&p_ch->collect_queue); 434 434 if (p_env->packing != DO_PACKED) ··· 1254 1254 privptr = (struct claw_privbk *) dev->priv; 1255 1255 claw_free_wrt_buf( dev ); 1256 1256 if ((privptr->write_free_count > 0) && 1257 - (skb_queue_len(&p_ch->collect_queue) > 0)) { 1257 + !skb_queue_empty(&p_ch->collect_queue)) { 1258 1258 pk_skb = claw_pack_skb(privptr); 1259 1259 while (pk_skb != NULL) { 1260 1260 rc = claw_hw_tx( pk_skb, dev,1);
+3 -3
drivers/s390/net/ctctty.c
··· 156 156 skb_queue_head(&info->rx_queue, skb); 157 157 else { 158 158 kfree_skb(skb); 159 - ret = skb_queue_len(&info->rx_queue); 159 + ret = !skb_queue_empty(&info->rx_queue); 160 160 } 161 161 } 162 162 } ··· 530 530 total += c; 531 531 count -= c; 532 532 } 533 - if (skb_queue_len(&info->tx_queue)) { 533 + if (!skb_queue_empty(&info->tx_queue)) { 534 534 info->lsr &= ~UART_LSR_TEMT; 535 535 tasklet_schedule(&info->tasklet); 536 536 } ··· 594 594 return; 595 595 if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars")) 596 596 return; 597 - if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue))) 597 + if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue)) 598 598 return; 599 599 tasklet_schedule(&info->tasklet); 600 600 }
+3 -3
drivers/usb/net/usbnet.c
··· 3227 3227 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq); 3228 3228 3229 3229 // maybe wait for deletions to finish. 3230 - while (skb_queue_len (&dev->rxq) 3231 - && skb_queue_len (&dev->txq) 3232 - && skb_queue_len (&dev->done)) { 3230 + while (!skb_queue_empty(&dev->rxq) && 3231 + !skb_queue_empty(&dev->txq) && 3232 + !skb_queue_empty(&dev->done)) { 3233 3233 msleep(UNLINK_TIMEOUT_MS); 3234 3234 if (netif_msg_ifdown (dev)) 3235 3235 devdbg (dev, "waited for %d urb completions", temp);
+1 -1
include/net/irda/irda_device.h
··· 224 224 /* Interface for internal use */ 225 225 static inline int irda_device_txqueue_empty(const struct net_device *dev) 226 226 { 227 - return (skb_queue_len(&dev->qdisc->q) == 0); 227 + return skb_queue_empty(&dev->qdisc->q); 228 228 } 229 229 int irda_device_set_raw_mode(struct net_device* self, int status); 230 230 struct net_device *alloc_irdadev(int sizeof_priv);
+1 -1
include/net/tcp.h
··· 991 991 992 992 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) 993 993 { 994 - if (skb_queue_len(&tp->out_of_order_queue) == 0 && 994 + if (skb_queue_empty(&tp->out_of_order_queue) && 995 995 tp->rcv_wnd && 996 996 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && 997 997 !tp->urg_data)
+2 -4
net/bluetooth/cmtp/core.c
··· 213 213 return kernel_sendmsg(sock, &msg, &iv, 1, len); 214 214 } 215 215 216 - static int cmtp_process_transmit(struct cmtp_session *session) 216 + static void cmtp_process_transmit(struct cmtp_session *session) 217 217 { 218 218 struct sk_buff *skb, *nskb; 219 219 unsigned char *hdr; ··· 223 223 224 224 if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { 225 225 BT_ERR("Can't allocate memory for new frame"); 226 - return -ENOMEM; 226 + return; 227 227 } 228 228 229 229 while ((skb = skb_dequeue(&session->transmit))) { ··· 275 275 cmtp_send_frame(session, nskb->data, nskb->len); 276 276 277 277 kfree_skb(nskb); 278 - 279 - return skb_queue_len(&session->transmit); 280 278 } 281 279 282 280 static int cmtp_session(void *arg)
+1 -4
net/bluetooth/hidp/core.c
··· 428 428 return kernel_sendmsg(sock, &msg, &iv, 1, len); 429 429 } 430 430 431 - static int hidp_process_transmit(struct hidp_session *session) 431 + static void hidp_process_transmit(struct hidp_session *session) 432 432 { 433 433 struct sk_buff *skb; 434 434 ··· 453 453 hidp_set_timer(session); 454 454 kfree_skb(skb); 455 455 } 456 - 457 - return skb_queue_len(&session->ctrl_transmit) + 458 - skb_queue_len(&session->intr_transmit); 459 456 } 460 457 461 458 static int hidp_session(void *arg)
+5 -2
net/bluetooth/rfcomm/sock.c
··· 590 590 for (;;) { 591 591 set_current_state(TASK_INTERRUPTIBLE); 592 592 593 - if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) || 594 - signal_pending(current) || !timeo) 593 + if (!skb_queue_empty(&sk->sk_receive_queue) || 594 + sk->sk_err || 595 + (sk->sk_shutdown & RCV_SHUTDOWN) || 596 + signal_pending(current) || 597 + !timeo) 595 598 break; 596 599 597 600 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+1 -1
net/bluetooth/rfcomm/tty.c
··· 781 781 782 782 BT_DBG("tty %p dev %p", tty, dev); 783 783 784 - if (skb_queue_len(&dlc->tx_queue)) 784 + if (!skb_queue_empty(&dlc->tx_queue)) 785 785 return dlc->mtu; 786 786 787 787 return 0;
+5 -5
net/decnet/af_decnet.c
··· 536 536 * we are double checking that we are not sending too 537 537 * many of these keepalive frames. 538 538 */ 539 - if (skb_queue_len(&scp->other_xmit_queue) == 0) 539 + if (skb_queue_empty(&scp->other_xmit_queue)) 540 540 dn_nsp_send_link(sk, DN_NOCHANGE, 0); 541 541 } 542 542 ··· 1191 1191 struct dn_scp *scp = DN_SK(sk); 1192 1192 int mask = datagram_poll(file, sock, wait); 1193 1193 1194 - if (skb_queue_len(&scp->other_receive_queue)) 1194 + if (!skb_queue_empty(&scp->other_receive_queue)) 1195 1195 mask |= POLLRDBAND; 1196 1196 1197 1197 return mask; ··· 1214 1214 1215 1215 case SIOCATMARK: 1216 1216 lock_sock(sk); 1217 - val = (skb_queue_len(&scp->other_receive_queue) != 0); 1217 + val = !skb_queue_empty(&scp->other_receive_queue); 1218 1218 if (scp->state != DN_RUN) 1219 1219 val = -ENOTCONN; 1220 1220 release_sock(sk); ··· 1630 1630 int len = 0; 1631 1631 1632 1632 if (flags & MSG_OOB) 1633 - return skb_queue_len(q) ? 1 : 0; 1633 + return !skb_queue_empty(q) ? 1 : 0; 1634 1634 1635 1635 while(skb != (struct sk_buff *)q) { 1636 1636 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 1707 1707 if (sk->sk_err) 1708 1708 goto out; 1709 1709 1710 - if (skb_queue_len(&scp->other_receive_queue)) { 1710 + if (!skb_queue_empty(&scp->other_receive_queue)) { 1711 1711 if (!(flags & MSG_OOB)) { 1712 1712 msg->msg_flags |= MSG_OOB; 1713 1713 if (!scp->other_report) {
+2 -1
net/decnet/dn_nsp_out.c
··· 342 342 343 343 dn_nsp_output(sk); 344 344 345 - if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue)) 345 + if (!skb_queue_empty(&scp->data_xmit_queue) || 346 + !skb_queue_empty(&scp->other_xmit_queue)) 346 347 scp->persist = dn_nsp_persist(sk); 347 348 348 349 return 0;
+4 -4
net/ipv4/tcp.c
··· 1105 1105 struct sk_buff *skb; 1106 1106 struct tcp_sock *tp = tcp_sk(sk); 1107 1107 1108 - NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue)); 1108 + NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); 1109 1109 1110 1110 /* RX process wants to run with disabled BHs, though it is not 1111 1111 * necessary */ ··· 1369 1369 * is not empty. It is more elegant, but eats cycles, 1370 1370 * unfortunately. 1371 1371 */ 1372 - if (skb_queue_len(&tp->ucopy.prequeue)) 1372 + if (!skb_queue_empty(&tp->ucopy.prequeue)) 1373 1373 goto do_prequeue; 1374 1374 1375 1375 /* __ Set realtime policy in scheduler __ */ ··· 1394 1394 } 1395 1395 1396 1396 if (tp->rcv_nxt == tp->copied_seq && 1397 - skb_queue_len(&tp->ucopy.prequeue)) { 1397 + !skb_queue_empty(&tp->ucopy.prequeue)) { 1398 1398 do_prequeue: 1399 1399 tcp_prequeue_process(sk); 1400 1400 ··· 1476 1476 } while (len > 0); 1477 1477 1478 1478 if (user_recv) { 1479 - if (skb_queue_len(&tp->ucopy.prequeue)) { 1479 + if (!skb_queue_empty(&tp->ucopy.prequeue)) { 1480 1480 int chunk; 1481 1481 1482 1482 tp->ucopy.len = copied > 0 ? len : 0;
+5 -6
net/ipv4/tcp_input.c
··· 2802 2802 int this_sack; 2803 2803 2804 2804 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 2805 - if (skb_queue_len(&tp->out_of_order_queue) == 0) { 2805 + if (skb_queue_empty(&tp->out_of_order_queue)) { 2806 2806 tp->rx_opt.num_sacks = 0; 2807 2807 tp->rx_opt.eff_sacks = tp->rx_opt.dsack; 2808 2808 return; ··· 2935 2935 if(th->fin) 2936 2936 tcp_fin(skb, sk, th); 2937 2937 2938 - if (skb_queue_len(&tp->out_of_order_queue)) { 2938 + if (!skb_queue_empty(&tp->out_of_order_queue)) { 2939 2939 tcp_ofo_queue(sk); 2940 2940 2941 2941 /* RFC2581. 4.2. SHOULD send immediate ACK, when 2942 2942 * gap in queue is filled. 2943 2943 */ 2944 - if (!skb_queue_len(&tp->out_of_order_queue)) 2944 + if (skb_queue_empty(&tp->out_of_order_queue)) 2945 2945 tp->ack.pingpong = 0; 2946 2946 } 2947 2947 ··· 3249 3249 * This must not ever occur. */ 3250 3250 3251 3251 /* First, purge the out_of_order queue. */ 3252 - if (skb_queue_len(&tp->out_of_order_queue)) { 3253 - NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED, 3254 - skb_queue_len(&tp->out_of_order_queue)); 3252 + if (!skb_queue_empty(&tp->out_of_order_queue)) { 3253 + NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); 3255 3254 __skb_queue_purge(&tp->out_of_order_queue); 3256 3255 3257 3256 /* Reset SACK state. A conforming SACK implementation will
+2 -3
net/ipv4/tcp_timer.c
··· 231 231 } 232 232 tp->ack.pending &= ~TCP_ACK_TIMER; 233 233 234 - if (skb_queue_len(&tp->ucopy.prequeue)) { 234 + if (!skb_queue_empty(&tp->ucopy.prequeue)) { 235 235 struct sk_buff *skb; 236 236 237 - NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED, 238 - skb_queue_len(&tp->ucopy.prequeue)); 237 + NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED); 239 238 240 239 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 241 240 sk->sk_backlog_rcv(sk, skb);
+1 -2
net/irda/irlap.c
··· 445 445 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 446 446 447 447 /* Don't disconnect until all data frames are successfully sent */ 448 - if (skb_queue_len(&self->txq) > 0) { 448 + if (!skb_queue_empty(&self->txq)) { 449 449 self->disconnect_pending = TRUE; 450 - 451 450 return; 452 451 } 453 452
+7 -7
net/irda/irlap_event.c
··· 191 191 * Send out the RR frames faster if our own transmit queue is empty, or 192 192 * if the peer is busy. The effect is a much faster conversation 193 193 */ 194 - if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) { 194 + if (skb_queue_empty(&self->txq) || self->remote_busy) { 195 195 if (self->fast_RR == TRUE) { 196 196 /* 197 197 * Assert that the fast poll timer has not reached the ··· 263 263 IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, 264 264 skb_queue_len(&self->txq)); 265 265 266 - if (skb_queue_len(&self->txq)) { 266 + if (!skb_queue_empty(&self->txq)) { 267 267 /* Prevent race conditions with irlap_data_request() */ 268 268 self->local_busy = TRUE; 269 269 ··· 1074 1074 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1075 1075 /* Window has been adjusted for the max packet 1076 1076 * size, so much simpler... - Jean II */ 1077 - nextfit = (skb_queue_len(&self->txq) > 0); 1077 + nextfit = !skb_queue_empty(&self->txq); 1078 1078 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1079 1079 /* 1080 1080 * Send data with poll bit cleared only if window > 1 ··· 1814 1814 #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1815 1815 /* Window has been adjusted for the max packet 1816 1816 * size, so much simpler... - Jean II */ 1817 - nextfit = (skb_queue_len(&self->txq) > 0); 1817 + nextfit = !skb_queue_empty(&self->txq); 1818 1818 #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1819 1819 /* 1820 1820 * Send data with final bit cleared only if window > 1 ··· 1937 1937 irlap_data_indication(self, skb, FALSE); 1938 1938 1939 1939 /* Any pending data requests? */ 1940 - if ((skb_queue_len(&self->txq) > 0) && 1940 + if (!skb_queue_empty(&self->txq) && 1941 1941 (self->window > 0)) 1942 1942 { 1943 1943 self->ack_required = TRUE; ··· 2038 2038 /* 2039 2039 * Any pending data requests? 
2040 2040 */ 2041 - if ((skb_queue_len(&self->txq) > 0) && 2041 + if (!skb_queue_empty(&self->txq) && 2042 2042 (self->window > 0) && !self->remote_busy) 2043 2043 { 2044 2044 irlap_data_indication(self, skb, TRUE); ··· 2069 2069 */ 2070 2070 nr_status = irlap_validate_nr_received(self, info->nr); 2071 2071 if (nr_status == NR_EXPECTED) { 2072 - if ((skb_queue_len( &self->txq) > 0) && 2072 + if (!skb_queue_empty(&self->txq) && 2073 2073 (self->window > 0)) { 2074 2074 self->remote_busy = FALSE; 2075 2075
+3 -5
net/irda/irlap_frame.c
··· 1018 1018 /* 1019 1019 * We can now fill the window with additional data frames 1020 1020 */ 1021 - while (skb_queue_len( &self->txq) > 0) { 1021 + while (!skb_queue_empty(&self->txq)) { 1022 1022 1023 1023 IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); 1024 - if ((skb_queue_len( &self->txq) > 0) && 1025 - (self->window > 0)) { 1024 + if (self->window > 0) { 1026 1025 skb = skb_dequeue( &self->txq); 1027 1026 IRDA_ASSERT(skb != NULL, return;); 1028 1027 ··· 1030 1031 * bit cleared 1031 1032 */ 1032 1033 if ((self->window > 1) && 1033 - skb_queue_len(&self->txq) > 0) 1034 - { 1034 + !skb_queue_empty(&self->txq)) { 1035 1035 irlap_send_data_primary(self, skb); 1036 1036 } else { 1037 1037 irlap_send_data_primary_poll(self, skb);
+1 -1
net/irda/irttp.c
··· 1513 1513 /* 1514 1514 * Check if there is still data segments in the transmit queue 1515 1515 */ 1516 - if (skb_queue_len(&self->tx_queue) > 0) { 1516 + if (!skb_queue_empty(&self->tx_queue)) { 1517 1517 if (priority == P_HIGH) { 1518 1518 /* 1519 1519 * No need to send the queued data, if we are
+1 -1
net/llc/llc_c_ev.c
··· 84 84 if (llc->dev->flags & IFF_LOOPBACK) 85 85 goto out; 86 86 rc = 1; 87 - if (!skb_queue_len(&llc->pdu_unack_q)) 87 + if (skb_queue_empty(&llc->pdu_unack_q)) 88 88 goto out; 89 89 skb = skb_peek(&llc->pdu_unack_q); 90 90 pdu = llc_pdu_sn_hdr(skb);
+1 -1
net/netlink/af_netlink.c
··· 858 858 { 859 859 struct netlink_sock *nlk = nlk_sk(sk); 860 860 861 - if (!skb_queue_len(&sk->sk_receive_queue)) 861 + if (skb_queue_empty(&sk->sk_receive_queue)) 862 862 clear_bit(0, &nlk->state); 863 863 if (!test_bit(0, &nlk->state)) 864 864 wake_up_interruptible(&nlk->wait);
+1 -1
net/sched/sch_red.c
··· 385 385 memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256); 386 386 387 387 q->qcount = -1; 388 - if (skb_queue_len(&sch->q) == 0) 388 + if (skb_queue_empty(&sch->q)) 389 389 PSCHED_SET_PASTPERFECT(q->qidlestart); 390 390 sch_tree_unlock(sch); 391 391 return 0;
+2 -2
net/unix/af_unix.c
··· 302 302 * may receive messages only from that peer. */ 303 303 static void unix_dgram_disconnected(struct sock *sk, struct sock *other) 304 304 { 305 - if (skb_queue_len(&sk->sk_receive_queue)) { 305 + if (!skb_queue_empty(&sk->sk_receive_queue)) { 306 306 skb_queue_purge(&sk->sk_receive_queue); 307 307 wake_up_interruptible_all(&unix_sk(sk)->peer_wait); 308 308 ··· 1619 1619 for (;;) { 1620 1620 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1621 1621 1622 - if (skb_queue_len(&sk->sk_receive_queue) || 1622 + if (!skb_queue_empty(&sk->sk_receive_queue) || 1623 1623 sk->sk_err || 1624 1624 (sk->sk_shutdown & RCV_SHUTDOWN) || 1625 1625 signal_pending(current) ||