Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Don't use shared bluetooth antenna in iwlwifi driver for management
frames, from Emmanuel Grumbach.

2) Fix device ID check in ath9k driver, from Felix Fietkau.

3) Off by one in xen-netback BUG checks, from Dan Carpenter.

4) Fix IFLA_VF_PORT netlink attribute validation, from Daniel Borkmann.

5) Fix races in setting peeked bit flag in SKBs during datagram
receive. If it's shared we have to clone it, otherwise the value can
easily be corrupted. Fix from Herbert Xu.

6) Revert fec clock handling change, causes regressions. From Fabio
Estevam.

7) Fix use after free in fq_codel and sfq packet schedulers, from WANG
Cong.

8) ipvlan bug fixes (memory leaks, missing rcu_dereference_bh, etc.)
from WANG Cong and Konstantin Khlebnikov.

9) Memory leak in act_bpf packet action, from Alexei Starovoitov.

10) ARM bpf JIT bug fixes from Nicolas Schichan.

11) Fix backwards compat of ANY_LAYOUT in virtio_net driver, from
Michael S Tsirkin.

12) Destruction of bond with different ARP header types not handled
correctly, fix from Nikolay Aleksandrov.

13) Revert GRO receive support in ipv6 SIT tunnel driver, causes
regressions because the GRO packets created cannot be processed
properly on the GSO side if we forward the frame. From Herbert Xu.

14) TCCR update race and other fixes to ravb driver from Sergei
Shtylyov.

15) Fix SKB leaks in caif_queue_rcv_skb(), from Eric Dumazet.

16) Fix panics on packet scheduler filter replace, from Daniel Borkmann.

17) Make sure AF_PACKET properly sees IP headers in defragmented frames
(via PACKET_FANOUT_FLAG_DEFRAG option), from Edward Hyunkoo Jee.

18) AF_NETLINK cannot hold mutex in RCU callback, fix from Florian
Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (84 commits)
ravb: fix ring memory allocation
net: phy: dp83867: Fix warning check for setting the internal delay
openvswitch: allocate nr_node_ids flow_stats instead of num_possible_nodes
netlink: don't hold mutex in rcu callback when releasing mmapd ring
ARM: net: fix vlan access instructions in ARM JIT.
ARM: net: handle negative offsets in BPF JIT.
ARM: net: fix condition for load_order > 0 when translating load instructions.
tcp: suppress a division by zero warning
drivers: net: cpsw: remove tx event processing in rx napi poll
inet: frags: fix defragmented packet's IP header for af_packet
net: mvneta: fix refilling for Rx DMA buffers
stmmac: fix setting of driver data in stmmac_dvr_probe
sched: cls_flow: fix panic on filter replace
sched: cls_flower: fix panic on filter replace
sched: cls_bpf: fix panic on filter replace
net/mdio: fix mdio_bus_match for c45 PHY
net: ratelimit warnings about dst entry refcount underflow or overflow
caif: fix leaks and race in caif_queue_rcv_skb()
qmi_wwan: add the second QMI/network interface for Sierra Wireless MC7305/MC7355
ravb: fix race updating TCCR
...

+714 -811
+44 -13
arch/arm/net/bpf_jit_32.c
··· 74 74 75 75 int bpf_jit_enable __read_mostly; 76 76 77 - static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77 + static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, 78 + unsigned int size) 79 + { 80 + void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); 81 + 82 + if (!ptr) 83 + return -EFAULT; 84 + memcpy(ret, ptr, size); 85 + return 0; 86 + } 87 + 88 + static u64 jit_get_skb_b(struct sk_buff *skb, int offset) 78 89 { 79 90 u8 ret; 80 91 int err; 81 92 82 - err = skb_copy_bits(skb, offset, &ret, 1); 93 + if (offset < 0) 94 + err = call_neg_helper(skb, offset, &ret, 1); 95 + else 96 + err = skb_copy_bits(skb, offset, &ret, 1); 83 97 84 98 return (u64)err << 32 | ret; 85 99 } 86 100 87 - static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101 + static u64 jit_get_skb_h(struct sk_buff *skb, int offset) 88 102 { 89 103 u16 ret; 90 104 int err; 91 105 92 - err = skb_copy_bits(skb, offset, &ret, 2); 106 + if (offset < 0) 107 + err = call_neg_helper(skb, offset, &ret, 2); 108 + else 109 + err = skb_copy_bits(skb, offset, &ret, 2); 93 110 94 111 return (u64)err << 32 | ntohs(ret); 95 112 } 96 113 97 - static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114 + static u64 jit_get_skb_w(struct sk_buff *skb, int offset) 98 115 { 99 116 u32 ret; 100 117 int err; 101 118 102 - err = skb_copy_bits(skb, offset, &ret, 4); 119 + if (offset < 0) 120 + err = call_neg_helper(skb, offset, &ret, 4); 121 + else 122 + err = skb_copy_bits(skb, offset, &ret, 4); 103 123 104 124 return (u64)err << 32 | ntohl(ret); 105 125 } ··· 556 536 case BPF_LD | BPF_B | BPF_ABS: 557 537 load_order = 0; 558 538 load: 559 - /* the interpreter will deal with the negative K */ 560 - if ((int)k < 0) 561 - return -ENOTSUPP; 562 539 emit_mov_i(r_off, k, ctx); 563 540 load_common: 564 541 ctx->seen |= SEEN_DATA | SEEN_CALL; ··· 564 547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 565 548 1 << load_order), ctx); 566 549 emit(ARM_CMP_R(r_scratch, 
r_off), ctx); 567 - condt = ARM_COND_HS; 550 + condt = ARM_COND_GE; 568 551 } else { 569 552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 570 553 condt = ARM_COND_HI; 571 554 } 555 + 556 + /* 557 + * test for negative offset, only if we are 558 + * currently scheduled to take the fast 559 + * path. this will update the flags so that 560 + * the slowpath instruction are ignored if the 561 + * offset is negative. 562 + * 563 + * for loard_order == 0 the HI condition will 564 + * make loads at offset 0 take the slow path too. 565 + */ 566 + _emit(condt, ARM_CMP_I(r_off, 0), ctx); 572 567 573 568 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 574 569 ctx); ··· 889 860 off = offsetof(struct sk_buff, vlan_tci); 890 861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 891 862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 892 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 893 - else 894 - OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 863 + OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); 864 + else { 865 + OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); 866 + OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); 867 + } 895 868 break; 896 869 case BPF_ANC | SKF_AD_QUEUE: 897 870 ctx->seen |= SEEN_SKB;
+5 -6
drivers/bluetooth/btbcm.c
··· 472 472 473 473 /* Read Verbose Config Version Info */ 474 474 skb = btbcm_read_verbose_config(hdev); 475 - if (IS_ERR(skb)) 476 - return PTR_ERR(skb); 477 - 478 - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 479 - get_unaligned_le16(skb->data + 5)); 480 - kfree_skb(skb); 475 + if (!IS_ERR(skb)) { 476 + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 477 + get_unaligned_le16(skb->data + 5)); 478 + kfree_skb(skb); 479 + } 481 480 482 481 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 483 482
+10 -25
drivers/isdn/gigaset/ser-gigaset.c
··· 524 524 cs->hw.ser->tty = tty; 525 525 atomic_set(&cs->hw.ser->refcnt, 1); 526 526 init_completion(&cs->hw.ser->dead_cmp); 527 - 528 527 tty->disc_data = cs; 528 + 529 + /* Set the amount of data we're willing to receive per call 530 + * from the hardware driver to half of the input buffer size 531 + * to leave some reserve. 532 + * Note: We don't do flow control towards the hardware driver. 533 + * If more data is received than will fit into the input buffer, 534 + * it will be dropped and an error will be logged. This should 535 + * never happen as the device is slow and the buffer size ample. 536 + */ 537 + tty->receive_room = RBUFSIZE/2; 529 538 530 539 /* OK.. Initialization of the datastructures and the HW is done.. Now 531 540 * startup system and notify the LL that we are ready to run ··· 604 595 { 605 596 gigaset_tty_close(tty); 606 597 return 0; 607 - } 608 - 609 - /* 610 - * Read on the tty. 611 - * Unused, received data goes only to the Gigaset driver. 612 - */ 613 - static ssize_t 614 - gigaset_tty_read(struct tty_struct *tty, struct file *file, 615 - unsigned char __user *buf, size_t count) 616 - { 617 - return -EAGAIN; 618 - } 619 - 620 - /* 621 - * Write on the tty. 622 - * Unused, transmit data comes only from the Gigaset driver. 623 - */ 624 - static ssize_t 625 - gigaset_tty_write(struct tty_struct *tty, struct file *file, 626 - const unsigned char *buf, size_t count) 627 - { 628 - return -EAGAIN; 629 598 } 630 599 631 600 /* ··· 739 752 .open = gigaset_tty_open, 740 753 .close = gigaset_tty_close, 741 754 .hangup = gigaset_tty_hangup, 742 - .read = gigaset_tty_read, 743 - .write = gigaset_tty_write, 744 755 .ioctl = gigaset_tty_ioctl, 745 756 .receive_buf = gigaset_tty_receive, 746 757 .write_wakeup = gigaset_tty_wakeup,
+31 -3
drivers/net/bonding/bond_main.c
··· 625 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 626 626 } 627 627 628 + static struct slave *bond_get_old_active(struct bonding *bond, 629 + struct slave *new_active) 630 + { 631 + struct slave *slave; 632 + struct list_head *iter; 633 + 634 + bond_for_each_slave(bond, slave, iter) { 635 + if (slave == new_active) 636 + continue; 637 + 638 + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) 639 + return slave; 640 + } 641 + 642 + return NULL; 643 + } 644 + 628 645 /* bond_do_fail_over_mac 629 646 * 630 647 * Perform special MAC address swapping for fail_over_mac settings ··· 668 651 */ 669 652 if (!new_active) 670 653 return; 654 + 655 + if (!old_active) 656 + old_active = bond_get_old_active(bond, new_active); 671 657 672 658 if (old_active) { 673 659 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ··· 1745 1725 1746 1726 err_undo_flags: 1747 1727 /* Enslave of first slave has failed and we need to fix master's mac */ 1748 - if (!bond_has_slaves(bond) && 1749 - ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1750 - eth_hw_addr_random(bond_dev); 1728 + if (!bond_has_slaves(bond)) { 1729 + if (ether_addr_equal_64bits(bond_dev->dev_addr, 1730 + slave_dev->dev_addr)) 1731 + eth_hw_addr_random(bond_dev); 1732 + if (bond_dev->type != ARPHRD_ETHER) { 1733 + ether_setup(bond_dev); 1734 + bond_dev->flags |= IFF_MASTER; 1735 + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1736 + } 1737 + } 1751 1738 1752 1739 return res; 1753 1740 } ··· 1943 1916 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1944 1917 netdev_info(bond_dev, "Destroying bond %s\n", 1945 1918 bond_dev->name); 1919 + bond_remove_proc_entry(bond); 1946 1920 unregister_netdevice(bond_dev); 1947 1921 } 1948 1922 return ret;
+4 -4
drivers/net/can/at91_can.c
··· 577 577 578 578 cf->can_id |= CAN_ERR_CRTL; 579 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 580 - netif_receive_skb(skb); 581 580 582 581 stats->rx_packets++; 583 582 stats->rx_bytes += cf->can_dlc; 583 + netif_receive_skb(skb); 584 584 } 585 585 586 586 /** ··· 642 642 } 643 643 644 644 at91_read_mb(dev, mb, cf); 645 - netif_receive_skb(skb); 646 645 647 646 stats->rx_packets++; 648 647 stats->rx_bytes += cf->can_dlc; 648 + netif_receive_skb(skb); 649 649 650 650 can_led_event(dev, CAN_LED_EVENT_RX); 651 651 } ··· 802 802 return 0; 803 803 804 804 at91_poll_err_frame(dev, cf, reg_sr); 805 - netif_receive_skb(skb); 806 805 807 806 dev->stats.rx_packets++; 808 807 dev->stats.rx_bytes += cf->can_dlc; 808 + netif_receive_skb(skb); 809 809 810 810 return 1; 811 811 } ··· 1067 1067 return; 1068 1068 1069 1069 at91_irq_err_state(dev, cf, new_state); 1070 - netif_rx(skb); 1071 1070 1072 1071 dev->stats.rx_packets++; 1073 1072 dev->stats.rx_bytes += cf->can_dlc; 1073 + netif_rx(skb); 1074 1074 1075 1075 priv->can.state = new_state; 1076 1076 }
+2 -4
drivers/net/can/bfin_can.c
··· 424 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 425 425 } 426 426 427 - netif_rx(skb); 428 - 429 427 stats->rx_packets++; 430 428 stats->rx_bytes += cf->can_dlc; 429 + netif_rx(skb); 431 430 } 432 431 433 432 static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) ··· 507 508 508 509 priv->can.state = state; 509 510 510 - netif_rx(skb); 511 - 512 511 stats->rx_packets++; 513 512 stats->rx_bytes += cf->can_dlc; 513 + netif_rx(skb); 514 514 515 515 return 0; 516 516 }
+2 -2
drivers/net/can/cc770/cc770.c
··· 504 504 for (i = 0; i < cf->can_dlc; i++) 505 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 506 506 } 507 - netif_rx(skb); 508 507 509 508 stats->rx_packets++; 510 509 stats->rx_bytes += cf->can_dlc; 510 + netif_rx(skb); 511 511 } 512 512 513 513 static int cc770_err(struct net_device *dev, u8 status) ··· 584 584 } 585 585 } 586 586 587 - netif_rx(skb); 588 587 589 588 stats->rx_packets++; 590 589 stats->rx_bytes += cf->can_dlc; 590 + netif_rx(skb); 591 591 592 592 return 0; 593 593 }
+3 -4
drivers/net/can/flexcan.c
··· 577 577 return 0; 578 578 579 579 do_bus_err(dev, cf, reg_esr); 580 - netif_receive_skb(skb); 581 580 582 581 dev->stats.rx_packets++; 583 582 dev->stats.rx_bytes += cf->can_dlc; 583 + netif_receive_skb(skb); 584 584 585 585 return 1; 586 586 } ··· 622 622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 623 623 can_bus_off(dev); 624 624 625 - netif_receive_skb(skb); 626 - 627 625 dev->stats.rx_packets++; 628 626 dev->stats.rx_bytes += cf->can_dlc; 627 + netif_receive_skb(skb); 629 628 630 629 return 1; 631 630 } ··· 669 670 } 670 671 671 672 flexcan_read_fifo(dev, cf); 672 - netif_receive_skb(skb); 673 673 674 674 stats->rx_packets++; 675 675 stats->rx_bytes += cf->can_dlc; 676 + netif_receive_skb(skb); 676 677 677 678 can_led_event(dev, CAN_LED_EVENT_RX); 678 679
+2 -1
drivers/net/can/grcan.c
··· 1216 1216 cf->data[i] = (u8)(slot[j] >> shift); 1217 1217 } 1218 1218 } 1219 - netif_receive_skb(skb); 1220 1219 1221 1220 /* Update statistics and read pointer */ 1222 1221 stats->rx_packets++; 1223 1222 stats->rx_bytes += cf->can_dlc; 1223 + netif_receive_skb(skb); 1224 + 1224 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 1226 } 1226 1227
+2 -4
drivers/net/can/sja1000/sja1000.c
··· 377 377 /* release receive buffer */ 378 378 sja1000_write_cmdreg(priv, CMD_RRB); 379 379 380 - netif_rx(skb); 381 - 382 380 stats->rx_packets++; 383 381 stats->rx_bytes += cf->can_dlc; 382 + netif_rx(skb); 384 383 385 384 can_led_event(dev, CAN_LED_EVENT_RX); 386 385 } ··· 483 484 can_bus_off(dev); 484 485 } 485 486 486 - netif_rx(skb); 487 - 488 487 stats->rx_packets++; 489 488 stats->rx_bytes += cf->can_dlc; 489 + netif_rx(skb); 490 490 491 491 return 0; 492 492 }
+1 -1
drivers/net/can/slcan.c
··· 218 218 219 219 memcpy(skb_put(skb, sizeof(struct can_frame)), 220 220 &cf, sizeof(struct can_frame)); 221 - netif_rx_ni(skb); 222 221 223 222 sl->dev->stats.rx_packets++; 224 223 sl->dev->stats.rx_bytes += cf.can_dlc; 224 + netif_rx_ni(skb); 225 225 } 226 226 227 227 /* parse tty input stream */
+8 -9
drivers/net/can/spi/mcp251x.c
··· 1086 1086 if (ret) 1087 1087 goto out_clk; 1088 1088 1089 - priv->power = devm_regulator_get(&spi->dev, "vdd"); 1090 - priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1089 + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); 1090 + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); 1091 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1092 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1093 1093 ret = -EPROBE_DEFER; ··· 1222 1222 struct spi_device *spi = to_spi_device(dev); 1223 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1224 1224 1225 - if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 + if (priv->after_suspend & AFTER_SUSPEND_POWER) 1226 1226 mcp251x_power_enable(priv->power, 1); 1227 + 1228 + if (priv->after_suspend & AFTER_SUSPEND_UP) { 1229 + mcp251x_power_enable(priv->transceiver, 1); 1227 1230 queue_work(priv->wq, &priv->restart_work); 1228 1231 } else { 1229 - if (priv->after_suspend & AFTER_SUSPEND_UP) { 1230 - mcp251x_power_enable(priv->transceiver, 1); 1231 - queue_work(priv->wq, &priv->restart_work); 1232 - } else { 1233 - priv->after_suspend = 0; 1234 - } 1232 + priv->after_suspend = 0; 1235 1233 } 1234 + 1236 1235 priv->force_quit = 0; 1237 1236 enable_irq(spi->irq); 1238 1237 return 0;
+1 -1
drivers/net/can/ti_hecc.c
··· 747 747 } 748 748 } 749 749 750 - netif_rx(skb); 751 750 stats->rx_packets++; 752 751 stats->rx_bytes += cf->can_dlc; 752 + netif_rx(skb); 753 753 754 754 return 0; 755 755 }
+2 -4
drivers/net/can/usb/ems_usb.c
··· 324 324 cf->data[i] = msg->msg.can_msg.msg[i]; 325 325 } 326 326 327 - netif_rx(skb); 328 - 329 327 stats->rx_packets++; 330 328 stats->rx_bytes += cf->can_dlc; 329 + netif_rx(skb); 331 330 } 332 331 333 332 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) ··· 399 400 stats->rx_errors++; 400 401 } 401 402 402 - netif_rx(skb); 403 - 404 403 stats->rx_packets++; 405 404 stats->rx_bytes += cf->can_dlc; 405 + netif_rx(skb); 406 406 } 407 407 408 408 /*
+2 -4
drivers/net/can/usb/esd_usb2.c
··· 301 301 cf->data[7] = rxerr; 302 302 } 303 303 304 - netif_rx(skb); 305 - 306 304 priv->bec.txerr = txerr; 307 305 priv->bec.rxerr = rxerr; 308 306 309 307 stats->rx_packets++; 310 308 stats->rx_bytes += cf->can_dlc; 309 + netif_rx(skb); 311 310 } 312 311 } 313 312 ··· 346 347 cf->data[i] = msg->msg.rx.data[i]; 347 348 } 348 349 349 - netif_rx(skb); 350 - 351 350 stats->rx_packets++; 352 351 stats->rx_bytes += cf->can_dlc; 352 + netif_rx(skb); 353 353 } 354 354 355 355 return;
+3 -4
drivers/net/can/usb/peak_usb/pcan_usb.c
··· 526 526 hwts->hwtstamp = timeval_to_ktime(tv); 527 527 } 528 528 529 - netif_rx(skb); 530 529 mc->netdev->stats.rx_packets++; 531 530 mc->netdev->stats.rx_bytes += cf->can_dlc; 531 + netif_rx(skb); 532 532 533 533 return 0; 534 534 } ··· 659 659 hwts = skb_hwtstamps(skb); 660 660 hwts->hwtstamp = timeval_to_ktime(tv); 661 661 662 - /* push the skb */ 663 - netif_rx(skb); 664 - 665 662 /* update statistics */ 666 663 mc->netdev->stats.rx_packets++; 667 664 mc->netdev->stats.rx_bytes += cf->can_dlc; 665 + /* push the skb */ 666 + netif_rx(skb); 668 667 669 668 return 0; 670 669
+2 -2
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
··· 553 553 hwts = skb_hwtstamps(skb); 554 554 hwts->hwtstamp = timeval_to_ktime(tv); 555 555 556 - netif_rx(skb); 557 556 netdev->stats.rx_packets++; 558 557 netdev->stats.rx_bytes += can_frame->can_dlc; 558 + netif_rx(skb); 559 559 560 560 return 0; 561 561 } ··· 670 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 671 671 hwts = skb_hwtstamps(skb); 672 672 hwts->hwtstamp = timeval_to_ktime(tv); 673 - netif_rx(skb); 674 673 netdev->stats.rx_packets++; 675 674 netdev->stats.rx_bytes += can_frame->can_dlc; 675 + netif_rx(skb); 676 676 677 677 return 0; 678 678 }
+2 -4
drivers/net/can/usb/usb_8dev.c
··· 461 461 priv->bec.txerr = txerr; 462 462 priv->bec.rxerr = rxerr; 463 463 464 - netif_rx(skb); 465 - 466 464 stats->rx_packets++; 467 465 stats->rx_bytes += cf->can_dlc; 466 + netif_rx(skb); 468 467 } 469 468 470 469 /* Read data and status frames */ ··· 493 494 else 494 495 memcpy(cf->data, msg->data, cf->can_dlc); 495 496 496 - netif_rx(skb); 497 - 498 497 stats->rx_packets++; 499 498 stats->rx_bytes += cf->can_dlc; 499 + netif_rx(skb); 500 500 501 501 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 502 502 } else {
+13 -2
drivers/net/dsa/bcm_sf2.c
··· 696 696 } 697 697 698 698 /* Include the pseudo-PHY address and the broadcast PHY address to 699 - * divert reads towards our workaround 699 + * divert reads towards our workaround. This is only required for 700 + * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such 701 + * that we can use the regular SWITCH_MDIO master controller instead. 702 + * 703 + * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask 704 + * to have a 1:1 mapping between Port address and PHY address in order 705 + * to utilize the slave_mii_bus instance to read from Port PHYs. This is 706 + * not what we want here, so we initialize phys_mii_mask 0 to always 707 + * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. 700 708 */ 701 - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 + if (of_machine_is_compatible("brcm,bcm7445d0")) 710 + ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 711 + else 712 + ds->phys_mii_mask = 0; 702 713 703 714 rev = reg_readl(priv, REG_SWITCH_REVISION); 704 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
+1 -1
drivers/net/dsa/mv88e6xxx.c
··· 1163 1163 1164 1164 newfid = __ffs(ps->fid_mask); 1165 1165 ps->fid[port] = newfid; 1166 - ps->fid_mask &= (1 << newfid); 1166 + ps->fid_mask &= ~(1 << newfid); 1167 1167 ps->bridge_mask[fid] &= ~(1 << port); 1168 1168 ps->bridge_mask[newfid] = 1 << port; 1169 1169
+13 -75
drivers/net/ethernet/freescale/fec_main.c
··· 24 24 #include <linux/module.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/string.h> 27 - #include <linux/pm_runtime.h> 28 27 #include <linux/ptrace.h> 29 28 #include <linux/errno.h> 30 29 #include <linux/ioport.h> ··· 77 78 #define FEC_ENET_RAEM_V 0x8 78 79 #define FEC_ENET_RAFL_V 0x8 79 80 #define FEC_ENET_OPD_V 0xFFF0 80 - #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 81 81 82 82 static struct platform_device_id fec_devtype[] = { 83 83 { ··· 1767 1769 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1768 1770 { 1769 1771 struct fec_enet_private *fep = bus->priv; 1770 - struct device *dev = &fep->pdev->dev; 1771 1772 unsigned long time_left; 1772 - int ret = 0; 1773 - 1774 - ret = pm_runtime_get_sync(dev); 1775 - if (IS_ERR_VALUE(ret)) 1776 - return ret; 1777 1773 1778 1774 fep->mii_timeout = 0; 1779 1775 init_completion(&fep->mdio_done); ··· 1783 1791 if (time_left == 0) { 1784 1792 fep->mii_timeout = 1; 1785 1793 netdev_err(fep->netdev, "MDIO read timeout\n"); 1786 - ret = -ETIMEDOUT; 1787 - goto out; 1794 + return -ETIMEDOUT; 1788 1795 } 1789 1796 1790 - ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1791 - 1792 - out: 1793 - pm_runtime_mark_last_busy(dev); 1794 - pm_runtime_put_autosuspend(dev); 1795 - 1796 - return ret; 1797 + /* return value */ 1798 + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1797 1799 } 1798 1800 1799 1801 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1800 1802 u16 value) 1801 1803 { 1802 1804 struct fec_enet_private *fep = bus->priv; 1803 - struct device *dev = &fep->pdev->dev; 1804 1805 unsigned long time_left; 1805 - int ret = 0; 1806 - 1807 - ret = pm_runtime_get_sync(dev); 1808 - if (IS_ERR_VALUE(ret)) 1809 - return ret; 1810 1806 1811 1807 fep->mii_timeout = 0; 1812 1808 init_completion(&fep->mdio_done); ··· 1811 1831 if (time_left == 0) { 1812 1832 fep->mii_timeout = 1; 1813 1833 netdev_err(fep->netdev, "MDIO write timeout\n"); 1814 - ret = 
-ETIMEDOUT; 1834 + return -ETIMEDOUT; 1815 1835 } 1816 1836 1817 - pm_runtime_mark_last_busy(dev); 1818 - pm_runtime_put_autosuspend(dev); 1819 - 1820 - return ret; 1837 + return 0; 1821 1838 } 1822 1839 1823 1840 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ··· 1826 1849 ret = clk_prepare_enable(fep->clk_ahb); 1827 1850 if (ret) 1828 1851 return ret; 1852 + ret = clk_prepare_enable(fep->clk_ipg); 1853 + if (ret) 1854 + goto failed_clk_ipg; 1829 1855 if (fep->clk_enet_out) { 1830 1856 ret = clk_prepare_enable(fep->clk_enet_out); 1831 1857 if (ret) ··· 1852 1872 } 1853 1873 } else { 1854 1874 clk_disable_unprepare(fep->clk_ahb); 1875 + clk_disable_unprepare(fep->clk_ipg); 1855 1876 if (fep->clk_enet_out) 1856 1877 clk_disable_unprepare(fep->clk_enet_out); 1857 1878 if (fep->clk_ptp) { ··· 1874 1893 if (fep->clk_enet_out) 1875 1894 clk_disable_unprepare(fep->clk_enet_out); 1876 1895 failed_clk_enet_out: 1896 + clk_disable_unprepare(fep->clk_ipg); 1897 + failed_clk_ipg: 1877 1898 clk_disable_unprepare(fep->clk_ahb); 1878 1899 1879 1900 return ret; ··· 2847 2864 struct fec_enet_private *fep = netdev_priv(ndev); 2848 2865 int ret; 2849 2866 2850 - ret = pm_runtime_get_sync(&fep->pdev->dev); 2851 - if (IS_ERR_VALUE(ret)) 2852 - return ret; 2853 - 2854 2867 pinctrl_pm_select_default_state(&fep->pdev->dev); 2855 2868 ret = fec_enet_clk_enable(ndev, true); 2856 2869 if (ret) 2857 - goto clk_enable; 2870 + return ret; 2858 2871 2859 2872 /* I should reset the ring buffers here, but I don't yet know 2860 2873 * a simple way to do that. 
··· 2881 2902 fec_enet_free_buffers(ndev); 2882 2903 err_enet_alloc: 2883 2904 fec_enet_clk_enable(ndev, false); 2884 - clk_enable: 2885 - pm_runtime_mark_last_busy(&fep->pdev->dev); 2886 - pm_runtime_put_autosuspend(&fep->pdev->dev); 2887 2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2888 2906 return ret; 2889 2907 } ··· 2903 2927 2904 2928 fec_enet_clk_enable(ndev, false); 2905 2929 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2906 - pm_runtime_mark_last_busy(&fep->pdev->dev); 2907 - pm_runtime_put_autosuspend(&fep->pdev->dev); 2908 - 2909 2930 fec_enet_free_buffers(ndev); 2910 2931 2911 2932 return 0; ··· 3388 3415 if (ret) 3389 3416 goto failed_clk; 3390 3417 3391 - ret = clk_prepare_enable(fep->clk_ipg); 3392 - if (ret) 3393 - goto failed_clk_ipg; 3394 - 3395 3418 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3396 3419 if (!IS_ERR(fep->reg_phy)) { 3397 3420 ret = regulator_enable(fep->reg_phy); ··· 3434 3465 netif_carrier_off(ndev); 3435 3466 fec_enet_clk_enable(ndev, false); 3436 3467 pinctrl_pm_select_sleep_state(&pdev->dev); 3437 - pm_runtime_set_active(&pdev->dev); 3438 - pm_runtime_enable(&pdev->dev); 3439 3468 3440 3469 ret = register_netdev(ndev); 3441 3470 if (ret) ··· 3447 3480 3448 3481 fep->rx_copybreak = COPYBREAK_DEFAULT; 3449 3482 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3450 - 3451 - pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3452 - pm_runtime_use_autosuspend(&pdev->dev); 3453 - pm_runtime_mark_last_busy(&pdev->dev); 3454 - pm_runtime_put_autosuspend(&pdev->dev); 3455 - 3456 3483 return 0; 3457 3484 3458 3485 failed_register: ··· 3457 3496 if (fep->reg_phy) 3458 3497 regulator_disable(fep->reg_phy); 3459 3498 failed_regulator: 3460 - clk_disable_unprepare(fep->clk_ipg); 3461 - failed_clk_ipg: 3462 3499 fec_enet_clk_enable(ndev, false); 3463 3500 failed_clk: 3464 3501 failed_phy: ··· 3568 3609 return ret; 3569 3610 } 3570 3611 3571 - static int __maybe_unused fec_runtime_suspend(struct 
device *dev) 3572 - { 3573 - struct net_device *ndev = dev_get_drvdata(dev); 3574 - struct fec_enet_private *fep = netdev_priv(ndev); 3575 - 3576 - clk_disable_unprepare(fep->clk_ipg); 3577 - 3578 - return 0; 3579 - } 3580 - 3581 - static int __maybe_unused fec_runtime_resume(struct device *dev) 3582 - { 3583 - struct net_device *ndev = dev_get_drvdata(dev); 3584 - struct fec_enet_private *fep = netdev_priv(ndev); 3585 - 3586 - return clk_prepare_enable(fep->clk_ipg); 3587 - } 3588 - 3589 - static const struct dev_pm_ops fec_pm_ops = { 3590 - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3591 - SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3592 - }; 3612 + static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3593 3613 3594 3614 static struct platform_driver fec_driver = { 3595 3615 .driver = {
+10 -12
drivers/net/ethernet/marvell/mvneta.c
··· 1462 1462 struct mvneta_rx_queue *rxq) 1463 1463 { 1464 1464 struct net_device *dev = pp->dev; 1465 - int rx_done, rx_filled; 1465 + int rx_done; 1466 1466 u32 rcvd_pkts = 0; 1467 1467 u32 rcvd_bytes = 0; 1468 1468 ··· 1473 1473 rx_todo = rx_done; 1474 1474 1475 1475 rx_done = 0; 1476 - rx_filled = 0; 1477 1476 1478 1477 /* Fairness NAPI loop */ 1479 1478 while (rx_done < rx_todo) { ··· 1483 1484 int rx_bytes, err; 1484 1485 1485 1486 rx_done++; 1486 - rx_filled++; 1487 1487 rx_status = rx_desc->status; 1488 1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1489 1489 data = (unsigned char *)rx_desc->buf_cookie; ··· 1522 1524 continue; 1523 1525 } 1524 1526 1527 + /* Refill processing */ 1528 + err = mvneta_rx_refill(pp, rx_desc); 1529 + if (err) { 1530 + netdev_err(dev, "Linux processing - Can't refill\n"); 1531 + rxq->missed++; 1532 + goto err_drop_frame; 1533 + } 1534 + 1525 1535 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1526 1536 if (!skb) 1527 1537 goto err_drop_frame; ··· 1549 1543 mvneta_rx_csum(pp, rx_status, skb); 1550 1544 1551 1545 napi_gro_receive(&pp->napi, skb); 1552 - 1553 - /* Refill processing */ 1554 - err = mvneta_rx_refill(pp, rx_desc); 1555 - if (err) { 1556 - netdev_err(dev, "Linux processing - Can't refill\n"); 1557 - rxq->missed++; 1558 - rx_filled--; 1559 - } 1560 1546 } 1561 1547 1562 1548 if (rcvd_pkts) { ··· 1561 1563 } 1562 1564 1563 1565 /* Update rxq management counters */ 1564 - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1566 + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); 1565 1567 1566 1568 return rx_done; 1567 1569 }
+38 -36
drivers/net/ethernet/renesas/ravb_main.c
··· 228 228 struct ravb_desc *desc = NULL; 229 229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 230 230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; 231 - struct sk_buff *skb; 232 231 dma_addr_t dma_addr; 233 - void *buffer; 234 232 int i; 235 233 236 234 priv->cur_rx[q] = 0; ··· 239 241 memset(priv->rx_ring[q], 0, rx_ring_size); 240 242 /* Build RX ring buffer */ 241 243 for (i = 0; i < priv->num_rx_ring[q]; i++) { 242 - priv->rx_skb[q][i] = NULL; 243 - skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); 244 - if (!skb) 245 - break; 246 - ravb_set_buffer_align(skb); 247 244 /* RX descriptor */ 248 245 rx_desc = &priv->rx_ring[q][i]; 249 246 /* The size of the buffer should be on 16-byte boundary. */ 250 247 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); 251 - dma_addr = dma_map_single(&ndev->dev, skb->data, 248 + dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, 252 249 ALIGN(PKT_BUF_SZ, 16), 253 250 DMA_FROM_DEVICE); 254 - if (dma_mapping_error(&ndev->dev, dma_addr)) { 255 - dev_kfree_skb(skb); 256 - break; 257 - } 258 - priv->rx_skb[q][i] = skb; 251 + /* We just set the data size to 0 for a failed mapping which 252 + * should prevent DMA from happening... 
253 + */ 254 + if (dma_mapping_error(&ndev->dev, dma_addr)) 255 + rx_desc->ds_cc = cpu_to_le16(0); 259 256 rx_desc->dptr = cpu_to_le32(dma_addr); 260 257 rx_desc->die_dt = DT_FEMPTY; 261 258 } 262 259 rx_desc = &priv->rx_ring[q][i]; 263 260 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 264 261 rx_desc->die_dt = DT_LINKFIX; /* type */ 265 - priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); 266 262 267 263 memset(priv->tx_ring[q], 0, tx_ring_size); 268 264 /* Build TX ring buffer */ 269 265 for (i = 0; i < priv->num_tx_ring[q]; i++) { 270 - priv->tx_skb[q][i] = NULL; 271 - priv->tx_buffers[q][i] = NULL; 272 - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); 273 - if (!buffer) 274 - break; 275 - /* Aligned TX buffer */ 276 - priv->tx_buffers[q][i] = buffer; 277 266 tx_desc = &priv->tx_ring[q][i]; 278 267 tx_desc->die_dt = DT_EEMPTY; 279 268 } ··· 283 298 static int ravb_ring_init(struct net_device *ndev, int q) 284 299 { 285 300 struct ravb_private *priv = netdev_priv(ndev); 301 + struct sk_buff *skb; 286 302 int ring_size; 303 + void *buffer; 304 + int i; 287 305 288 306 /* Allocate RX and TX skb rings */ 289 307 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], ··· 296 308 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 297 309 goto error; 298 310 311 + for (i = 0; i < priv->num_rx_ring[q]; i++) { 312 + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); 313 + if (!skb) 314 + goto error; 315 + ravb_set_buffer_align(skb); 316 + priv->rx_skb[q][i] = skb; 317 + } 318 + 299 319 /* Allocate rings for the aligned buffers */ 300 320 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], 301 321 sizeof(*priv->tx_buffers[q]), GFP_KERNEL); 302 322 if (!priv->tx_buffers[q]) 303 323 goto error; 324 + 325 + for (i = 0; i < priv->num_tx_ring[q]; i++) { 326 + buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); 327 + if (!buffer) 328 + goto error; 329 + /* Aligned TX buffer */ 330 + priv->tx_buffers[q][i] = buffer; 331 + } 304 332 305 333 /* Allocate all 
RX descriptors. */ 306 334 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); ··· 528 524 if (--boguscnt < 0) 529 525 break; 530 526 527 + /* We use 0-byte descriptors to mark the DMA mapping errors */ 528 + if (!pkt_len) 529 + continue; 530 + 531 531 if (desc_status & MSC_MC) 532 532 stats->multicast++; 533 533 ··· 551 543 552 544 skb = priv->rx_skb[q][entry]; 553 545 priv->rx_skb[q][entry] = NULL; 554 - dma_sync_single_for_cpu(&ndev->dev, 555 - le32_to_cpu(desc->dptr), 556 - ALIGN(PKT_BUF_SZ, 16), 557 - DMA_FROM_DEVICE); 546 + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), 547 + ALIGN(PKT_BUF_SZ, 16), 548 + DMA_FROM_DEVICE); 558 549 get_ts &= (q == RAVB_NC) ? 559 550 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 560 551 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; ··· 591 584 if (!skb) 592 585 break; /* Better luck next round. */ 593 586 ravb_set_buffer_align(skb); 594 - dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), 595 - ALIGN(PKT_BUF_SZ, 16), 596 - DMA_FROM_DEVICE); 597 587 dma_addr = dma_map_single(&ndev->dev, skb->data, 598 588 le16_to_cpu(desc->ds_cc), 599 589 DMA_FROM_DEVICE); 600 590 skb_checksum_none_assert(skb); 601 - if (dma_mapping_error(&ndev->dev, dma_addr)) { 602 - dev_kfree_skb_any(skb); 603 - break; 604 - } 591 + /* We just set the data size to 0 for a failed mapping 592 + * which should prevent DMA from happening... 
593 + */ 594 + if (dma_mapping_error(&ndev->dev, dma_addr)) 595 + desc->ds_cc = cpu_to_le16(0); 605 596 desc->dptr = cpu_to_le32(dma_addr); 606 597 priv->rx_skb[q][entry] = skb; 607 598 } ··· 1284 1279 u32 dma_addr; 1285 1280 void *buffer; 1286 1281 u32 entry; 1287 - u32 tccr; 1288 1282 1289 1283 spin_lock_irqsave(&priv->lock, flags); 1290 1284 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { ··· 1332 1328 dma_wmb(); 1333 1329 desc->die_dt = DT_FSINGLE; 1334 1330 1335 - tccr = ravb_read(ndev, TCCR); 1336 - if (!(tccr & (TCCR_TSRQ0 << q))) 1337 - ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); 1331 + ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); 1338 1332 1339 1333 priv->cur_tx[q]++; 1340 1334 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 2843 2843 if (res->mac) 2844 2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 2845 2845 2846 - dev_set_drvdata(device, priv); 2846 + dev_set_drvdata(device, priv->dev); 2847 2847 2848 2848 /* Verify driver arguments */ 2849 2849 stmmac_verify_args();
+3 -6
drivers/net/ethernet/ti/cpsw.c
··· 793 793 static int cpsw_poll(struct napi_struct *napi, int budget) 794 794 { 795 795 struct cpsw_priv *priv = napi_to_priv(napi); 796 - int num_tx, num_rx; 797 - 798 - num_tx = cpdma_chan_process(priv->txch, 128); 796 + int num_rx; 799 797 800 798 num_rx = cpdma_chan_process(priv->rxch, budget); 801 799 if (num_rx < budget) { ··· 808 810 } 809 811 } 810 812 811 - if (num_rx || num_tx) 812 - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", 813 - num_rx, num_tx); 813 + if (num_rx) 814 + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); 814 815 815 816 return num_rx; 816 817 }
+1 -1
drivers/net/ethernet/ti/netcp_core.c
··· 1617 1617 } 1618 1618 mutex_unlock(&netcp_modules_lock); 1619 1619 1620 - netcp_rxpool_refill(netcp); 1621 1620 napi_enable(&netcp->rx_napi); 1622 1621 napi_enable(&netcp->tx_napi); 1623 1622 knav_queue_enable_notify(netcp->tx_compl_q); 1624 1623 knav_queue_enable_notify(netcp->rx_queue); 1624 + netcp_rxpool_refill(netcp); 1625 1625 netif_tx_wake_all_queues(ndev); 1626 1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1627 1627 return 0;
+6 -3
drivers/net/ipvlan/ipvlan.h
··· 67 67 struct ipvl_port *port; 68 68 struct net_device *phy_dev; 69 69 struct list_head addrs; 70 - int ipv4cnt; 71 - int ipv6cnt; 72 70 struct ipvl_pcpu_stats __percpu *pcpu_stats; 73 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 74 72 netdev_features_t sfeatures; ··· 104 106 return rcu_dereference(d->rx_handler_data); 105 107 } 106 108 109 + static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) 110 + { 111 + return rcu_dereference_bh(d->rx_handler_data); 112 + } 113 + 107 114 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 108 115 { 109 116 return rtnl_dereference(d->rx_handler_data); ··· 127 124 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 128 125 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 129 126 const void *iaddr, bool is_v6); 130 - void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 127 + void ipvlan_ht_addr_del(struct ipvl_addr *addr); 131 128 #endif /* __IPVLAN_H */
+2 -4
drivers/net/ipvlan/ipvlan_core.c
··· 85 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 86 86 } 87 87 88 - void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88 + void ipvlan_ht_addr_del(struct ipvl_addr *addr) 89 89 { 90 90 hlist_del_init_rcu(&addr->hlnode); 91 - if (sync) 92 - synchronize_rcu(); 93 91 } 94 92 95 93 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, ··· 529 531 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 530 532 { 531 533 struct ipvl_dev *ipvlan = netdev_priv(dev); 532 - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 534 + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); 533 535 534 536 if (!port) 535 537 goto out;
+19 -23
drivers/net/ipvlan/ipvlan_main.c
··· 153 153 else 154 154 dev->flags &= ~IFF_NOARP; 155 155 156 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 157 - list_for_each_entry(addr, &ipvlan->addrs, anode) 158 - ipvlan_ht_addr_add(ipvlan, addr); 159 - } 156 + list_for_each_entry(addr, &ipvlan->addrs, anode) 157 + ipvlan_ht_addr_add(ipvlan, addr); 158 + 160 159 return dev_uc_add(phy_dev, phy_dev->dev_addr); 161 160 } 162 161 ··· 170 171 171 172 dev_uc_del(phy_dev, phy_dev->dev_addr); 172 173 173 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 174 - list_for_each_entry(addr, &ipvlan->addrs, anode) 175 - ipvlan_ht_addr_del(addr, !dev->dismantle); 176 - } 174 + list_for_each_entry(addr, &ipvlan->addrs, anode) 175 + ipvlan_ht_addr_del(addr); 176 + 177 177 return 0; 178 178 } 179 179 ··· 469 471 ipvlan->port = port; 470 472 ipvlan->sfeatures = IPVLAN_FEATURES; 471 473 INIT_LIST_HEAD(&ipvlan->addrs); 472 - ipvlan->ipv4cnt = 0; 473 - ipvlan->ipv6cnt = 0; 474 474 475 475 /* TODO Probably put random address here to be presented to the 476 476 * world but keep using the physical-dev address for the outgoing ··· 504 508 struct ipvl_dev *ipvlan = netdev_priv(dev); 505 509 struct ipvl_addr *addr, *next; 506 510 507 - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 508 - list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 509 - ipvlan_ht_addr_del(addr, !dev->dismantle); 510 - list_del(&addr->anode); 511 - } 511 + list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 512 + ipvlan_ht_addr_del(addr); 513 + list_del(&addr->anode); 514 + kfree_rcu(addr, rcu); 512 515 } 516 + 513 517 list_del_rcu(&ipvlan->pnode); 514 518 unregister_netdevice_queue(dev, head); 515 519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); ··· 623 627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 624 628 addr->atype = IPVL_IPV6; 625 629 list_add_tail(&addr->anode, &ipvlan->addrs); 626 - ipvlan->ipv6cnt++; 630 + 627 631 /* If the interface is not up, the address will be added to the hash 628 632 * list by 
ipvlan_open. 629 633 */ ··· 641 645 if (!addr) 642 646 return; 643 647 644 - ipvlan_ht_addr_del(addr, true); 648 + ipvlan_ht_addr_del(addr); 645 649 list_del(&addr->anode); 646 - ipvlan->ipv6cnt--; 647 - WARN_ON(ipvlan->ipv6cnt < 0); 648 650 kfree_rcu(addr, rcu); 649 651 650 652 return; ··· 654 660 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr; 655 661 struct net_device *dev = (struct net_device *)if6->idev->dev; 656 662 struct ipvl_dev *ipvlan = netdev_priv(dev); 663 + 664 + /* FIXME IPv6 autoconf calls us from bh without RTNL */ 665 + if (in_softirq()) 666 + return NOTIFY_DONE; 657 667 658 668 if (!netif_is_ipvlan(dev)) 659 669 return NOTIFY_DONE; ··· 697 699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 698 700 addr->atype = IPVL_IPV4; 699 701 list_add_tail(&addr->anode, &ipvlan->addrs); 700 - ipvlan->ipv4cnt++; 702 + 701 703 /* If the interface is not up, the address will be added to the hash 702 704 * list by ipvlan_open. 703 705 */ ··· 715 717 if (!addr) 716 718 return; 717 719 718 - ipvlan_ht_addr_del(addr, true); 720 + ipvlan_ht_addr_del(addr); 719 721 list_del(&addr->anode); 720 - ipvlan->ipv4cnt--; 721 - WARN_ON(ipvlan->ipv4cnt < 0); 722 722 kfree_rcu(addr, rcu); 723 723 724 724 return;
+1 -1
drivers/net/phy/dp83867.c
··· 164 164 return ret; 165 165 } 166 166 167 - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 + if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && 168 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 169 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 170 170 DP83867_DEVADDR, phydev->addr);
+17 -2
drivers/net/phy/mdio_bus.c
··· 421 421 { 422 422 struct phy_device *phydev = to_phy_device(dev); 423 423 struct phy_driver *phydrv = to_phy_driver(drv); 424 + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); 425 + int i; 424 426 425 427 if (of_driver_match_device(dev, drv)) 426 428 return 1; ··· 430 428 if (phydrv->match_phy_device) 431 429 return phydrv->match_phy_device(phydev); 432 430 433 - return (phydrv->phy_id & phydrv->phy_id_mask) == 434 - (phydev->phy_id & phydrv->phy_id_mask); 431 + if (phydev->is_c45) { 432 + for (i = 1; i < num_ids; i++) { 433 + if (!(phydev->c45_ids.devices_in_package & (1 << i))) 434 + continue; 435 + 436 + if ((phydrv->phy_id & phydrv->phy_id_mask) == 437 + (phydev->c45_ids.device_ids[i] & 438 + phydrv->phy_id_mask)) 439 + return 1; 440 + } 441 + return 0; 442 + } else { 443 + return (phydrv->phy_id & phydrv->phy_id_mask) == 444 + (phydev->phy_id & phydrv->phy_id_mask); 445 + } 435 446 } 436 447 437 448 #ifdef CONFIG_PM
+1
drivers/net/usb/qmi_wwan.c
··· 757 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 758 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 759 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 760 + {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ 760 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
+2 -1
drivers/net/virtio_net.c
··· 1828 1828 else 1829 1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1830 1830 1831 - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || 1832 + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) 1832 1833 vi->any_header_sg = true; 1833 1834 1834 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+1
drivers/net/wireless/ath/ath9k/hw.c
··· 279 279 return; 280 280 case AR9300_DEVID_QCA956X: 281 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 282 + return; 282 283 } 283 284 284 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
+6
drivers/net/wireless/iwlwifi/iwl-fh.h
··· 438 438 #define RX_QUEUE_MASK 255 439 439 #define RX_QUEUE_SIZE_LOG 8 440 440 441 + /* 442 + * RX related structures and functions 443 + */ 444 + #define RX_FREE_BUFFERS 64 445 + #define RX_LOW_WATERMARK 8 446 + 441 447 /** 442 448 * struct iwl_rb_status - reserve buffer status 443 449 * host memory mapped FH registers
+5 -7
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
··· 540 540 hw_addr = (const u8 *)(mac_override + 541 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 542 542 543 - /* The byte order is little endian 16 bit, meaning 214365 */ 544 - data->hw_addr[0] = hw_addr[1]; 545 - data->hw_addr[1] = hw_addr[0]; 546 - data->hw_addr[2] = hw_addr[3]; 547 - data->hw_addr[3] = hw_addr[2]; 548 - data->hw_addr[4] = hw_addr[5]; 549 - data->hw_addr[5] = hw_addr[4]; 543 + /* 544 + * Store the MAC address from MAO section. 545 + * No byte swapping is required in MAO section 546 + */ 547 + memcpy(data->hw_addr, hw_addr, ETH_ALEN); 550 548 551 549 /* 552 550 * Force the use of the OTP MAC address in case of reserved MAC
+2 -1
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
··· 660 660 * iwl_umac_scan_flags 661 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 662 662 * can be preempted by other scan requests with higher priority. 663 - * The low priority scan is aborted. 663 + * The low priority scan will be resumed when the higher proirity scan is 664 + * completed. 664 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 666 * when scan starts. 666 667 */
+3
drivers/net/wireless/iwlwifi/mvm/scan.c
··· 1109 1109 cmd->uid = cpu_to_le32(uid); 1110 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1111 1111 1112 + if (type == IWL_MVM_SCAN_SCHED) 1113 + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); 1114 + 1112 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1113 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1114 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+2 -1
drivers/net/wireless/iwlwifi/mvm/sta.c
··· 1401 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1402 1402 u8 sta_id; 1403 1403 int ret; 1404 + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 1404 1405 1405 1406 lockdep_assert_held(&mvm->mutex); 1406 1407 ··· 1468 1467 end: 1469 1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1470 1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1471 - sta->addr, ret); 1470 + sta ? sta->addr : zero_addr, ret); 1472 1471 return ret; 1473 1472 } 1474 1473
+1 -1
drivers/net/wireless/iwlwifi/mvm/time-event.c
··· 86 86 { 87 87 lockdep_assert_held(&mvm->time_event_lock); 88 88 89 - if (te_data->id == TE_MAX) 89 + if (!te_data->vif) 90 90 return; 91 91 92 92 list_del(&te_data->list);
+1 -1
drivers/net/wireless/iwlwifi/mvm/tx.c
··· 252 252 253 253 if (info->band == IEEE80211_BAND_2GHZ && 254 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 255 - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; 256 256 else 257 257 rate_flags = 258 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+3 -2
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 368 368 /* 3165 Series */ 369 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 370 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 371 + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, 371 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 378 + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, 377 379 378 380 /* 7265 Series */ 379 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, ··· 428 426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 429 427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 430 428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 429 + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, 431 430 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, 433 - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, 434 431 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 435 432 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 436 433 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+8 -43
drivers/net/wireless/iwlwifi/pcie/internal.h
··· 44 44 #include "iwl-io.h" 45 45 #include "iwl-op-mode.h" 46 46 47 - /* 48 - * RX related structures and functions 49 - */ 50 - #define RX_NUM_QUEUES 1 51 - #define RX_POST_REQ_ALLOC 2 52 - #define RX_CLAIM_REQ_ALLOC 8 53 - #define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) 54 - #define RX_LOW_WATERMARK 8 55 - 56 47 struct iwl_host_cmd; 57 48 58 49 /*This file includes the declaration that are internal to the ··· 77 86 * struct iwl_rxq - Rx queue 78 87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 79 88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 89 + * @pool: 90 + * @queue: 80 91 * @read: Shared index to newest available Rx buffer 81 92 * @write: Shared index to oldest written Rx packet 82 93 * @free_count: Number of pre-allocated buffers in rx_free 83 - * @used_count: Number of RBDs handled to allocator to use for allocation 84 94 * @write_actual: 85 - * @rx_free: list of RBDs with allocated RB ready for use 86 - * @rx_used: list of RBDs with no RB attached 95 + * @rx_free: list of free SKBs for use 96 + * @rx_used: List of Rx buffers with no SKB 87 97 * @need_update: flag to indicate we need to update read/write index 88 98 * @rb_stts: driver's pointer to receive buffer status 89 99 * @rb_stts_dma: bus address of receive buffer status 90 100 * @lock: 91 - * @pool: initial pool of iwl_rx_mem_buffer for the queue 92 - * @queue: actual rx queue 93 101 * 94 102 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 95 103 */ 96 104 struct iwl_rxq { 97 105 __le32 *bd; 98 106 dma_addr_t bd_dma; 107 + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 108 + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 99 109 u32 read; 100 110 u32 write; 101 111 u32 free_count; 102 - u32 used_count; 103 112 u32 write_actual; 104 113 struct list_head rx_free; 105 114 struct list_head rx_used; ··· 107 116 struct iwl_rb_status *rb_stts; 108 117 dma_addr_t rb_stts_dma; 109 118 
spinlock_t lock; 110 - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; 111 - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 112 - }; 113 - 114 - /** 115 - * struct iwl_rb_allocator - Rx allocator 116 - * @pool: initial pool of allocator 117 - * @req_pending: number of requests the allcator had not processed yet 118 - * @req_ready: number of requests honored and ready for claiming 119 - * @rbd_allocated: RBDs with pages allocated and ready to be handled to 120 - * the queue. This is a list of &struct iwl_rx_mem_buffer 121 - * @rbd_empty: RBDs with no page attached for allocator use. This is a list 122 - * of &struct iwl_rx_mem_buffer 123 - * @lock: protects the rbd_allocated and rbd_empty lists 124 - * @alloc_wq: work queue for background calls 125 - * @rx_alloc: work struct for background calls 126 - */ 127 - struct iwl_rb_allocator { 128 - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; 129 - atomic_t req_pending; 130 - atomic_t req_ready; 131 - struct list_head rbd_allocated; 132 - struct list_head rbd_empty; 133 - spinlock_t lock; 134 - struct workqueue_struct *alloc_wq; 135 - struct work_struct rx_alloc; 136 119 }; 137 120 138 121 struct iwl_dma_ptr { ··· 250 285 /** 251 286 * struct iwl_trans_pcie - PCIe transport specific data 252 287 * @rxq: all the RX queue data 253 - * @rba: allocator for RX replenishing 288 + * @rx_replenish: work that will be called when buffers need to be allocated 254 289 * @drv - pointer to iwl_drv 255 290 * @trans: pointer to the generic transport area 256 291 * @scd_base_addr: scheduler sram base address in SRAM ··· 273 308 */ 274 309 struct iwl_trans_pcie { 275 310 struct iwl_rxq rxq; 276 - struct iwl_rb_allocator rba; 311 + struct work_struct rx_replenish; 277 312 struct iwl_trans *trans; 278 313 struct iwl_drv *drv; 279 314
+83 -331
drivers/net/wireless/iwlwifi/pcie/rx.c
··· 1 1 /****************************************************************************** 2 2 * 3 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 5 5 * 6 6 * Portions of this file are derived from the ipw3945 project, as well 7 7 * as portions of the ieee80211 subsystem header files. ··· 74 74 * resets the Rx queue buffers with new memory. 75 75 * 76 76 * The management in the driver is as follows: 77 - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 78 - * When the interrupt handler is called, the request is processed. 79 - * The page is either stolen - transferred to the upper layer 80 - * or reused - added immediately to the iwl->rxq->rx_free list. 81 - * + When the page is stolen - the driver updates the matching queue's used 82 - * count, detaches the RBD and transfers it to the queue used list. 83 - * When there are two used RBDs - they are transferred to the allocator empty 84 - * list. Work is then scheduled for the allocator to start allocating 85 - * eight buffers. 86 - * When there are another 6 used RBDs - they are transferred to the allocator 87 - * empty list and the driver tries to claim the pre-allocated buffers and 88 - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them 89 - * until ready. 90 - * When there are 8+ buffers in the free list - either from allocation or from 91 - * 8 reused unstolen pages - restock is called to update the FW and indexes. 92 - * + In order to make sure the allocator always has RBDs to use for allocation 93 - * the allocator has initial pool in the size of num_queues*(8-2) - the 94 - * maximum missing RBDs per allocation request (request posted with 2 95 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). 96 - * The queues supplies the recycle of the rest of the RBDs. 
77 + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 78 + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 79 + * to replenish the iwl->rxq->rx_free. 80 + * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the 81 + * iwl->rxq is replenished and the READ INDEX is updated (updating the 82 + * 'processed' and 'read' driver indexes as well) 97 83 * + A received packet is processed and handed to the kernel network stack, 98 84 * detached from the iwl->rxq. The driver 'processed' index is updated. 99 - * + If there are no allocated buffers in iwl->rxq->rx_free, 85 + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the 86 + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, 100 87 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 101 88 * If there were enough free buffers and RX_STALLED is set it is cleared. 102 89 * ··· 92 105 * 93 106 * iwl_rxq_alloc() Allocates rx_free 94 107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 95 - * iwl_pcie_rxq_restock. 96 - * Used only during initialization. 108 + * iwl_pcie_rxq_restock 97 109 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 98 110 * queue, updates firmware pointers, and updates 99 - * the WRITE index. 100 - * iwl_pcie_rx_allocator() Background work for allocating pages. 111 + * the WRITE index. If insufficient rx_free buffers 112 + * are available, schedules iwl_pcie_rx_replenish 101 113 * 102 114 * -- enable interrupts -- 103 115 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 104 116 * READ INDEX, detaching the SKB from the pool. 105 117 * Moves the packet buffer from queue to rx_used. 106 - * Posts and claims requests to the allocator. 107 118 * Calls iwl_pcie_rxq_restock to refill any empty 108 119 * slots. 
109 - * 110 - * RBD life-cycle: 111 - * 112 - * Init: 113 - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue 114 - * 115 - * Regular Receive interrupt: 116 - * Page Stolen: 117 - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> 118 - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue 119 - * Page not Stolen: 120 - * rxq.queue -> rxq.rx_free -> rxq.queue 121 120 * ... 122 121 * 123 122 */ ··· 240 267 rxq->free_count--; 241 268 } 242 269 spin_unlock(&rxq->lock); 270 + /* If the pre-allocated buffer pool is dropping low, schedule to 271 + * refill it */ 272 + if (rxq->free_count <= RX_LOW_WATERMARK) 273 + schedule_work(&trans_pcie->rx_replenish); 243 274 244 275 /* If we've added more space for the firmware to place data, tell it. 245 276 * Increment device's write pointer in multiples of 8. */ ··· 255 278 } 256 279 257 280 /* 258 - * iwl_pcie_rx_alloc_page - allocates and returns a page. 259 - * 260 - */ 261 - static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) 262 - { 263 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 264 - struct iwl_rxq *rxq = &trans_pcie->rxq; 265 - struct page *page; 266 - gfp_t gfp_mask = GFP_KERNEL; 267 - 268 - if (rxq->free_count > RX_LOW_WATERMARK) 269 - gfp_mask |= __GFP_NOWARN; 270 - 271 - if (trans_pcie->rx_page_order > 0) 272 - gfp_mask |= __GFP_COMP; 273 - 274 - /* Alloc a new receive buffer */ 275 - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 276 - if (!page) { 277 - if (net_ratelimit()) 278 - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 279 - trans_pcie->rx_page_order); 280 - /* Issue an error if the hardware has consumed more than half 281 - * of its free buffer list and we don't have enough 282 - * pre-allocated buffers. 283 - ` */ 284 - if (rxq->free_count <= RX_LOW_WATERMARK && 285 - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && 286 - net_ratelimit()) 287 - IWL_CRIT(trans, 288 - "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", 289 - rxq->free_count); 290 - return NULL; 291 - } 292 - return page; 293 - } 294 - 295 - /* 296 281 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 297 282 * 298 283 * A used RBD is an Rx buffer that has been given to the stack. To use it again ··· 263 324 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 264 325 * allocated buffers. 265 326 */ 266 - static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) 327 + static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) 267 328 { 268 329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 269 330 struct iwl_rxq *rxq = &trans_pcie->rxq; 270 331 struct iwl_rx_mem_buffer *rxb; 271 332 struct page *page; 333 + gfp_t gfp_mask = priority; 272 334 273 335 while (1) { 274 336 spin_lock(&rxq->lock); ··· 279 339 } 280 340 spin_unlock(&rxq->lock); 281 341 342 + if (rxq->free_count > RX_LOW_WATERMARK) 343 + gfp_mask |= __GFP_NOWARN; 344 + 345 + if (trans_pcie->rx_page_order > 0) 346 + gfp_mask |= __GFP_COMP; 347 + 282 348 /* Alloc a new receive buffer */ 283 - page = iwl_pcie_rx_alloc_page(trans); 284 - if (!page) 349 + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 350 + if (!page) { 351 + if (net_ratelimit()) 352 + IWL_DEBUG_INFO(trans, "alloc_pages failed, " 353 + "order: %d\n", 354 + trans_pcie->rx_page_order); 355 + 356 + if ((rxq->free_count <= RX_LOW_WATERMARK) && 357 + net_ratelimit()) 358 + IWL_CRIT(trans, "Failed to alloc_pages with %s." 359 + "Only %u free buffers remaining.\n", 360 + priority == GFP_ATOMIC ? 
361 + "GFP_ATOMIC" : "GFP_KERNEL", 362 + rxq->free_count); 363 + /* We don't reschedule replenish work here -- we will 364 + * call the restock method and if it still needs 365 + * more buffers it will schedule replenish */ 285 366 return; 367 + } 286 368 287 369 spin_lock(&rxq->lock); 288 370 ··· 355 393 356 394 lockdep_assert_held(&rxq->lock); 357 395 358 - for (i = 0; i < RX_QUEUE_SIZE; i++) { 396 + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 359 397 if (!rxq->pool[i].page) 360 398 continue; 361 399 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, ··· 372 410 * When moving to rx_free an page is allocated for the slot. 373 411 * 374 412 * Also restock the Rx queue via iwl_pcie_rxq_restock. 375 - * This is called only during initialization 413 + * This is called as a scheduled work item (except for during initialization) 376 414 */ 377 - static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 415 + static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) 378 416 { 379 - iwl_pcie_rxq_alloc_rbs(trans); 417 + iwl_pcie_rxq_alloc_rbs(trans, gfp); 380 418 381 419 iwl_pcie_rxq_restock(trans); 382 420 } 383 421 384 - /* 385 - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 386 - * 387 - * Allocates for each received request 8 pages 388 - * Called as a scheduled work item. 
389 - */ 390 - static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 422 + static void iwl_pcie_rx_replenish_work(struct work_struct *data) 391 423 { 392 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 393 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 394 - 395 - while (atomic_read(&rba->req_pending)) { 396 - int i; 397 - struct list_head local_empty; 398 - struct list_head local_allocated; 399 - 400 - INIT_LIST_HEAD(&local_allocated); 401 - spin_lock(&rba->lock); 402 - /* swap out the entire rba->rbd_empty to a local list */ 403 - list_replace_init(&rba->rbd_empty, &local_empty); 404 - spin_unlock(&rba->lock); 405 - 406 - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 407 - struct iwl_rx_mem_buffer *rxb; 408 - struct page *page; 409 - 410 - /* List should never be empty - each reused RBD is 411 - * returned to the list, and initial pool covers any 412 - * possible gap between the time the page is allocated 413 - * to the time the RBD is added. 414 - */ 415 - BUG_ON(list_empty(&local_empty)); 416 - /* Get the first rxb from the rbd list */ 417 - rxb = list_first_entry(&local_empty, 418 - struct iwl_rx_mem_buffer, list); 419 - BUG_ON(rxb->page); 420 - 421 - /* Alloc a new receive buffer */ 422 - page = iwl_pcie_rx_alloc_page(trans); 423 - if (!page) 424 - continue; 425 - rxb->page = page; 426 - 427 - /* Get physical address of the RB */ 428 - rxb->page_dma = dma_map_page(trans->dev, page, 0, 429 - PAGE_SIZE << trans_pcie->rx_page_order, 430 - DMA_FROM_DEVICE); 431 - if (dma_mapping_error(trans->dev, rxb->page_dma)) { 432 - rxb->page = NULL; 433 - __free_pages(page, trans_pcie->rx_page_order); 434 - continue; 435 - } 436 - /* dma address must be no more than 36 bits */ 437 - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 438 - /* and also 256 byte aligned! 
*/ 439 - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 440 - 441 - /* move the allocated entry to the out list */ 442 - list_move(&rxb->list, &local_allocated); 443 - i++; 444 - } 445 - 446 - spin_lock(&rba->lock); 447 - /* add the allocated rbds to the allocator allocated list */ 448 - list_splice_tail(&local_allocated, &rba->rbd_allocated); 449 - /* add the unused rbds back to the allocator empty list */ 450 - list_splice_tail(&local_empty, &rba->rbd_empty); 451 - spin_unlock(&rba->lock); 452 - 453 - atomic_dec(&rba->req_pending); 454 - atomic_inc(&rba->req_ready); 455 - } 456 - } 457 - 458 - /* 459 - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages 460 - .* 461 - .* Called by queue when the queue posted allocation request and 462 - * has freed 8 RBDs in order to restock itself. 463 - */ 464 - static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 465 - struct iwl_rx_mem_buffer 466 - *out[RX_CLAIM_REQ_ALLOC]) 467 - { 468 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 469 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 470 - int i; 471 - 472 - if (atomic_dec_return(&rba->req_ready) < 0) { 473 - atomic_inc(&rba->req_ready); 474 - IWL_DEBUG_RX(trans, 475 - "Allocation request not ready, pending requests = %d\n", 476 - atomic_read(&rba->req_pending)); 477 - return -ENOMEM; 478 - } 479 - 480 - spin_lock(&rba->lock); 481 - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 482 - /* Get next free Rx buffer, remove it from free list */ 483 - out[i] = list_first_entry(&rba->rbd_allocated, 484 - struct iwl_rx_mem_buffer, list); 485 - list_del(&out[i]->list); 486 - } 487 - spin_unlock(&rba->lock); 488 - 489 - return 0; 490 - } 491 - 492 - static void iwl_pcie_rx_allocator_work(struct work_struct *data) 493 - { 494 - struct iwl_rb_allocator *rba_p = 495 - container_of(data, struct iwl_rb_allocator, rx_alloc); 496 424 struct iwl_trans_pcie *trans_pcie = 497 - container_of(rba_p, struct iwl_trans_pcie, rba); 425 + container_of(data, struct 
iwl_trans_pcie, rx_replenish); 498 426 499 - iwl_pcie_rx_allocator(trans_pcie->trans); 427 + iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); 500 428 } 501 429 502 430 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 503 431 { 504 432 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 505 433 struct iwl_rxq *rxq = &trans_pcie->rxq; 506 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 507 434 struct device *dev = trans->dev; 508 435 509 436 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 510 437 511 438 spin_lock_init(&rxq->lock); 512 - spin_lock_init(&rba->lock); 513 439 514 440 if (WARN_ON(rxq->bd || rxq->rb_stts)) 515 441 return -EINVAL; ··· 487 637 INIT_LIST_HEAD(&rxq->rx_free); 488 638 INIT_LIST_HEAD(&rxq->rx_used); 489 639 rxq->free_count = 0; 490 - rxq->used_count = 0; 491 640 492 - for (i = 0; i < RX_QUEUE_SIZE; i++) 641 + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 493 642 list_add(&rxq->pool[i].list, &rxq->rx_used); 494 - } 495 - 496 - static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) 497 - { 498 - int i; 499 - 500 - lockdep_assert_held(&rba->lock); 501 - 502 - INIT_LIST_HEAD(&rba->rbd_allocated); 503 - INIT_LIST_HEAD(&rba->rbd_empty); 504 - 505 - for (i = 0; i < RX_POOL_SIZE; i++) 506 - list_add(&rba->pool[i].list, &rba->rbd_empty); 507 - } 508 - 509 - static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) 510 - { 511 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 512 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 513 - int i; 514 - 515 - lockdep_assert_held(&rba->lock); 516 - 517 - for (i = 0; i < RX_POOL_SIZE; i++) { 518 - if (!rba->pool[i].page) 519 - continue; 520 - dma_unmap_page(trans->dev, rba->pool[i].page_dma, 521 - PAGE_SIZE << trans_pcie->rx_page_order, 522 - DMA_FROM_DEVICE); 523 - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); 524 - rba->pool[i].page = NULL; 525 - } 526 643 } 527 644 528 645 int iwl_pcie_rx_init(struct iwl_trans *trans) 529 
646 { 530 647 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 531 648 struct iwl_rxq *rxq = &trans_pcie->rxq; 532 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 533 649 int i, err; 534 650 535 651 if (!rxq->bd) { ··· 503 687 if (err) 504 688 return err; 505 689 } 506 - if (!rba->alloc_wq) 507 - rba->alloc_wq = alloc_workqueue("rb_allocator", 508 - WQ_HIGHPRI | WQ_UNBOUND, 1); 509 - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 510 - 511 - spin_lock(&rba->lock); 512 - atomic_set(&rba->req_pending, 0); 513 - atomic_set(&rba->req_ready, 0); 514 - /* free all first - we might be reconfigured for a different size */ 515 - iwl_pcie_rx_free_rba(trans); 516 - iwl_pcie_rx_init_rba(rba); 517 - spin_unlock(&rba->lock); 518 690 519 691 spin_lock(&rxq->lock); 692 + 693 + INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); 520 694 521 695 /* free all first - we might be reconfigured for a different size */ 522 696 iwl_pcie_rxq_free_rbs(trans); ··· 522 716 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 523 717 spin_unlock(&rxq->lock); 524 718 525 - iwl_pcie_rx_replenish(trans); 719 + iwl_pcie_rx_replenish(trans, GFP_KERNEL); 526 720 527 721 iwl_pcie_rx_hw_init(trans, rxq); 528 722 ··· 537 731 { 538 732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 539 733 struct iwl_rxq *rxq = &trans_pcie->rxq; 540 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 541 734 542 735 /*if rxq->bd is NULL, it means that nothing has been allocated, 543 736 * exit now */ ··· 545 740 return; 546 741 } 547 742 548 - cancel_work_sync(&rba->rx_alloc); 549 - if (rba->alloc_wq) { 550 - destroy_workqueue(rba->alloc_wq); 551 - rba->alloc_wq = NULL; 552 - } 553 - 554 - spin_lock(&rba->lock); 555 - iwl_pcie_rx_free_rba(trans); 556 - spin_unlock(&rba->lock); 743 + cancel_work_sync(&trans_pcie->rx_replenish); 557 744 558 745 spin_lock(&rxq->lock); 559 746 iwl_pcie_rxq_free_rbs(trans); ··· 564 767 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which 
is NULL\n"); 565 768 rxq->rb_stts_dma = 0; 566 769 rxq->rb_stts = NULL; 567 - } 568 - 569 - /* 570 - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs 571 - * 572 - * Called when a RBD can be reused. The RBD is transferred to the allocator. 573 - * When there are 2 empty RBDs - a request for allocation is posted 574 - */ 575 - static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, 576 - struct iwl_rx_mem_buffer *rxb, 577 - struct iwl_rxq *rxq) 578 - { 579 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 580 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 581 - 582 - /* Count the used RBDs */ 583 - rxq->used_count++; 584 - 585 - /* Move the RBD to the used list, will be moved to allocator in batches 586 - * before claiming or posting a request*/ 587 - list_add_tail(&rxb->list, &rxq->rx_used); 588 - 589 - /* If we have RX_POST_REQ_ALLOC new released rx buffers - 590 - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is 591 - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, 592 - * after but we still need to post another request. 593 - */ 594 - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 595 - /* Move the 2 RBDs to the allocator ownership. 
596 - Allocator has another 6 from pool for the request completion*/ 597 - spin_lock(&rba->lock); 598 - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 599 - spin_unlock(&rba->lock); 600 - 601 - atomic_inc(&rba->req_pending); 602 - queue_work(rba->alloc_wq, &rba->rx_alloc); 603 - } 604 770 } 605 771 606 772 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, ··· 688 928 */ 689 929 __free_pages(rxb->page, trans_pcie->rx_page_order); 690 930 rxb->page = NULL; 691 - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 931 + list_add_tail(&rxb->list, &rxq->rx_used); 692 932 } else { 693 933 list_add_tail(&rxb->list, &rxq->rx_free); 694 934 rxq->free_count++; 695 935 } 696 936 } else 697 - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 937 + list_add_tail(&rxb->list, &rxq->rx_used); 698 938 } 699 939 700 940 /* ··· 704 944 { 705 945 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 706 946 struct iwl_rxq *rxq = &trans_pcie->rxq; 707 - u32 r, i, j; 947 + u32 r, i; 948 + u8 fill_rx = 0; 949 + u32 count = 8; 950 + int total_empty; 708 951 709 952 restart: 710 953 spin_lock(&rxq->lock); ··· 720 957 if (i == r) 721 958 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 722 959 960 + /* calculate total frames need to be restock after handling RX */ 961 + total_empty = r - rxq->write_actual; 962 + if (total_empty < 0) 963 + total_empty += RX_QUEUE_SIZE; 964 + 965 + if (total_empty > (RX_QUEUE_SIZE / 2)) 966 + fill_rx = 1; 967 + 723 968 while (i != r) { 724 969 struct iwl_rx_mem_buffer *rxb; 725 970 ··· 739 968 iwl_pcie_rx_handle_rb(trans, rxb); 740 969 741 970 i = (i + 1) & RX_QUEUE_MASK; 742 - 743 - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 744 - * try to claim the pre-allocated buffers from the allocator */ 745 - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 746 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 747 - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 748 - 749 - /* Add the remaining 6 empty RBDs for allocator use */ 750 - 
spin_lock(&rba->lock); 751 - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 752 - spin_unlock(&rba->lock); 753 - 754 - /* If not ready - continue, will try to reclaim later. 755 - * No need to reschedule work - allocator exits only on 756 - * success */ 757 - if (!iwl_pcie_rx_allocator_get(trans, out)) { 758 - /* If success - then RX_CLAIM_REQ_ALLOC 759 - * buffers were retrieved and should be added 760 - * to free list */ 761 - rxq->used_count -= RX_CLAIM_REQ_ALLOC; 762 - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { 763 - list_add_tail(&out[j]->list, 764 - &rxq->rx_free); 765 - rxq->free_count++; 766 - } 971 + /* If there are a lot of unused frames, 972 + * restock the Rx queue so ucode wont assert. */ 973 + if (fill_rx) { 974 + count++; 975 + if (count >= 8) { 976 + rxq->read = i; 977 + spin_unlock(&rxq->lock); 978 + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); 979 + count = 0; 980 + goto restart; 767 981 } 768 - } 769 - /* handle restock for two cases: 770 - * - we just pulled buffers from the allocator 771 - * - we have 8+ unstolen pages accumulated */ 772 - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { 773 - rxq->read = i; 774 - spin_unlock(&rxq->lock); 775 - iwl_pcie_rxq_restock(trans); 776 - goto restart; 777 982 } 778 983 } 779 984 780 985 /* Backtrack one entry */ 781 986 rxq->read = i; 782 987 spin_unlock(&rxq->lock); 988 + 989 + if (fill_rx) 990 + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); 991 + else 992 + iwl_pcie_rxq_restock(trans); 783 993 784 994 if (trans_pcie->napi.poll) 785 995 napi_gro_flush(&trans_pcie->napi, false);
+29 -23
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 182 182 183 183 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 184 184 { 185 - if (!trans->cfg->apmg_not_supported) 185 + if (trans->cfg->apmg_not_supported) 186 186 return; 187 187 188 188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) ··· 2459 2459 struct iwl_trans_pcie *trans_pcie; 2460 2460 struct iwl_trans *trans; 2461 2461 u16 pci_cmd; 2462 - int err; 2462 + int ret; 2463 2463 2464 2464 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2465 2465 &pdev->dev, cfg, &trans_ops_pcie, 0); ··· 2474 2474 spin_lock_init(&trans_pcie->ref_lock); 2475 2475 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2476 2476 2477 - err = pci_enable_device(pdev); 2478 - if (err) 2477 + ret = pci_enable_device(pdev); 2478 + if (ret) 2479 2479 goto out_no_pci; 2480 2480 2481 2481 if (!cfg->base_params->pcie_l1_allowed) { ··· 2491 2491 2492 2492 pci_set_master(pdev); 2493 2493 2494 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2495 - if (!err) 2496 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2497 - if (err) { 2498 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2499 - if (!err) 2500 - err = pci_set_consistent_dma_mask(pdev, 2494 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2495 + if (!ret) 2496 + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2497 + if (ret) { 2498 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2499 + if (!ret) 2500 + ret = pci_set_consistent_dma_mask(pdev, 2501 2501 DMA_BIT_MASK(32)); 2502 2502 /* both attempts failed: */ 2503 - if (err) { 2503 + if (ret) { 2504 2504 dev_err(&pdev->dev, "No suitable DMA available\n"); 2505 2505 goto out_pci_disable_device; 2506 2506 } 2507 2507 } 2508 2508 2509 - err = pci_request_regions(pdev, DRV_NAME); 2510 - if (err) { 2509 + ret = pci_request_regions(pdev, DRV_NAME); 2510 + if (ret) { 2511 2511 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2512 2512 goto out_pci_disable_device; 2513 2513 } ··· 2515 2515 trans_pcie->hw_base = 
pci_ioremap_bar(pdev, 0); 2516 2516 if (!trans_pcie->hw_base) { 2517 2517 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2518 - err = -ENODEV; 2518 + ret = -ENODEV; 2519 2519 goto out_pci_release_regions; 2520 2520 } 2521 2521 ··· 2527 2527 trans_pcie->pci_dev = pdev; 2528 2528 iwl_disable_interrupts(trans); 2529 2529 2530 - err = pci_enable_msi(pdev); 2531 - if (err) { 2532 - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 2530 + ret = pci_enable_msi(pdev); 2531 + if (ret) { 2532 + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); 2533 2533 /* enable rfkill interrupt: hw bug w/a */ 2534 2534 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2535 2535 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { ··· 2547 2547 */ 2548 2548 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2549 2549 unsigned long flags; 2550 - int ret; 2551 2550 2552 2551 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2553 2552 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2553 + 2554 + ret = iwl_pcie_prepare_card_hw(trans); 2555 + if (ret) { 2556 + IWL_WARN(trans, "Exit HW not ready\n"); 2557 + goto out_pci_disable_msi; 2558 + } 2554 2559 2555 2560 /* 2556 2561 * in-order to recognize C step driver should read chip version ··· 2596 2591 /* Initialize the wait queue for commands */ 2597 2592 init_waitqueue_head(&trans_pcie->wait_command_queue); 2598 2593 2599 - if (iwl_pcie_alloc_ict(trans)) 2594 + ret = iwl_pcie_alloc_ict(trans); 2595 + if (ret) 2600 2596 goto out_pci_disable_msi; 2601 2597 2602 - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2598 + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2603 2599 iwl_pcie_irq_handler, 2604 2600 IRQF_SHARED, DRV_NAME, trans); 2605 - if (err) { 2601 + if (ret) { 2606 2602 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2607 2603 goto out_free_ict; 2608 2604 } ··· 2623 2617 pci_disable_device(pdev); 2624 2618 out_no_pci: 2625 2619 iwl_trans_free(trans); 2626 - return ERR_PTR(err); 2620 + return ERR_PTR(ret); 2627 2621 }
+3 -3
drivers/net/xen-netback/netback.c
··· 1566 1566 smp_rmb(); 1567 1567 1568 1568 while (dc != dp) { 1569 - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); 1569 + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); 1570 1570 pending_idx = 1571 1571 queue->dealloc_ring[pending_index(dc++)]; 1572 1572 1573 - pending_idx_release[gop-queue->tx_unmap_ops] = 1573 + pending_idx_release[gop - queue->tx_unmap_ops] = 1574 1574 pending_idx; 1575 - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = 1575 + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = 1576 1576 queue->mmap_pages[pending_idx]; 1577 1577 gnttab_set_unmap_op(gop, 1578 1578 idx_to_kaddr(queue, pending_idx),
+17
include/net/cfg80211.h
··· 4868 4868 struct cfg80211_chan_def *chandef, 4869 4869 enum nl80211_iftype iftype); 4870 4870 4871 + /** 4872 + * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation 4873 + * @wiphy: the wiphy 4874 + * @chandef: the channel definition 4875 + * @iftype: interface type 4876 + * 4877 + * Return: %true if there is no secondary channel or the secondary channel(s) 4878 + * can be used for beaconing (i.e. is not a radar channel etc.). This version 4879 + * also checks if IR-relaxation conditions apply, to allow beaconing under 4880 + * more permissive conditions. 4881 + * 4882 + * Requires the RTNL to be held. 4883 + */ 4884 + bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, 4885 + struct cfg80211_chan_def *chandef, 4886 + enum nl80211_iftype iftype); 4887 + 4871 4888 /* 4872 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4873 4890 * @dev: the device which switched channels
+1
include/net/ip.h
··· 161 161 } 162 162 163 163 /* datagram.c */ 164 + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 164 165 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165 166 166 167 void ip4_datagram_release_cb(struct sock *sk);
+1
net/ax25/ax25_subr.c
··· 264 264 { 265 265 ax25_clear_queues(ax25); 266 266 267 + ax25_stop_heartbeat(ax25); 267 268 ax25_stop_t1timer(ax25); 268 269 ax25_stop_t2timer(ax25); 269 270 ax25_stop_t3timer(ax25);
-1
net/bridge/br_mdb.c
··· 351 351 if (state == MDB_TEMPORARY) 352 352 mod_timer(&p->timer, now + br->multicast_membership_interval); 353 353 354 - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); 355 354 return 0; 356 355 } 357 356
+30 -7
net/bridge/br_multicast.c
··· 39 39 struct bridge_mcast_own_query *query); 40 40 static void br_multicast_add_router(struct net_bridge *br, 41 41 struct net_bridge_port *port); 42 + static void br_ip4_multicast_leave_group(struct net_bridge *br, 43 + struct net_bridge_port *port, 44 + __be32 group, 45 + __u16 vid); 46 + #if IS_ENABLED(CONFIG_IPV6) 47 + static void br_ip6_multicast_leave_group(struct net_bridge *br, 48 + struct net_bridge_port *port, 49 + const struct in6_addr *group, 50 + __u16 vid); 51 + #endif 42 52 unsigned int br_mdb_rehash_seq; 43 53 44 54 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) ··· 1020 1010 continue; 1021 1011 } 1022 1012 1023 - err = br_ip4_multicast_add_group(br, port, group, vid); 1024 - if (err) 1025 - break; 1013 + if ((type == IGMPV3_CHANGE_TO_INCLUDE || 1014 + type == IGMPV3_MODE_IS_INCLUDE) && 1015 + ntohs(grec->grec_nsrcs) == 0) { 1016 + br_ip4_multicast_leave_group(br, port, group, vid); 1017 + } else { 1018 + err = br_ip4_multicast_add_group(br, port, group, vid); 1019 + if (err) 1020 + break; 1021 + } 1026 1022 } 1027 1023 1028 1024 return err; ··· 1087 1071 continue; 1088 1072 } 1089 1073 1090 - err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1091 - vid); 1092 - if (err) 1093 - break; 1074 + if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || 1075 + grec->grec_type == MLD2_MODE_IS_INCLUDE) && 1076 + ntohs(*nsrcs) == 0) { 1077 + br_ip6_multicast_leave_group(br, port, &grec->grec_mca, 1078 + vid); 1079 + } else { 1080 + err = br_ip6_multicast_add_group(br, port, 1081 + &grec->grec_mca, vid); 1082 + if (!err) 1083 + break; 1084 + } 1094 1085 } 1095 1086 1096 1087 return err;
+8 -11
net/caif/caif_socket.c
··· 121 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 122 122 * not dropped, but CAIF is sending flow off instead. 123 123 */ 124 - static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124 + static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 125 125 { 126 126 int err; 127 127 unsigned long flags; 128 128 struct sk_buff_head *list = &sk->sk_receive_queue; 129 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 130 + bool queued = false; 130 131 131 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { ··· 140 139 141 140 err = sk_filter(sk, skb); 142 141 if (err) 143 - return err; 142 + goto out; 143 + 144 144 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 145 set_rx_flow_off(cf_sk); 146 146 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); ··· 149 147 } 150 148 skb->dev = NULL; 151 149 skb_set_owner_r(skb, sk); 152 - /* Cache the SKB length before we tack it onto the receive 153 - * queue. Once it is added it no longer belongs to us and 154 - * may be freed by other threads of control pulling packets 155 - * from the queue. 156 - */ 157 150 spin_lock_irqsave(&list->lock, flags); 158 - if (!sock_flag(sk, SOCK_DEAD)) 151 + queued = !sock_flag(sk, SOCK_DEAD); 152 + if (queued) 159 153 __skb_queue_tail(list, skb); 160 154 spin_unlock_irqrestore(&list->lock, flags); 161 - 162 - if (!sock_flag(sk, SOCK_DEAD)) 155 + out: 156 + if (queued) 163 157 sk->sk_data_ready(sk); 164 158 else 165 159 kfree_skb(skb); 166 - return 0; 167 160 } 168 161 169 162 /* Packet Receive Callback function called from CAIF Stack */
+47 -9
net/core/datagram.c
··· 131 131 goto out; 132 132 } 133 133 134 + static int skb_set_peeked(struct sk_buff *skb) 135 + { 136 + struct sk_buff *nskb; 137 + 138 + if (skb->peeked) 139 + return 0; 140 + 141 + /* We have to unshare an skb before modifying it. */ 142 + if (!skb_shared(skb)) 143 + goto done; 144 + 145 + nskb = skb_clone(skb, GFP_ATOMIC); 146 + if (!nskb) 147 + return -ENOMEM; 148 + 149 + skb->prev->next = nskb; 150 + skb->next->prev = nskb; 151 + nskb->prev = skb->prev; 152 + nskb->next = skb->next; 153 + 154 + consume_skb(skb); 155 + skb = nskb; 156 + 157 + done: 158 + skb->peeked = 1; 159 + 160 + return 0; 161 + } 162 + 134 163 /** 135 164 * __skb_recv_datagram - Receive a datagram skbuff 136 165 * @sk: socket ··· 194 165 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 195 166 int *peeked, int *off, int *err) 196 167 { 168 + struct sk_buff_head *queue = &sk->sk_receive_queue; 197 169 struct sk_buff *skb, *last; 170 + unsigned long cpu_flags; 198 171 long timeo; 199 172 /* 200 173 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() ··· 215 184 * Look at current nfs client by the way... 216 185 * However, this function was correct in any case. 
8) 217 186 */ 218 - unsigned long cpu_flags; 219 - struct sk_buff_head *queue = &sk->sk_receive_queue; 220 187 int _off = *off; 221 188 222 189 last = (struct sk_buff *)queue; ··· 228 199 _off -= skb->len; 229 200 continue; 230 201 } 231 - skb->peeked = 1; 202 + 203 + error = skb_set_peeked(skb); 204 + if (error) 205 + goto unlock_err; 206 + 232 207 atomic_inc(&skb->users); 233 208 } else 234 209 __skb_unlink(skb, queue); ··· 256 223 257 224 return NULL; 258 225 226 + unlock_err: 227 + spin_unlock_irqrestore(&queue->lock, cpu_flags); 259 228 no_packet: 260 229 *err = error; 261 230 return NULL; ··· 657 622 !skb->csum_complete_sw) 658 623 netdev_rx_csum_fault(skb->dev); 659 624 } 660 - skb->csum_valid = !sum; 625 + if (!skb_shared(skb)) 626 + skb->csum_valid = !sum; 661 627 return sum; 662 628 } 663 629 EXPORT_SYMBOL(__skb_checksum_complete_head); ··· 678 642 netdev_rx_csum_fault(skb->dev); 679 643 } 680 644 681 - /* Save full packet checksum */ 682 - skb->csum = csum; 683 - skb->ip_summed = CHECKSUM_COMPLETE; 684 - skb->csum_complete_sw = 1; 685 - skb->csum_valid = !sum; 645 + if (!skb_shared(skb)) { 646 + /* Save full packet checksum */ 647 + skb->csum = csum; 648 + skb->ip_summed = CHECKSUM_COMPLETE; 649 + skb->csum_complete_sw = 1; 650 + skb->csum_valid = !sum; 651 + } 686 652 687 653 return sum; 688 654 }
+3 -1
net/core/dst.c
··· 284 284 int newrefcnt; 285 285 286 286 newrefcnt = atomic_dec_return(&dst->__refcnt); 287 - WARN_ON(newrefcnt < 0); 287 + if (unlikely(newrefcnt < 0)) 288 + net_warn_ratelimited("%s: dst:%p refcnt:%d\n", 289 + __func__, dst, newrefcnt); 288 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 289 291 call_rcu(&dst->rcu_head, dst_destroy_rcu); 290 292 }
+7 -4
net/core/rtnetlink.c
··· 1804 1804 goto errout; 1805 1805 1806 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1807 - if (nla_type(attr) != IFLA_VF_PORT) 1808 - continue; 1809 - err = nla_parse_nested(port, IFLA_PORT_MAX, 1810 - attr, ifla_port_policy); 1807 + if (nla_type(attr) != IFLA_VF_PORT || 1808 + nla_len(attr) < NLA_HDRLEN) { 1809 + err = -EINVAL; 1810 + goto errout; 1811 + } 1812 + err = nla_parse_nested(port, IFLA_PORT_MAX, attr, 1813 + ifla_port_policy); 1811 1814 if (err < 0) 1812 1815 goto errout; 1813 1816 if (!port[IFLA_PORT_VF]) {
+12 -4
net/ipv4/datagram.c
··· 20 20 #include <net/route.h> 21 21 #include <net/tcp_states.h> 22 22 23 - int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23 + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 24 24 { 25 25 struct inet_sock *inet = inet_sk(sk); 26 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; ··· 38 38 return -EAFNOSUPPORT; 39 39 40 40 sk_dst_reset(sk); 41 - 42 - lock_sock(sk); 43 41 44 42 oif = sk->sk_bound_dev_if; 45 43 saddr = inet->inet_saddr; ··· 80 82 sk_dst_set(sk, &rt->dst); 81 83 err = 0; 82 84 out: 83 - release_sock(sk); 84 85 return err; 86 + } 87 + EXPORT_SYMBOL(__ip4_datagram_connect); 88 + 89 + int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 90 + { 91 + int res; 92 + 93 + lock_sock(sk); 94 + res = __ip4_datagram_connect(sk, uaddr, addr_len); 95 + release_sock(sk); 96 + return res; 85 97 } 86 98 EXPORT_SYMBOL(ip4_datagram_connect); 87 99
+5 -6
net/ipv4/inet_hashtables.c
··· 624 624 625 625 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 626 626 { 627 + unsigned int locksz = sizeof(spinlock_t); 627 628 unsigned int i, nblocks = 1; 628 629 629 - if (sizeof(spinlock_t) != 0) { 630 + if (locksz != 0) { 630 631 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 - nblocks = max_t(unsigned int, 632 - 2 * L1_CACHE_BYTES / sizeof(spinlock_t), 633 - 1); 632 + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); 634 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 635 634 636 635 /* no more locks than number of hash buckets */ 637 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 638 637 639 - hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 + hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, 640 639 GFP_KERNEL | __GFP_NOWARN); 641 640 if (!hashinfo->ehash_locks) 642 - hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 + hashinfo->ehash_locks = vmalloc(nblocks * locksz); 643 642 644 643 if (!hashinfo->ehash_locks) 645 644 return -ENOMEM;
+4 -2
net/ipv4/ip_fragment.c
··· 351 351 ihl = ip_hdrlen(skb); 352 352 353 353 /* Determine the position of this fragment. */ 354 - end = offset + skb->len - ihl; 354 + end = offset + skb->len - skb_network_offset(skb) - ihl; 355 355 err = -EINVAL; 356 356 357 357 /* Is this the final fragment? */ ··· 381 381 goto err; 382 382 383 383 err = -ENOMEM; 384 - if (!pskb_pull(skb, ihl)) 384 + if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) 385 385 goto err; 386 386 387 387 err = pskb_trim_rcsum(skb, end - offset); ··· 640 640 } else { 641 641 iph->frag_off = 0; 642 642 } 643 + 644 + ip_send_check(iph); 643 645 644 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 645 647 qp->q.fragments = NULL;
+1 -2
net/ipv4/tcp_input.c
··· 1917 1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1918 1918 struct tcp_sock *tp = tcp_sk(sk); 1919 1919 struct sk_buff *skb; 1920 - bool new_recovery = false; 1920 + bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; 1921 1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1922 1922 1923 1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1924 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1925 1925 !after(tp->high_seq, tp->snd_una) || 1926 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1927 - new_recovery = true; 1928 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1929 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1930 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
+15 -5
net/ipv6/datagram.c
··· 40 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 41 41 } 42 42 43 - int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43 + static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 44 44 { 45 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 46 46 struct inet_sock *inet = inet_sk(sk); ··· 56 56 if (usin->sin6_family == AF_INET) { 57 57 if (__ipv6_only_sock(sk)) 58 58 return -EAFNOSUPPORT; 59 - err = ip4_datagram_connect(sk, uaddr, addr_len); 59 + err = __ip4_datagram_connect(sk, uaddr, addr_len); 60 60 goto ipv4_connected; 61 61 } 62 62 ··· 98 98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 99 99 sin.sin_port = usin->sin6_port; 100 100 101 - err = ip4_datagram_connect(sk, 102 - (struct sockaddr *) &sin, 103 - sizeof(sin)); 101 + err = __ip4_datagram_connect(sk, 102 + (struct sockaddr *) &sin, 103 + sizeof(sin)); 104 104 105 105 ipv4_connected: 106 106 if (err) ··· 203 203 out: 204 204 fl6_sock_release(flowlabel); 205 205 return err; 206 + } 207 + 208 + int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 209 + { 210 + int res; 211 + 212 + lock_sock(sk); 213 + res = __ip6_datagram_connect(sk, uaddr, addr_len); 214 + release_sock(sk); 215 + return res; 206 216 } 207 217 EXPORT_SYMBOL_GPL(ip6_datagram_connect); 208 218
-2
net/ipv6/ip6_offload.c
··· 292 292 static const struct net_offload sit_offload = { 293 293 .callbacks = { 294 294 .gso_segment = ipv6_gso_segment, 295 - .gro_receive = ipv6_gro_receive, 296 - .gro_complete = ipv6_gro_complete, 297 295 }, 298 296 }; 299 297
+1
net/mac80211/debugfs_netdev.c
··· 723 723 724 724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 725 725 sdata->vif.debugfs_dir = NULL; 726 + sdata->debugfs.subdir_stations = NULL; 726 727 } 727 728 728 729 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
+14 -11
net/mac80211/iface.c
··· 1863 1863 ieee80211_teardown_sdata(sdata); 1864 1864 } 1865 1865 1866 - /* 1867 - * Remove all interfaces, may only be called at hardware unregistration 1868 - * time because it doesn't do RCU-safe list removals. 1869 - */ 1870 1866 void ieee80211_remove_interfaces(struct ieee80211_local *local) 1871 1867 { 1872 1868 struct ieee80211_sub_if_data *sdata, *tmp; ··· 1871 1875 1872 1876 ASSERT_RTNL(); 1873 1877 1874 - /* 1875 - * Close all AP_VLAN interfaces first, as otherwise they 1876 - * might be closed while the AP interface they belong to 1877 - * is closed, causing unregister_netdevice_many() to crash. 1878 + /* Before destroying the interfaces, make sure they're all stopped so 1879 + * that the hardware is stopped. Otherwise, the driver might still be 1880 + * iterating the interfaces during the shutdown, e.g. from a worker 1881 + * or from RX processing or similar, and if it does so (using atomic 1882 + * iteration) while we're manipulating the list, the iteration will 1883 + * crash. 1884 + * 1885 + * After this, the hardware should be stopped and the driver should 1886 + * have stopped all of its activities, so that we can do RCU-unaware 1887 + * manipulations of the interface list below. 1878 1888 */ 1879 - list_for_each_entry(sdata, &local->interfaces, list) 1880 - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1881 - dev_close(sdata->dev); 1889 + cfg80211_shutdown_all_interfaces(local->hw.wiphy); 1890 + 1891 + WARN(local->open_count, "%s: open count remains %d\n", 1892 + wiphy_name(local->hw.wiphy), local->open_count); 1882 1893 1883 1894 mutex_lock(&local->iflist_mtx); 1884 1895 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
+4 -1
net/mac80211/mesh_plink.c
··· 306 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 307 307 /* AID */ 308 308 pos = skb_put(skb, 2); 309 - put_unaligned_le16(plid, pos + 2); 309 + put_unaligned_le16(plid, pos); 310 310 } 311 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 312 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || ··· 1122 1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1123 1123 baseaddr += 4; 1124 1124 baselen += 4; 1125 + 1126 + if (baselen > len) 1127 + return; 1125 1128 } 1126 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1127 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
+16
net/mac80211/pm.c
··· 76 76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 77 77 continue; 78 78 ieee80211_mgd_quiesce(sdata); 79 + /* If suspended during TX in progress, and wowlan 80 + * is enabled (connection will be active) there 81 + * can be a race where the driver is put out 82 + * of power-save due to TX and during suspend 83 + * dynamic_ps_timer is cancelled and TX packet 84 + * is flushed, leaving the driver in ACTIVE even 85 + * after resuming until dynamic_ps_timer puts 86 + * driver back in DOZE. 87 + */ 88 + if (sdata->u.mgd.associated && 89 + sdata->u.mgd.powersave && 90 + !(local->hw.conf.flags & IEEE80211_CONF_PS)) { 91 + local->hw.conf.flags |= IEEE80211_CONF_PS; 92 + ieee80211_hw_config(local, 93 + IEEE80211_CONF_CHANGE_PS); 94 + } 79 95 } 80 96 81 97 err = drv_suspend(local, wowlan);
+3 -3
net/mac80211/tdls.c
··· 60 60 struct ieee80211_channel *ch; 61 61 struct cfg80211_chan_def chandef; 62 62 int i, subband_start; 63 + struct wiphy *wiphy = sdata->local->hw.wiphy; 63 64 64 65 for (i = start; i <= end; i += spacing) { 65 66 if (!ch_cnt) ··· 71 70 /* we will be active on the channel */ 72 71 cfg80211_chandef_create(&chandef, ch, 73 72 NL80211_CHAN_NO_HT); 74 - if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 75 - &chandef, 76 - sdata->wdev.iftype)) { 73 + if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, 74 + sdata->wdev.iftype)) { 77 75 ch_cnt++; 78 76 /* 79 77 * check if the next channel is also part of
+3 -1
net/mac80211/tx.c
··· 1117 1117 queued = true; 1118 1118 info->control.vif = &tx->sdata->vif; 1119 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1120 - info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | 1121 + IEEE80211_TX_CTL_NO_PS_BUFFER | 1122 + IEEE80211_TX_STATUS_EOSP; 1121 1123 __skb_queue_tail(&tid_tx->pending, skb); 1122 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1123 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
+47 -32
net/netlink/af_netlink.c
··· 357 357 return NULL; 358 358 } 359 359 360 + 361 + static void 362 + __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, 363 + unsigned int order) 364 + { 365 + struct netlink_sock *nlk = nlk_sk(sk); 366 + struct sk_buff_head *queue; 367 + struct netlink_ring *ring; 368 + 369 + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; 370 + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 371 + 372 + spin_lock_bh(&queue->lock); 373 + 374 + ring->frame_max = req->nm_frame_nr - 1; 375 + ring->head = 0; 376 + ring->frame_size = req->nm_frame_size; 377 + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; 378 + 379 + swap(ring->pg_vec_len, req->nm_block_nr); 380 + swap(ring->pg_vec_order, order); 381 + swap(ring->pg_vec, pg_vec); 382 + 383 + __skb_queue_purge(queue); 384 + spin_unlock_bh(&queue->lock); 385 + 386 + WARN_ON(atomic_read(&nlk->mapped)); 387 + 388 + if (pg_vec) 389 + free_pg_vec(pg_vec, order, req->nm_block_nr); 390 + } 391 + 360 392 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 361 - bool closing, bool tx_ring) 393 + bool tx_ring) 362 394 { 363 395 struct netlink_sock *nlk = nlk_sk(sk); 364 396 struct netlink_ring *ring; 365 - struct sk_buff_head *queue; 366 397 void **pg_vec = NULL; 367 398 unsigned int order = 0; 368 - int err; 369 399 370 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 371 - queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; 372 401 373 - if (!closing) { 374 - if (atomic_read(&nlk->mapped)) 375 - return -EBUSY; 376 - if (atomic_read(&ring->pending)) 377 - return -EBUSY; 378 - } 402 + if (atomic_read(&nlk->mapped)) 403 + return -EBUSY; 404 + if (atomic_read(&ring->pending)) 405 + return -EBUSY; 379 406 380 407 if (req->nm_block_nr) { 381 408 if (ring->pg_vec != NULL) ··· 434 407 return -EINVAL; 435 408 } 436 409 437 - err = -EBUSY; 438 410 mutex_lock(&nlk->pg_vec_lock); 439 - if (closing || atomic_read(&nlk->mapped) == 0) { 440 - err = 0; 441 - spin_lock_bh(&queue->lock); 442 - 443 - ring->frame_max = req->nm_frame_nr - 1; 444 - ring->head = 0; 445 - ring->frame_size = req->nm_frame_size; 446 - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; 447 - 448 - swap(ring->pg_vec_len, req->nm_block_nr); 449 - swap(ring->pg_vec_order, order); 450 - swap(ring->pg_vec, pg_vec); 451 - 452 - __skb_queue_purge(queue); 453 - spin_unlock_bh(&queue->lock); 454 - 455 - WARN_ON(atomic_read(&nlk->mapped)); 411 + if (atomic_read(&nlk->mapped) == 0) { 412 + __netlink_set_ring(sk, req, tx_ring, pg_vec, order); 413 + mutex_unlock(&nlk->pg_vec_lock); 414 + return 0; 456 415 } 416 + 457 417 mutex_unlock(&nlk->pg_vec_lock); 458 418 459 419 if (pg_vec) 460 420 free_pg_vec(pg_vec, order, req->nm_block_nr); 461 - return err; 421 + 422 + return -EBUSY; 462 423 } 463 424 464 425 static void netlink_mm_open(struct vm_area_struct *vma) ··· 915 900 916 901 memset(&req, 0, sizeof(req)); 917 902 if (nlk->rx_ring.pg_vec) 918 - netlink_set_ring(sk, &req, true, false); 903 + __netlink_set_ring(sk, &req, false, NULL, 0); 919 904 memset(&req, 0, sizeof(req)); 920 905 if (nlk->tx_ring.pg_vec) 921 - netlink_set_ring(sk, &req, true, true); 906 + __netlink_set_ring(sk, &req, true, NULL, 0); 922 907 } 923 908 #endif /* CONFIG_NETLINK_MMAP */ 924 909 ··· 2238 2223 return -EINVAL; 2239 2224 if (copy_from_user(&req, optval, sizeof(req))) 2240 2225 return -EFAULT; 2241 - err = 
netlink_set_ring(sk, &req, false, 2226 + err = netlink_set_ring(sk, &req, 2242 2227 optname == NETLINK_TX_RING); 2243 2228 break; 2244 2229 }
+1 -1
net/openvswitch/flow_table.c
··· 752 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 753 753 754 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 755 - + (num_possible_nodes() 755 + + (nr_node_ids 756 756 * sizeof(struct flow_stats *)), 757 757 0, 0, NULL); 758 758 if (flow_cache == NULL)
+3
net/sched/act_bpf.c
··· 339 339 bpf_prog_put(prog->filter); 340 340 else 341 341 bpf_prog_destroy(prog->filter); 342 + 343 + kfree(prog->bpf_ops); 344 + kfree(prog->bpf_name); 342 345 } 343 346 344 347 static struct tc_action_ops act_bpf_ops __read_mostly = {
+1 -1
net/sched/cls_bpf.c
··· 378 378 goto errout; 379 379 380 380 if (oldprog) { 381 - list_replace_rcu(&prog->link, &oldprog->link); 381 + list_replace_rcu(&oldprog->link, &prog->link); 382 382 tcf_unbind_filter(tp, &oldprog->res); 383 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 384 384 } else {
+3 -2
net/sched/cls_flow.c
··· 425 425 if (!fnew) 426 426 goto err2; 427 427 428 + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); 429 + 428 430 fold = (struct flow_filter *)*arg; 429 431 if (fold) { 430 432 err = -EINVAL; ··· 488 486 fnew->mask = ~0U; 489 487 fnew->tp = tp; 490 488 get_random_bytes(&fnew->hashrnd, 4); 491 - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); 492 489 } 493 490 494 491 fnew->perturb_timer.function = flow_perturbation; ··· 527 526 if (*arg == 0) 528 527 list_add_tail_rcu(&fnew->list, &head->filters); 529 528 else 530 - list_replace_rcu(&fnew->list, &fold->list); 529 + list_replace_rcu(&fold->list, &fnew->list); 531 530 532 531 *arg = (unsigned long)fnew; 533 532
+1 -1
net/sched/cls_flower.c
··· 499 499 *arg = (unsigned long) fnew; 500 500 501 501 if (fold) { 502 - list_replace_rcu(&fnew->list, &fold->list); 502 + list_replace_rcu(&fold->list, &fnew->list); 503 503 tcf_unbind_filter(tp, &fold->res); 504 504 call_rcu(&fold->rcu, fl_destroy_filter); 505 505 } else {
+11 -2
net/sched/sch_fq_codel.c
··· 155 155 skb = dequeue_head(flow); 156 156 len = qdisc_pkt_len(skb); 157 157 q->backlogs[idx] -= len; 158 - kfree_skb(skb); 159 158 sch->q.qlen--; 160 159 qdisc_qstats_drop(sch); 161 160 qdisc_qstats_backlog_dec(sch, skb); 161 + kfree_skb(skb); 162 162 flow->dropped++; 163 163 return idx; 164 + } 165 + 166 + static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) 167 + { 168 + unsigned int prev_backlog; 169 + 170 + prev_backlog = sch->qstats.backlog; 171 + fq_codel_drop(sch); 172 + return prev_backlog - sch->qstats.backlog; 164 173 } 165 174 166 175 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) ··· 613 604 .enqueue = fq_codel_enqueue, 614 605 .dequeue = fq_codel_dequeue, 615 606 .peek = qdisc_peek_dequeued, 616 - .drop = fq_codel_drop, 607 + .drop = fq_codel_qdisc_drop, 617 608 .init = fq_codel_init, 618 609 .reset = fq_codel_reset, 619 610 .destroy = fq_codel_destroy,
+1 -1
net/sched/sch_sfq.c
··· 306 306 len = qdisc_pkt_len(skb); 307 307 slot->backlog -= len; 308 308 sfq_dec(q, x); 309 - kfree_skb(skb); 310 309 sch->q.qlen--; 311 310 qdisc_qstats_drop(sch); 312 311 qdisc_qstats_backlog_dec(sch, skb); 312 + kfree_skb(skb); 313 313 return len; 314 314 } 315 315
+34 -11
net/wireless/chan.c
··· 797 797 return false; 798 798 } 799 799 800 - bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 801 - struct cfg80211_chan_def *chandef, 802 - enum nl80211_iftype iftype) 800 + static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, 801 + struct cfg80211_chan_def *chandef, 802 + enum nl80211_iftype iftype, 803 + bool check_no_ir) 803 804 { 804 805 bool res; 805 806 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 806 807 IEEE80211_CHAN_RADAR; 807 808 808 - trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); 809 + trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); 809 810 810 - /* 811 - * Under certain conditions suggested by some regulatory bodies a 812 - * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag 813 - * only if such relaxations are not enabled and the conditions are not 814 - * met. 815 - */ 816 - if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan)) 811 + if (check_no_ir) 817 812 prohibited_flags |= IEEE80211_CHAN_NO_IR; 818 813 819 814 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && ··· 822 827 trace_cfg80211_return_bool(res); 823 828 return res; 824 829 } 830 + 831 + bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 832 + struct cfg80211_chan_def *chandef, 833 + enum nl80211_iftype iftype) 834 + { 835 + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); 836 + } 825 837 EXPORT_SYMBOL(cfg80211_reg_can_beacon); 838 + 839 + bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, 840 + struct cfg80211_chan_def *chandef, 841 + enum nl80211_iftype iftype) 842 + { 843 + bool check_no_ir; 844 + 845 + ASSERT_RTNL(); 846 + 847 + /* 848 + * Under certain conditions suggested by some regulatory bodies a 849 + * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag 850 + * only if such relaxations are not enabled and the conditions are not 851 + * met. 
852 + */ 853 + check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, 854 + chandef->chan); 855 + 856 + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); 857 + } 858 + EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); 826 859 827 860 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 828 861 struct cfg80211_chan_def *chandef)
+8 -6
net/wireless/nl80211.c
··· 2003 2003 switch (iftype) { 2004 2004 case NL80211_IFTYPE_AP: 2005 2005 case NL80211_IFTYPE_P2P_GO: 2006 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { 2006 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, 2007 + iftype)) { 2007 2008 result = -EINVAL; 2008 2009 break; 2009 2010 } ··· 3404 3403 } else if (!nl80211_get_ap_channel(rdev, &params)) 3405 3404 return -EINVAL; 3406 3405 3407 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 3408 - wdev->iftype)) 3406 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, 3407 + wdev->iftype)) 3409 3408 return -EINVAL; 3410 3409 3411 3410 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { ··· 6493 6492 if (err) 6494 6493 return err; 6495 6494 6496 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 6497 - wdev->iftype)) 6495 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, 6496 + wdev->iftype)) 6498 6497 return -EINVAL; 6499 6498 6500 6499 err = cfg80211_chandef_dfs_required(wdev->wiphy, ··· 10171 10170 return -EINVAL; 10172 10171 10173 10172 /* we will be active on the TDLS link */ 10174 - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) 10173 + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, 10174 + wdev->iftype)) 10175 10175 return -EINVAL; 10176 10176 10177 10177 /* don't allow switching to DFS channels */
+4 -4
net/wireless/reg.c
··· 544 544 reg_regdb_query(alpha2); 545 545 546 546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { 547 - pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); 547 + pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); 548 548 return -EINVAL; 549 549 } 550 550 551 551 if (!is_world_regdom((char *) alpha2)) 552 - pr_info("Calling CRDA for country: %c%c\n", 552 + pr_debug("Calling CRDA for country: %c%c\n", 553 553 alpha2[0], alpha2[1]); 554 554 else 555 - pr_info("Calling CRDA to update world regulatory domain\n"); 555 + pr_debug("Calling CRDA to update world regulatory domain\n"); 556 556 557 557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); 558 558 } ··· 1589 1589 case NL80211_IFTYPE_AP: 1590 1590 case NL80211_IFTYPE_P2P_GO: 1591 1591 case NL80211_IFTYPE_ADHOC: 1592 - return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); 1592 + return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); 1593 1593 case NL80211_IFTYPE_STATION: 1594 1594 case NL80211_IFTYPE_P2P_CLIENT: 1595 1595 return cfg80211_chandef_usable(wiphy, &chandef,
+7 -4
net/wireless/trace.h
··· 2358 2358 2359 2359 TRACE_EVENT(cfg80211_reg_can_beacon, 2360 2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, 2361 - enum nl80211_iftype iftype), 2362 - TP_ARGS(wiphy, chandef, iftype), 2361 + enum nl80211_iftype iftype, bool check_no_ir), 2362 + TP_ARGS(wiphy, chandef, iftype, check_no_ir), 2363 2363 TP_STRUCT__entry( 2364 2364 WIPHY_ENTRY 2365 2365 CHAN_DEF_ENTRY 2366 2366 __field(enum nl80211_iftype, iftype) 2367 + __field(bool, check_no_ir) 2367 2368 ), 2368 2369 TP_fast_assign( 2369 2370 WIPHY_ASSIGN; 2370 2371 CHAN_DEF_ASSIGN(chandef); 2371 2372 __entry->iftype = iftype; 2373 + __entry->check_no_ir = check_no_ir; 2372 2374 ), 2373 - TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", 2374 - WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) 2375 + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", 2376 + WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, 2377 + BOOL_TO_STR(__entry->check_no_ir)) 2375 2378 ); 2376 2379 2377 2380 TRACE_EVENT(cfg80211_chandef_dfs_required,