Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-5.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"Quite calm.

The noisy DSA driver (embedded switches) changes, and adjustment to
IPv6 IOAM behavior add to diffstat's bottom line but are not scary.

Current release - regressions:

- af_unix: rename UNIX-DGRAM to UNIX to maintain backwards
compatibility

- procfs: revert "add seq_puts() statement for dev_mcast", minor
format change broke user space

Current release - new code bugs:

- dsa: fix bridge_num not getting cleared after ports leaving the
bridge, resource leak

- dsa: tag_dsa: send packets with TX fwd offload from VLAN-unaware
bridges using VID 0, prevent packet drops if pvid is removed

- dsa: mv88e6xxx: keep the pvid at 0 when VLAN-unaware, prevent HW
getting confused about station to VLAN mapping

Previous releases - regressions:

- virtio-net: fix for skb_over_panic inside big mode

- phy: do not shutdown PHYs in READY state

- dsa: mv88e6xxx: don't use PHY_DETECT on internal PHYs, fix link
LED staying lit after ifdown

- mptcp: fix possible infinite wait on recvmsg(MSG_WAITALL)

- mqprio: Correct stats in mqprio_dump_class_stats()

- ice: fix deadlock for Tx timestamp tracking flush

- stmmac: fix feature detection on old hardware

Previous releases - always broken:

- sctp: account stream padding length for reconf chunk

- icmp: fix icmp_ext_echo_iio parsing in icmp_build_probe()

- isdn: cpai: check ctr->cnr to avoid array index out of bound

- isdn: mISDN: fix sleeping function called from invalid context

- nfc: nci: fix potential UAF of rf_conn_info object

- dsa: microchip: prevent ksz_mib_read_work from kicking back in
after it's canceled in .remove and crashing

- dsa: mv88e6xxx: isolate the ATU databases of standalone and bridged
ports

- dsa: sja1105, ocelot: break circular dependency between switch and
tag drivers

- dsa: felix: improve timestamping in presence of packet loss

- mlxsw: thermal: fix out-of-bounds memory accesses

Misc:

- ipv6: ioam: move the check for undefined bits to improve
interoperability"

* tag 'net-5.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (60 commits)
icmp: fix icmp_ext_echo_iio parsing in icmp_build_probe
MAINTAINERS: Update the devicetree documentation path of imx fec driver
sctp: account stream padding length for reconf chunk
mlxsw: thermal: Fix out-of-bounds memory accesses
ethernet: s2io: fix setting mac address during resume
NFC: digital: fix possible memory leak in digital_in_send_sdd_req()
NFC: digital: fix possible memory leak in digital_tg_listen_mdaa()
nfc: fix error handling of nfc_proto_register()
Revert "net: procfs: add seq_puts() statement for dev_mcast"
net: encx24j600: check error in devm_regmap_init_encx24j600
net: korina: select CRC32
net: arc: select CRC32
net: dsa: felix: break at first CPU port during init and teardown
net: dsa: tag_ocelot_8021q: fix inability to inject STP BPDUs into BLOCKING ports
net: dsa: felix: purge skb from TX timestamping queue if it cannot be sent
net: dsa: tag_ocelot_8021q: break circular dependency with ocelot switch lib
net: dsa: tag_ocelot: break circular dependency with ocelot switch lib driver
net: mscc: ocelot: cross-check the sequence id from the timestamp FIFO with the skb PTP header
net: mscc: ocelot: deny TX timestamping of non-PTP packets
net: mscc: ocelot: warn when a PTP IRQ is raised for an unknown skb
...

+1013 -589
+2
Documentation/devicetree/bindings/net/snps,dwmac.yaml
··· 21 21 contains: 22 22 enum: 23 23 - snps,dwmac 24 + - snps,dwmac-3.40a 24 25 - snps,dwmac-3.50a 25 26 - snps,dwmac-3.610 26 27 - snps,dwmac-3.70a ··· 77 76 - rockchip,rk3399-gmac 78 77 - rockchip,rv1108-gmac 79 78 - snps,dwmac 79 + - snps,dwmac-3.40a 80 80 - snps,dwmac-3.50a 81 81 - snps,dwmac-3.610 82 82 - snps,dwmac-3.70a
+2 -1
MAINTAINERS
··· 7440 7440 M: Joakim Zhang <qiangqing.zhang@nxp.com> 7441 7441 L: netdev@vger.kernel.org 7442 7442 S: Maintained 7443 - F: Documentation/devicetree/bindings/net/fsl-fec.txt 7443 + F: Documentation/devicetree/bindings/net/fsl,fec.yaml 7444 7444 F: drivers/net/ethernet/freescale/fec.h 7445 7445 F: drivers/net/ethernet/freescale/fec_main.c 7446 7446 F: drivers/net/ethernet/freescale/fec_ptp.c ··· 11153 11153 F: Documentation/devicetree/bindings/net/dsa/marvell.txt 11154 11154 F: Documentation/networking/devlink/mv88e6xxx.rst 11155 11155 F: drivers/net/dsa/mv88e6xxx/ 11156 + F: include/linux/dsa/mv88e6xxx.h 11156 11157 F: include/linux/platform_data/mv88e6xxx.h 11157 11158 11158 11159 MARVELL ARMADA 3700 PHY DRIVERS
+1 -1
arch/arm/boot/dts/spear3xx.dtsi
··· 47 47 }; 48 48 49 49 gmac: eth@e0800000 { 50 - compatible = "st,spear600-gmac"; 50 + compatible = "snps,dwmac-3.40a"; 51 51 reg = <0xe0800000 0x8000>; 52 52 interrupts = <23 22>; 53 53 interrupt-names = "macirq", "eth_wake_irq";
+5
drivers/isdn/capi/kcapi.c
··· 480 480 481 481 ctr_down(ctr, CAPI_CTR_DETACHED); 482 482 483 + if (ctr->cnr < 1 || ctr->cnr - 1 >= CAPI_MAXCONTR) { 484 + err = -EINVAL; 485 + goto unlock_out; 486 + } 487 + 483 488 if (capi_controller[ctr->cnr - 1] != ctr) { 484 489 err = -EINVAL; 485 490 goto unlock_out;
+1 -1
drivers/isdn/hardware/mISDN/netjet.c
··· 949 949 nj_disable_hwirq(card); 950 950 mode_tiger(&card->bc[0], ISDN_P_NONE); 951 951 mode_tiger(&card->bc[1], ISDN_P_NONE); 952 - card->isac.release(&card->isac); 953 952 spin_unlock_irqrestore(&card->lock, flags); 953 + card->isac.release(&card->isac); 954 954 release_region(card->base, card->base_s); 955 955 card->base_s = 0; 956 956 }
+3 -1
drivers/net/dsa/microchip/ksz_common.c
··· 449 449 void ksz_switch_remove(struct ksz_device *dev) 450 450 { 451 451 /* timer started */ 452 - if (dev->mib_read_interval) 452 + if (dev->mib_read_interval) { 453 + dev->mib_read_interval = 0; 453 454 cancel_delayed_work_sync(&dev->mib_read); 455 + } 454 456 455 457 dev->dev_ops->exit(dev); 456 458 dsa_unregister_switch(dev->ds);
+108 -17
drivers/net/dsa/mv88e6xxx/chip.c
··· 12 12 13 13 #include <linux/bitfield.h> 14 14 #include <linux/delay.h> 15 + #include <linux/dsa/mv88e6xxx.h> 15 16 #include <linux/etherdevice.h> 16 17 #include <linux/ethtool.h> 17 18 #include <linux/if_bridge.h> ··· 750 749 ops = chip->info->ops; 751 750 752 751 mv88e6xxx_reg_lock(chip); 753 - if ((!mv88e6xxx_port_ppu_updates(chip, port) || 752 + /* Internal PHYs propagate their configuration directly to the MAC. 753 + * External PHYs depend on whether the PPU is enabled for this port. 754 + */ 755 + if (((!mv88e6xxx_phy_is_internal(ds, port) && 756 + !mv88e6xxx_port_ppu_updates(chip, port)) || 754 757 mode == MLO_AN_FIXED) && ops->port_sync_link) 755 758 err = ops->port_sync_link(chip, port, mode, false); 756 759 mv88e6xxx_reg_unlock(chip); ··· 777 772 ops = chip->info->ops; 778 773 779 774 mv88e6xxx_reg_lock(chip); 780 - if (!mv88e6xxx_port_ppu_updates(chip, port) || mode == MLO_AN_FIXED) { 775 + /* Internal PHYs propagate their configuration directly to the MAC. 776 + * External PHYs depend on whether the PPU is enabled for this port. 
777 + */ 778 + if ((!mv88e6xxx_phy_is_internal(ds, port) && 779 + !mv88e6xxx_port_ppu_updates(chip, port)) || 780 + mode == MLO_AN_FIXED) { 781 781 /* FIXME: for an automedia port, should we force the link 782 782 * down here - what if the link comes up due to "other" media 783 783 * while we're bringing the port up, how is the exclusivity ··· 1687 1677 return 0; 1688 1678 } 1689 1679 1680 + static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port) 1681 + { 1682 + struct dsa_port *dp = dsa_to_port(chip->ds, port); 1683 + struct mv88e6xxx_port *p = &chip->ports[port]; 1684 + u16 pvid = MV88E6XXX_VID_STANDALONE; 1685 + bool drop_untagged = false; 1686 + int err; 1687 + 1688 + if (dp->bridge_dev) { 1689 + if (br_vlan_enabled(dp->bridge_dev)) { 1690 + pvid = p->bridge_pvid.vid; 1691 + drop_untagged = !p->bridge_pvid.valid; 1692 + } else { 1693 + pvid = MV88E6XXX_VID_BRIDGED; 1694 + } 1695 + } 1696 + 1697 + err = mv88e6xxx_port_set_pvid(chip, port, pvid); 1698 + if (err) 1699 + return err; 1700 + 1701 + return mv88e6xxx_port_drop_untagged(chip, port, drop_untagged); 1702 + } 1703 + 1690 1704 static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, 1691 1705 bool vlan_filtering, 1692 1706 struct netlink_ext_ack *extack) ··· 1724 1690 return -EOPNOTSUPP; 1725 1691 1726 1692 mv88e6xxx_reg_lock(chip); 1693 + 1727 1694 err = mv88e6xxx_port_set_8021q_mode(chip, port, mode); 1695 + if (err) 1696 + goto unlock; 1697 + 1698 + err = mv88e6xxx_port_commit_pvid(chip, port); 1699 + if (err) 1700 + goto unlock; 1701 + 1702 + unlock: 1728 1703 mv88e6xxx_reg_unlock(chip); 1729 1704 1730 1705 return err; ··· 1768 1725 u16 fid; 1769 1726 int err; 1770 1727 1771 - /* Null VLAN ID corresponds to the port private database */ 1728 + /* Ports have two private address databases: one for when the port is 1729 + * standalone and one for when the port is under a bridge and the 1730 + * 802.1Q mode is disabled. 
When the port is standalone, DSA wants its 1731 + * address database to remain 100% empty, so we never load an ATU entry 1732 + * into a standalone port's database. Therefore, translate the null 1733 + * VLAN ID into the port's database used for VLAN-unaware bridging. 1734 + */ 1772 1735 if (vid == 0) { 1773 - err = mv88e6xxx_port_get_fid(chip, port, &fid); 1774 - if (err) 1775 - return err; 1736 + fid = MV88E6XXX_FID_BRIDGED; 1776 1737 } else { 1777 1738 err = mv88e6xxx_vtu_get(chip, vid, &vlan); 1778 1739 if (err) ··· 2170 2123 struct mv88e6xxx_chip *chip = ds->priv; 2171 2124 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 2172 2125 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; 2126 + struct mv88e6xxx_port *p = &chip->ports[port]; 2173 2127 bool warn; 2174 2128 u8 member; 2175 2129 int err; ··· 2204 2156 } 2205 2157 2206 2158 if (pvid) { 2207 - err = mv88e6xxx_port_set_pvid(chip, port, vlan->vid); 2208 - if (err) { 2209 - dev_err(ds->dev, "p%d: failed to set PVID %d\n", 2210 - port, vlan->vid); 2159 + p->bridge_pvid.vid = vlan->vid; 2160 + p->bridge_pvid.valid = true; 2161 + 2162 + err = mv88e6xxx_port_commit_pvid(chip, port); 2163 + if (err) 2211 2164 goto out; 2212 - } 2165 + } else if (vlan->vid && p->bridge_pvid.vid == vlan->vid) { 2166 + /* The old pvid was reinstalled as a non-pvid VLAN */ 2167 + p->bridge_pvid.valid = false; 2168 + 2169 + err = mv88e6xxx_port_commit_pvid(chip, port); 2170 + if (err) 2171 + goto out; 2213 2172 } 2173 + 2214 2174 out: 2215 2175 mv88e6xxx_reg_unlock(chip); 2216 2176 ··· 2268 2212 const struct switchdev_obj_port_vlan *vlan) 2269 2213 { 2270 2214 struct mv88e6xxx_chip *chip = ds->priv; 2215 + struct mv88e6xxx_port *p = &chip->ports[port]; 2271 2216 int err = 0; 2272 2217 u16 pvid; 2273 2218 ··· 2286 2229 goto unlock; 2287 2230 2288 2231 if (vlan->vid == pvid) { 2289 - err = mv88e6xxx_port_set_pvid(chip, port, 0); 2232 + p->bridge_pvid.valid = false; 2233 + 2234 + err = mv88e6xxx_port_commit_pvid(chip, port); 2290 
2235 if (err) 2291 2236 goto unlock; 2292 2237 } ··· 2452 2393 int err; 2453 2394 2454 2395 mv88e6xxx_reg_lock(chip); 2396 + 2455 2397 err = mv88e6xxx_bridge_map(chip, br); 2398 + if (err) 2399 + goto unlock; 2400 + 2401 + err = mv88e6xxx_port_commit_pvid(chip, port); 2402 + if (err) 2403 + goto unlock; 2404 + 2405 + unlock: 2456 2406 mv88e6xxx_reg_unlock(chip); 2457 2407 2458 2408 return err; ··· 2471 2403 struct net_device *br) 2472 2404 { 2473 2405 struct mv88e6xxx_chip *chip = ds->priv; 2406 + int err; 2474 2407 2475 2408 mv88e6xxx_reg_lock(chip); 2409 + 2476 2410 if (mv88e6xxx_bridge_map(chip, br) || 2477 2411 mv88e6xxx_port_vlan_map(chip, port)) 2478 2412 dev_err(ds->dev, "failed to remap in-chip Port VLAN\n"); 2413 + 2414 + err = mv88e6xxx_port_commit_pvid(chip, port); 2415 + if (err) 2416 + dev_err(ds->dev, 2417 + "port %d failed to restore standalone pvid: %pe\n", 2418 + port, ERR_PTR(err)); 2419 + 2479 2420 mv88e6xxx_reg_unlock(chip); 2480 2421 } 2481 2422 ··· 2930 2853 if (err) 2931 2854 return err; 2932 2855 2856 + /* Associate MV88E6XXX_VID_BRIDGED with MV88E6XXX_FID_BRIDGED in the 2857 + * ATU by virtue of the fact that mv88e6xxx_atu_new() will pick it as 2858 + * the first free FID after MV88E6XXX_FID_STANDALONE. This will be used 2859 + * as the private PVID on ports under a VLAN-unaware bridge. 2860 + * Shared (DSA and CPU) ports must also be members of it, to translate 2861 + * the VID from the DSA tag into MV88E6XXX_FID_BRIDGED, instead of 2862 + * relying on their port default FID. 2863 + */ 2864 + err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED, 2865 + MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED, 2866 + false); 2867 + if (err) 2868 + return err; 2869 + 2933 2870 if (chip->info->ops->port_set_jumbo_size) { 2934 2871 err = chip->info->ops->port_set_jumbo_size(chip, port, 10218); 2935 2872 if (err) ··· 3016 2925 * database, and allow bidirectional communication between the 3017 2926 * CPU and DSA port(s), and the other ports. 
3018 2927 */ 3019 - err = mv88e6xxx_port_set_fid(chip, port, 0); 2928 + err = mv88e6xxx_port_set_fid(chip, port, MV88E6XXX_FID_STANDALONE); 3020 2929 if (err) 3021 2930 return err; 3022 2931 ··· 3206 3115 } 3207 3116 } 3208 3117 3118 + err = mv88e6xxx_vtu_setup(chip); 3119 + if (err) 3120 + goto unlock; 3121 + 3209 3122 /* Setup Switch Port Registers */ 3210 3123 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { 3211 3124 if (dsa_is_unused_port(ds, i)) ··· 3236 3141 goto unlock; 3237 3142 3238 3143 err = mv88e6xxx_phy_setup(chip); 3239 - if (err) 3240 - goto unlock; 3241 - 3242 - err = mv88e6xxx_vtu_setup(chip); 3243 3144 if (err) 3244 3145 goto unlock; 3245 3146
+9
drivers/net/dsa/mv88e6xxx/chip.h
··· 21 21 #define EDSA_HLEN 8 22 22 #define MV88E6XXX_N_FID 4096 23 23 24 + #define MV88E6XXX_FID_STANDALONE 0 25 + #define MV88E6XXX_FID_BRIDGED 1 26 + 24 27 /* PVT limits for 4-bit port and 5-bit switch */ 25 28 #define MV88E6XXX_MAX_PVT_SWITCHES 32 26 29 #define MV88E6XXX_MAX_PVT_PORTS 16 ··· 249 246 u16 vid; 250 247 }; 251 248 249 + struct mv88e6xxx_vlan { 250 + u16 vid; 251 + bool valid; 252 + }; 253 + 252 254 struct mv88e6xxx_port { 253 255 struct mv88e6xxx_chip *chip; 254 256 int port; 257 + struct mv88e6xxx_vlan bridge_pvid; 255 258 u64 serdes_stats[2]; 256 259 u64 atu_member_violation; 257 260 u64 atu_miss_violation;
+21
drivers/net/dsa/mv88e6xxx/port.c
··· 1257 1257 return 0; 1258 1258 } 1259 1259 1260 + int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port, 1261 + bool drop_untagged) 1262 + { 1263 + u16 old, new; 1264 + int err; 1265 + 1266 + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &old); 1267 + if (err) 1268 + return err; 1269 + 1270 + if (drop_untagged) 1271 + new = old | MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED; 1272 + else 1273 + new = old & ~MV88E6XXX_PORT_CTL2_DISCARD_UNTAGGED; 1274 + 1275 + if (new == old) 1276 + return 0; 1277 + 1278 + return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new); 1279 + } 1280 + 1260 1281 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port) 1261 1282 { 1262 1283 u16 reg;
+2
drivers/net/dsa/mv88e6xxx/port.h
··· 423 423 phy_interface_t mode); 424 424 int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); 425 425 int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); 426 + int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port, 427 + bool drop_untagged); 426 428 int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); 427 429 int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, 428 430 int upstream_port);
+137 -12
drivers/net/dsa/ocelot/felix.c
··· 266 266 */ 267 267 static int felix_setup_mmio_filtering(struct felix *felix) 268 268 { 269 - unsigned long user_ports = 0, cpu_ports = 0; 269 + unsigned long user_ports = dsa_user_ports(felix->ds); 270 270 struct ocelot_vcap_filter *redirect_rule; 271 271 struct ocelot_vcap_filter *tagging_rule; 272 272 struct ocelot *ocelot = &felix->ocelot; 273 273 struct dsa_switch *ds = felix->ds; 274 - int port, ret; 274 + int cpu = -1, port, ret; 275 275 276 276 tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); 277 277 if (!tagging_rule) ··· 284 284 } 285 285 286 286 for (port = 0; port < ocelot->num_phys_ports; port++) { 287 - if (dsa_is_user_port(ds, port)) 288 - user_ports |= BIT(port); 289 - if (dsa_is_cpu_port(ds, port)) 290 - cpu_ports |= BIT(port); 287 + if (dsa_is_cpu_port(ds, port)) { 288 + cpu = port; 289 + break; 290 + } 291 291 } 292 + 293 + if (cpu < 0) 294 + return -EINVAL; 292 295 293 296 tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE; 294 297 *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588); ··· 328 325 * the CPU port module 329 326 */ 330 327 redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; 331 - redirect_rule->action.port_mask = cpu_ports; 328 + redirect_rule->action.port_mask = BIT(cpu); 332 329 } else { 333 330 /* Trap PTP packets only to the CPU port module (which is 334 331 * redirected to the NPI port) ··· 1077 1074 return 0; 1078 1075 } 1079 1076 1077 + static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port, 1078 + struct sk_buff *skb) 1079 + { 1080 + struct ocelot_port *ocelot_port = ocelot->ports[port]; 1081 + struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; 1082 + struct sk_buff *skb_match = NULL, *skb_tmp; 1083 + unsigned long flags; 1084 + 1085 + if (!clone) 1086 + return; 1087 + 1088 + spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags); 1089 + 1090 + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { 1091 + if (skb != clone) 1092 + continue; 1093 + 
__skb_unlink(skb, &ocelot_port->tx_skbs); 1094 + skb_match = skb; 1095 + break; 1096 + } 1097 + 1098 + spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags); 1099 + 1100 + WARN_ONCE(!skb_match, 1101 + "Could not find skb clone in TX timestamping list\n"); 1102 + } 1103 + 1104 + #define work_to_xmit_work(w) \ 1105 + container_of((w), struct felix_deferred_xmit_work, work) 1106 + 1107 + static void felix_port_deferred_xmit(struct kthread_work *work) 1108 + { 1109 + struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work); 1110 + struct dsa_switch *ds = xmit_work->dp->ds; 1111 + struct sk_buff *skb = xmit_work->skb; 1112 + u32 rew_op = ocelot_ptp_rew_op(skb); 1113 + struct ocelot *ocelot = ds->priv; 1114 + int port = xmit_work->dp->index; 1115 + int retries = 10; 1116 + 1117 + do { 1118 + if (ocelot_can_inject(ocelot, 0)) 1119 + break; 1120 + 1121 + cpu_relax(); 1122 + } while (--retries); 1123 + 1124 + if (!retries) { 1125 + dev_err(ocelot->dev, "port %d failed to inject skb\n", 1126 + port); 1127 + ocelot_port_purge_txtstamp_skb(ocelot, port, skb); 1128 + kfree_skb(skb); 1129 + return; 1130 + } 1131 + 1132 + ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); 1133 + 1134 + consume_skb(skb); 1135 + kfree(xmit_work); 1136 + } 1137 + 1138 + static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port) 1139 + { 1140 + struct dsa_port *dp = dsa_to_port(ds, port); 1141 + struct ocelot *ocelot = ds->priv; 1142 + struct felix *felix = ocelot_to_felix(ocelot); 1143 + struct felix_port *felix_port; 1144 + 1145 + if (!dsa_port_is_user(dp)) 1146 + return 0; 1147 + 1148 + felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL); 1149 + if (!felix_port) 1150 + return -ENOMEM; 1151 + 1152 + felix_port->xmit_worker = felix->xmit_worker; 1153 + felix_port->xmit_work_fn = felix_port_deferred_xmit; 1154 + 1155 + dp->priv = felix_port; 1156 + 1157 + return 0; 1158 + } 1159 + 1160 + static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port) 
1161 + { 1162 + struct dsa_port *dp = dsa_to_port(ds, port); 1163 + struct felix_port *felix_port = dp->priv; 1164 + 1165 + if (!felix_port) 1166 + return; 1167 + 1168 + dp->priv = NULL; 1169 + kfree(felix_port); 1170 + } 1171 + 1080 1172 /* Hardware initialization done here so that we can allocate structures with 1081 1173 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing 1082 1174 * us to allocate structures twice (leak memory) and map PCI memory twice ··· 1200 1102 } 1201 1103 } 1202 1104 1105 + felix->xmit_worker = kthread_create_worker(0, "felix_xmit"); 1106 + if (IS_ERR(felix->xmit_worker)) { 1107 + err = PTR_ERR(felix->xmit_worker); 1108 + goto out_deinit_timestamp; 1109 + } 1110 + 1203 1111 for (port = 0; port < ds->num_ports; port++) { 1204 1112 if (dsa_is_unused_port(ds, port)) 1205 1113 continue; ··· 1216 1112 * bits of vlan tag. 1217 1113 */ 1218 1114 felix_port_qos_map_init(ocelot, port); 1115 + 1116 + err = felix_port_setup_tagger_data(ds, port); 1117 + if (err) { 1118 + dev_err(ds->dev, 1119 + "port %d failed to set up tagger data: %pe\n", 1120 + port, ERR_PTR(err)); 1121 + goto out_deinit_ports; 1122 + } 1219 1123 } 1220 1124 1221 1125 err = ocelot_devlink_sb_register(ocelot); ··· 1238 1126 * there's no real point in checking for errors. 
1239 1127 */ 1240 1128 felix_set_tag_protocol(ds, port, felix->tag_proto); 1129 + break; 1241 1130 } 1242 1131 1243 1132 ds->mtu_enforcement_ingress = true; ··· 1251 1138 if (dsa_is_unused_port(ds, port)) 1252 1139 continue; 1253 1140 1141 + felix_port_teardown_tagger_data(ds, port); 1254 1142 ocelot_deinit_port(ocelot, port); 1255 1143 } 1256 1144 1145 + kthread_destroy_worker(felix->xmit_worker); 1146 + 1147 + out_deinit_timestamp: 1257 1148 ocelot_deinit_timestamp(ocelot); 1258 1149 ocelot_deinit(ocelot); 1259 1150 ··· 1279 1162 continue; 1280 1163 1281 1164 felix_del_tag_protocol(ds, port, felix->tag_proto); 1165 + break; 1282 1166 } 1283 - 1284 - ocelot_devlink_sb_unregister(ocelot); 1285 - ocelot_deinit_timestamp(ocelot); 1286 - ocelot_deinit(ocelot); 1287 1167 1288 1168 for (port = 0; port < ocelot->num_phys_ports; port++) { 1289 1169 if (dsa_is_unused_port(ds, port)) 1290 1170 continue; 1291 1171 1172 + felix_port_teardown_tagger_data(ds, port); 1292 1173 ocelot_deinit_port(ocelot, port); 1293 1174 } 1175 + 1176 + kthread_destroy_worker(felix->xmit_worker); 1177 + 1178 + ocelot_devlink_sb_unregister(ocelot); 1179 + ocelot_deinit_timestamp(ocelot); 1180 + ocelot_deinit(ocelot); 1294 1181 1295 1182 if (felix->info->mdio_bus_free) 1296 1183 felix->info->mdio_bus_free(ocelot); ··· 1412 1291 if (!ocelot->ptp) 1413 1292 return; 1414 1293 1415 - if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) 1294 + if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { 1295 + dev_err_ratelimited(ds->dev, 1296 + "port %d delivering skb without TX timestamp\n", 1297 + port); 1416 1298 return; 1299 + } 1417 1300 1418 1301 if (clone) 1419 1302 OCELOT_SKB_CB(skb)->clone = clone;
+1
drivers/net/dsa/ocelot/felix.h
··· 62 62 resource_size_t switch_base; 63 63 resource_size_t imdio_base; 64 64 enum dsa_tag_protocol tag_proto; 65 + struct kthread_worker *xmit_worker; 65 66 }; 66 67 67 68 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
+1 -2
drivers/net/dsa/sja1105/sja1105_main.c
··· 3117 3117 sja1105_static_config_free(&priv->static_config); 3118 3118 } 3119 3119 3120 - const struct dsa_switch_ops sja1105_switch_ops = { 3120 + static const struct dsa_switch_ops sja1105_switch_ops = { 3121 3121 .get_tag_protocol = sja1105_get_tag_protocol, 3122 3122 .setup = sja1105_setup, 3123 3123 .teardown = sja1105_teardown, ··· 3166 3166 .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload, 3167 3167 .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload, 3168 3168 }; 3169 - EXPORT_SYMBOL_GPL(sja1105_switch_ops); 3170 3169 3171 3170 static const struct of_device_id sja1105_dt_ids[]; 3172 3171
+6 -39
drivers/net/dsa/sja1105/sja1105_ptp.c
··· 64 64 static int sja1105_change_rxtstamping(struct sja1105_private *priv, 65 65 bool on) 66 66 { 67 + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; 67 68 struct sja1105_ptp_data *ptp_data = &priv->ptp_data; 68 69 struct sja1105_general_params_entry *general_params; 69 70 struct sja1105_table *table; ··· 80 79 priv->tagger_data.stampable_skb = NULL; 81 80 } 82 81 ptp_cancel_worker_sync(ptp_data->clock); 83 - skb_queue_purge(&ptp_data->skb_txtstamp_queue); 82 + skb_queue_purge(&tagger_data->skb_txtstamp_queue); 84 83 skb_queue_purge(&ptp_data->skb_rxtstamp_queue); 85 84 86 85 return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING); ··· 453 452 return priv->info->rxtstamp(ds, port, skb); 454 453 } 455 454 456 - void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, 457 - enum sja1110_meta_tstamp dir, u64 tstamp) 458 - { 459 - struct sja1105_private *priv = ds->priv; 460 - struct sja1105_ptp_data *ptp_data = &priv->ptp_data; 461 - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; 462 - struct skb_shared_hwtstamps shwt = {0}; 463 - 464 - /* We don't care about RX timestamps on the CPU port */ 465 - if (dir == SJA1110_META_TSTAMP_RX) 466 - return; 467 - 468 - spin_lock(&ptp_data->skb_txtstamp_queue.lock); 469 - 470 - skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) { 471 - if (SJA1105_SKB_CB(skb)->ts_id != ts_id) 472 - continue; 473 - 474 - __skb_unlink(skb, &ptp_data->skb_txtstamp_queue); 475 - skb_match = skb; 476 - 477 - break; 478 - } 479 - 480 - spin_unlock(&ptp_data->skb_txtstamp_queue.lock); 481 - 482 - if (WARN_ON(!skb_match)) 483 - return; 484 - 485 - shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); 486 - skb_complete_tx_timestamp(skb_match, &shwt); 487 - } 488 - EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp); 489 - 490 455 /* In addition to cloning the skb which is done by the common 491 456 * sja1105_port_txtstamp, we need to generate a timestamp ID and save the 492 457 * packet to the 
TX timestamping queue. ··· 461 494 { 462 495 struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; 463 496 struct sja1105_private *priv = ds->priv; 464 - struct sja1105_ptp_data *ptp_data = &priv->ptp_data; 465 497 struct sja1105_port *sp = &priv->ports[port]; 466 498 u8 ts_id; 467 499 ··· 476 510 477 511 spin_unlock(&sp->data->meta_lock); 478 512 479 - skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone); 513 + skb_queue_tail(&sp->data->skb_txtstamp_queue, clone); 480 514 } 481 515 482 516 /* Called from dsa_skb_tx_timestamp. This callback is just to clone ··· 919 953 /* Only used on SJA1105 */ 920 954 skb_queue_head_init(&ptp_data->skb_rxtstamp_queue); 921 955 /* Only used on SJA1110 */ 922 - skb_queue_head_init(&ptp_data->skb_txtstamp_queue); 956 + skb_queue_head_init(&tagger_data->skb_txtstamp_queue); 923 957 spin_lock_init(&tagger_data->meta_lock); 924 958 925 959 ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev); ··· 937 971 void sja1105_ptp_clock_unregister(struct dsa_switch *ds) 938 972 { 939 973 struct sja1105_private *priv = ds->priv; 974 + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; 940 975 struct sja1105_ptp_data *ptp_data = &priv->ptp_data; 941 976 942 977 if (IS_ERR_OR_NULL(ptp_data->clock)) ··· 945 978 946 979 del_timer_sync(&ptp_data->extts_timer); 947 980 ptp_cancel_worker_sync(ptp_data->clock); 948 - skb_queue_purge(&ptp_data->skb_txtstamp_queue); 981 + skb_queue_purge(&tagger_data->skb_txtstamp_queue); 949 982 skb_queue_purge(&ptp_data->skb_rxtstamp_queue); 950 983 ptp_clock_unregister(ptp_data->clock); 951 984 ptp_data->clock = NULL;
-19
drivers/net/dsa/sja1105/sja1105_ptp.h
··· 8 8 9 9 #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) 10 10 11 - /* Timestamps are in units of 8 ns clock ticks (equivalent to 12 - * a fixed 125 MHz clock). 13 - */ 14 - #define SJA1105_TICK_NS 8 15 - 16 - static inline s64 ns_to_sja1105_ticks(s64 ns) 17 - { 18 - return ns / SJA1105_TICK_NS; 19 - } 20 - 21 - static inline s64 sja1105_ticks_to_ns(s64 ticks) 22 - { 23 - return ticks * SJA1105_TICK_NS; 24 - } 25 - 26 11 /* Calculate the first base_time in the future that satisfies this 27 12 * relationship: 28 13 * ··· 62 77 struct timer_list extts_timer; 63 78 /* Used only on SJA1105 to reconstruct partial timestamps */ 64 79 struct sk_buff_head skb_rxtstamp_queue; 65 - /* Used on SJA1110 where meta frames are generated only for 66 - * 2-step TX timestamps 67 - */ 68 - struct sk_buff_head skb_txtstamp_queue; 69 80 struct ptp_clock_info caps; 70 81 struct ptp_clock *clock; 71 82 struct sja1105_ptp_cmd cmd;
+1
drivers/net/ethernet/Kconfig
··· 100 100 config KORINA 101 101 tristate "Korina (IDT RC32434) Ethernet support" 102 102 depends on MIKROTIK_RB532 || COMPILE_TEST 103 + select CRC32 103 104 select MII 104 105 help 105 106 If you have a Mikrotik RouterBoard 500 or IDT RC32434
+1
drivers/net/ethernet/arc/Kconfig
··· 21 21 depends on ARC || ARCH_ROCKCHIP || COMPILE_TEST 22 22 select MII 23 23 select PHYLIB 24 + select CRC32 24 25 25 26 config ARC_EMAC 26 27 tristate "ARC EMAC support"
+7 -8
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 1313 1313 { 1314 1314 u8 idx; 1315 1315 1316 - spin_lock(&tx->lock); 1317 - 1318 1316 for (idx = 0; idx < tx->len; idx++) { 1319 1317 u8 phy_idx = idx + tx->quad_offset; 1320 1318 1321 - /* Clear any potential residual timestamp in the PHY block */ 1322 - if (!pf->hw.reset_ongoing) 1323 - ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx); 1324 - 1319 + spin_lock(&tx->lock); 1325 1320 if (tx->tstamps[idx].skb) { 1326 1321 dev_kfree_skb_any(tx->tstamps[idx].skb); 1327 1322 tx->tstamps[idx].skb = NULL; 1328 1323 } 1329 - } 1324 + clear_bit(idx, tx->in_use); 1325 + spin_unlock(&tx->lock); 1330 1326 1331 - spin_unlock(&tx->lock); 1327 + /* Clear any potential residual timestamp in the PHY block */ 1328 + if (!pf->hw.reset_ongoing) 1329 + ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx); 1330 + } 1332 1331 } 1333 1332 1334 1333 /**
+3 -4
drivers/net/ethernet/mellanox/mlx5/core/cq.c
··· 155 155 u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; 156 156 int err; 157 157 158 + mlx5_debug_cq_remove(dev, cq); 159 + 158 160 mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); 159 161 mlx5_eq_del_cq(&cq->eq->core, cq); 160 162 ··· 164 162 MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); 165 163 MLX5_SET(destroy_cq_in, in, uid, cq->uid); 166 164 err = mlx5_cmd_exec_in(dev, destroy_cq, in); 167 - if (err) 168 - return err; 169 165 170 166 synchronize_irq(cq->irqn); 171 167 172 - mlx5_debug_cq_remove(dev, cq); 173 168 mlx5_cq_put(cq); 174 169 wait_for_completion(&cq->free); 175 170 176 - return 0; 171 + return err; 177 172 } 178 173 EXPORT_SYMBOL(mlx5_core_destroy_cq); 179 174
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
··· 475 475 esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n"); 476 476 goto err_alloc_wq; 477 477 } 478 - INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work); 479 - queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 480 - msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL)); 481 478 482 479 br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event; 483 480 err = register_switchdev_notifier(&br_offloads->nb); ··· 497 500 err); 498 501 goto err_register_netdev; 499 502 } 503 + INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work); 504 + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 505 + msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL)); 500 506 return; 501 507 502 508 err_register_netdev: ··· 523 523 if (!br_offloads) 524 524 return; 525 525 526 + cancel_delayed_work_sync(&br_offloads->update_work); 526 527 unregister_netdevice_notifier(&br_offloads->netdev_nb); 527 528 unregister_switchdev_blocking_notifier(&br_offloads->nb_blk); 528 529 unregister_switchdev_notifier(&br_offloads->nb); 529 - cancel_delayed_work(&br_offloads->update_work); 530 530 destroy_workqueue(br_offloads->wq); 531 531 rtnl_lock(); 532 532 mlx5_esw_bridge_cleanup(esw);
+54 -7
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2981 2981 agg_count += mqprio->qopt.count[i]; 2982 2982 } 2983 2983 2984 - if (priv->channels.params.num_channels < agg_count) { 2985 - netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n", 2984 + if (priv->channels.params.num_channels != agg_count) { 2985 + netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n", 2986 2986 agg_count, priv->channels.params.num_channels); 2987 2987 return -EINVAL; 2988 2988 } ··· 3325 3325 return mlx5_set_port_fcs(mdev, !enable); 3326 3326 } 3327 3327 3328 + static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) 3329 + { 3330 + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; 3331 + bool supported, curr_state; 3332 + int err; 3333 + 3334 + if (!MLX5_CAP_GEN(mdev, ports_check)) 3335 + return 0; 3336 + 3337 + err = mlx5_query_ports_check(mdev, in, sizeof(in)); 3338 + if (err) 3339 + return err; 3340 + 3341 + supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap); 3342 + curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc); 3343 + 3344 + if (!supported || enable == curr_state) 3345 + return 0; 3346 + 3347 + MLX5_SET(pcmr_reg, in, local_port, 1); 3348 + MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable); 3349 + 3350 + return mlx5_set_ports_check(mdev, in, sizeof(in)); 3351 + } 3352 + 3328 3353 static int set_feature_rx_fcs(struct net_device *netdev, bool enable) 3329 3354 { 3330 3355 struct mlx5e_priv *priv = netdev_priv(netdev); 3356 + struct mlx5e_channels *chs = &priv->channels; 3357 + struct mlx5_core_dev *mdev = priv->mdev; 3331 3358 int err; 3332 3359 3333 3360 mutex_lock(&priv->state_lock); 3334 3361 3335 - priv->channels.params.scatter_fcs_en = enable; 3336 - err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable); 3337 - if (err) 3338 - priv->channels.params.scatter_fcs_en = !enable; 3362 + if (enable) { 3363 + err = mlx5e_set_rx_port_ts(mdev, false); 3364 + if (err) 3365 + goto out; 3339 3366 3367 + chs->params.scatter_fcs_en = true; 3368 + err = mlx5e_modify_channels_scatter_fcs(chs, 
true); 3369 + if (err) { 3370 + chs->params.scatter_fcs_en = false; 3371 + mlx5e_set_rx_port_ts(mdev, true); 3372 + } 3373 + } else { 3374 + chs->params.scatter_fcs_en = false; 3375 + err = mlx5e_modify_channels_scatter_fcs(chs, false); 3376 + if (err) { 3377 + chs->params.scatter_fcs_en = true; 3378 + goto out; 3379 + } 3380 + err = mlx5e_set_rx_port_ts(mdev, true); 3381 + if (err) { 3382 + mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err); 3383 + err = 0; 3384 + } 3385 + } 3386 + 3387 + out: 3340 3388 mutex_unlock(&priv->state_lock); 3341 - 3342 3389 return err; 3343 3390 } 3344 3391
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 618 618 params->mqprio.num_tc = 1; 619 619 params->tunneled_offload_en = false; 620 620 621 + /* Set an initial non-zero value, so that mlx5e_select_queue won't 622 + * divide by zero if called before first activating channels. 623 + */ 624 + priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; 625 + 621 626 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); 622 627 } 623 628 ··· 648 643 netdev->hw_features |= NETIF_F_RXCSUM; 649 644 650 645 netdev->features |= netdev->hw_features; 651 - netdev->features |= NETIF_F_VLAN_CHALLENGED; 652 646 netdev->features |= NETIF_F_NETNS_LOCAL; 653 647 } 654 648
+5 -47
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
··· 24 24 #define MLXSW_THERMAL_ZONE_MAX_NAME 16 25 25 #define MLXSW_THERMAL_TEMP_SCORE_MAX GENMASK(31, 0) 26 26 #define MLXSW_THERMAL_MAX_STATE 10 27 + #define MLXSW_THERMAL_MIN_STATE 2 27 28 #define MLXSW_THERMAL_MAX_DUTY 255 28 - /* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values 29 - * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10 are used for 30 - * setting fan speed dynamic minimum. For example, if value is set to 14 (40%) 31 - * cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 to 32 - * introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60. 70, 80, 90, 100. 33 - */ 34 - #define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2) 35 - #define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2) 36 - #define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */ 37 29 38 30 /* External cooling devices, allowed for binding to mlxsw thermal zones. */ 39 31 static char * const mlxsw_thermal_external_allowed_cdev[] = { ··· 638 646 struct mlxsw_thermal *thermal = cdev->devdata; 639 647 struct device *dev = thermal->bus_info->dev; 640 648 char mfsc_pl[MLXSW_REG_MFSC_LEN]; 641 - unsigned long cur_state, i; 642 649 int idx; 643 - u8 duty; 644 650 int err; 651 + 652 + if (state > MLXSW_THERMAL_MAX_STATE) 653 + return -EINVAL; 645 654 646 655 idx = mlxsw_get_cooling_device_idx(thermal, cdev); 647 656 if (idx < 0) 648 657 return idx; 649 - 650 - /* Verify if this request is for changing allowed fan dynamical 651 - * minimum. If it is - update cooling levels accordingly and update 652 - * state, if current state is below the newly requested minimum state. 653 - * For example, if current state is 5, and minimal state is to be 654 - * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed 655 - * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be 656 - * overwritten. 
657 - */ 658 - if (state >= MLXSW_THERMAL_SPEED_MIN && 659 - state <= MLXSW_THERMAL_SPEED_MAX) { 660 - state -= MLXSW_THERMAL_MAX_STATE; 661 - for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++) 662 - thermal->cooling_levels[i] = max(state, i); 663 - 664 - mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0); 665 - err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl); 666 - if (err) 667 - return err; 668 - 669 - duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl); 670 - cur_state = mlxsw_duty_to_state(duty); 671 - 672 - /* If current fan state is lower than requested dynamical 673 - * minimum, increase fan speed up to dynamical minimum. 674 - */ 675 - if (state < cur_state) 676 - return 0; 677 - 678 - state = cur_state; 679 - } 680 - 681 - if (state > MLXSW_THERMAL_MAX_STATE) 682 - return -EINVAL; 683 658 684 659 /* Normalize the state to the valid speed range. */ 685 660 state = thermal->cooling_levels[state]; ··· 957 998 958 999 /* Initialize cooling levels per PWM state. */ 959 1000 for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++) 960 - thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL, 961 - i); 1001 + thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i); 962 1002 963 1003 thermal->polling_delay = bus_info->low_frequency ? 964 1004 MLXSW_THERMAL_SLOW_POLL_INT :
+8 -2
drivers/net/ethernet/microchip/encx24j600-regmap.c
··· 497 497 .reg_read = regmap_encx24j600_phy_reg_read, 498 498 }; 499 499 500 - void devm_regmap_init_encx24j600(struct device *dev, 501 - struct encx24j600_context *ctx) 500 + int devm_regmap_init_encx24j600(struct device *dev, 501 + struct encx24j600_context *ctx) 502 502 { 503 503 mutex_init(&ctx->mutex); 504 504 regcfg.lock_arg = ctx; 505 505 ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg); 506 + if (IS_ERR(ctx->regmap)) 507 + return PTR_ERR(ctx->regmap); 506 508 ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg); 509 + if (IS_ERR(ctx->phymap)) 510 + return PTR_ERR(ctx->phymap); 511 + 512 + return 0; 507 513 } 508 514 EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600); 509 515
+4 -1
drivers/net/ethernet/microchip/encx24j600.c
··· 1023 1023 priv->speed = SPEED_100; 1024 1024 1025 1025 priv->ctx.spi = spi; 1026 - devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); 1027 1026 ndev->irq = spi->irq; 1028 1027 ndev->netdev_ops = &encx24j600_netdev_ops; 1028 + 1029 + ret = devm_regmap_init_encx24j600(&spi->dev, &priv->ctx); 1030 + if (ret) 1031 + goto out_free; 1029 1032 1030 1033 mutex_init(&priv->lock); 1031 1034
+2 -2
drivers/net/ethernet/microchip/encx24j600_hw.h
··· 15 15 int bank; 16 16 }; 17 17 18 - void devm_regmap_init_encx24j600(struct device *dev, 19 - struct encx24j600_context *ctx); 18 + int devm_regmap_init_encx24j600(struct device *dev, 19 + struct encx24j600_context *ctx); 20 20 21 21 /* Single-byte instructions */ 22 22 #define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
+3 -1
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 1477 1477 if (err) 1478 1478 goto out; 1479 1479 1480 - if (cq->gdma_id >= gc->max_num_cqs) 1480 + if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) { 1481 + err = -EINVAL; 1481 1482 goto out; 1483 + } 1482 1484 1483 1485 gc->cq_table[cq->gdma_id] = cq->gdma_cq; 1484 1486
+73 -38
drivers/net/ethernet/mscc/ocelot.c
··· 472 472 !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) 473 473 ocelot_port_rmwl(ocelot_port, 474 474 DEV_CLOCK_CFG_MAC_TX_RST | 475 - DEV_CLOCK_CFG_MAC_TX_RST, 475 + DEV_CLOCK_CFG_MAC_RX_RST, 476 476 DEV_CLOCK_CFG_MAC_TX_RST | 477 - DEV_CLOCK_CFG_MAC_TX_RST, 477 + DEV_CLOCK_CFG_MAC_RX_RST, 478 478 DEV_CLOCK_CFG); 479 479 } 480 480 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); ··· 569 569 } 570 570 EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); 571 571 572 - static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, 573 - struct sk_buff *clone) 572 + static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, 573 + struct sk_buff *clone) 574 574 { 575 575 struct ocelot_port *ocelot_port = ocelot->ports[port]; 576 + unsigned long flags; 576 577 577 - spin_lock(&ocelot_port->ts_id_lock); 578 + spin_lock_irqsave(&ocelot->ts_id_lock, flags); 579 + 580 + if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || 581 + ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { 582 + spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 583 + return -EBUSY; 584 + } 578 585 579 586 skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; 580 587 /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ 581 588 OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; 582 - ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4; 589 + 590 + ocelot_port->ts_id++; 591 + if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) 592 + ocelot_port->ts_id = 0; 593 + 594 + ocelot_port->ptp_skbs_in_flight++; 595 + ocelot->ptp_skbs_in_flight++; 596 + 583 597 skb_queue_tail(&ocelot_port->tx_skbs, clone); 584 598 585 - spin_unlock(&ocelot_port->ts_id_lock); 599 + spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); 600 + 601 + return 0; 586 602 } 587 603 588 - u32 ocelot_ptp_rew_op(struct sk_buff *skb) 589 - { 590 - struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; 591 - u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd; 592 - u32 rew_op = 0; 593 - 594 - if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && 
clone) { 595 - rew_op = ptp_cmd; 596 - rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3; 597 - } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { 598 - rew_op = ptp_cmd; 599 - } 600 - 601 - return rew_op; 602 - } 603 - EXPORT_SYMBOL(ocelot_ptp_rew_op); 604 - 605 - static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb) 604 + static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb, 605 + unsigned int ptp_class) 606 606 { 607 607 struct ptp_header *hdr; 608 - unsigned int ptp_class; 609 608 u8 msgtype, twostep; 610 - 611 - ptp_class = ptp_classify_raw(skb); 612 - if (ptp_class == PTP_CLASS_NONE) 613 - return false; 614 609 615 610 hdr = ptp_parse_header(skb, ptp_class); 616 611 if (!hdr) ··· 626 631 { 627 632 struct ocelot_port *ocelot_port = ocelot->ports[port]; 628 633 u8 ptp_cmd = ocelot_port->ptp_cmd; 634 + unsigned int ptp_class; 635 + int err; 636 + 637 + /* Don't do anything if PTP timestamping not enabled */ 638 + if (!ptp_cmd) 639 + return 0; 640 + 641 + ptp_class = ptp_classify_raw(skb); 642 + if (ptp_class == PTP_CLASS_NONE) 643 + return -EINVAL; 629 644 630 645 /* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */ 631 646 if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { 632 - if (ocelot_ptp_is_onestep_sync(skb)) { 647 + if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) { 633 648 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 634 649 return 0; 635 650 } ··· 653 648 if (!(*clone)) 654 649 return -ENOMEM; 655 650 656 - ocelot_port_add_txtstamp_skb(ocelot, port, *clone); 651 + err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); 652 + if (err) 653 + return err; 654 + 657 655 OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; 656 + OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; 658 657 } 659 658 660 659 return 0; ··· 692 683 spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); 693 684 } 694 685 686 + static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) 687 + { 688 + struct ptp_header *hdr; 689 + 690 + hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); 691 + if 
(WARN_ON(!hdr)) 692 + return false; 693 + 694 + return seqid == ntohs(hdr->sequence_id); 695 + } 696 + 695 697 void ocelot_get_txtstamp(struct ocelot *ocelot) 696 698 { 697 699 int budget = OCELOT_PTP_QUEUE_SZ; ··· 710 690 while (budget--) { 711 691 struct sk_buff *skb, *skb_tmp, *skb_match = NULL; 712 692 struct skb_shared_hwtstamps shhwtstamps; 693 + u32 val, id, seqid, txport; 713 694 struct ocelot_port *port; 714 695 struct timespec64 ts; 715 696 unsigned long flags; 716 - u32 val, id, txport; 717 697 718 698 val = ocelot_read(ocelot, SYS_PTP_STATUS); 719 699 ··· 726 706 /* Retrieve the ts ID and Tx port */ 727 707 id = SYS_PTP_STATUS_PTP_MESS_ID_X(val); 728 708 txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); 709 + seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); 729 710 730 - /* Retrieve its associated skb */ 731 711 port = ocelot->ports[txport]; 732 712 713 + spin_lock(&ocelot->ts_id_lock); 714 + port->ptp_skbs_in_flight--; 715 + ocelot->ptp_skbs_in_flight--; 716 + spin_unlock(&ocelot->ts_id_lock); 717 + 718 + /* Retrieve its associated skb */ 719 + try_again: 733 720 spin_lock_irqsave(&port->tx_skbs.lock, flags); 734 721 735 722 skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { ··· 749 722 750 723 spin_unlock_irqrestore(&port->tx_skbs.lock, flags); 751 724 725 + if (WARN_ON(!skb_match)) 726 + continue; 727 + 728 + if (!ocelot_validate_ptp_skb(skb_match, seqid)) { 729 + dev_err_ratelimited(ocelot->dev, 730 + "port %d received stale TX timestamp for seqid %d, discarding\n", 731 + txport, seqid); 732 + dev_kfree_skb_any(skb); 733 + goto try_again; 734 + } 735 + 752 736 /* Get the h/w timestamp */ 753 737 ocelot_get_hwtimestamp(ocelot, &ts); 754 - 755 - if (unlikely(!skb_match)) 756 - continue; 757 738 758 739 /* Set the timestamp into the skb */ 759 740 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); ··· 1983 1948 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1984 1949 1985 1950 skb_queue_head_init(&ocelot_port->tx_skbs); 1986 - 
spin_lock_init(&ocelot_port->ts_id_lock); 1987 1951 1988 1952 /* Basic L2 initialization */ 1989 1953 ··· 2115 2081 mutex_init(&ocelot->stats_lock); 2116 2082 mutex_init(&ocelot->ptp_lock); 2117 2083 spin_lock_init(&ocelot->ptp_clock_lock); 2084 + spin_lock_init(&ocelot->ts_id_lock); 2118 2085 snprintf(queue_name, sizeof(queue_name), "%s-stats", 2119 2086 dev_name(ocelot->dev)); 2120 2087 ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+2 -1
drivers/net/ethernet/mscc/ocelot_net.c
··· 8 8 * Copyright 2020-2021 NXP 9 9 */ 10 10 11 + #include <linux/dsa/ocelot.h> 11 12 #include <linux/if_bridge.h> 12 13 #include <linux/of_net.h> 13 14 #include <linux/phy/phy.h> ··· 1626 1625 if (phy_mode == PHY_INTERFACE_MODE_QSGMII) 1627 1626 ocelot_port_rmwl(ocelot_port, 0, 1628 1627 DEV_CLOCK_CFG_MAC_TX_RST | 1629 - DEV_CLOCK_CFG_MAC_TX_RST, 1628 + DEV_CLOCK_CFG_MAC_RX_RST, 1630 1629 DEV_CLOCK_CFG); 1631 1630 1632 1631 ocelot_port->phy_mode = phy_mode;
+1 -1
drivers/net/ethernet/neterion/s2io.c
··· 8566 8566 return; 8567 8567 } 8568 8568 8569 - if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) { 8569 + if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) { 8570 8570 s2io_card_down(sp); 8571 8571 pr_err("Can't restore mac addr after reset.\n"); 8572 8572 return;
+14 -5
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 830 830 if (err) 831 831 goto err_cleanup; 832 832 833 - err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app); 834 - if (err) 835 - goto err_cleanup; 836 - 837 833 if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM) 838 834 nfp_flower_qos_init(app); 839 835 ··· 938 942 return err; 939 943 } 940 944 941 - return nfp_tunnel_config_start(app); 945 + err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app); 946 + if (err) 947 + return err; 948 + 949 + err = nfp_tunnel_config_start(app); 950 + if (err) 951 + goto err_tunnel_config; 952 + 953 + return 0; 954 + 955 + err_tunnel_config: 956 + flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app, 957 + nfp_flower_setup_indr_tc_release); 958 + return err; 942 959 } 943 960 944 961 static void nfp_flower_stop(struct nfp_app *app)
+4
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 1379 1379 1380 1380 static int ionic_addr_del(struct net_device *netdev, const u8 *addr) 1381 1381 { 1382 + /* Don't delete our own address from the uc list */ 1383 + if (ether_addr_equal(addr, netdev->dev_addr)) 1384 + return 0; 1385 + 1382 1386 return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); 1383 1387 } 1384 1388
+1
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 1299 1299 } else { 1300 1300 DP_NOTICE(cdev, 1301 1301 "Failed to acquire PTT for aRFS\n"); 1302 + rc = -EINVAL; 1302 1303 goto err; 1303 1304 } 1304 1305 }
+1
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
··· 71 71 72 72 static const struct of_device_id dwmac_generic_match[] = { 73 73 { .compatible = "st,spear600-gmac"}, 74 + { .compatible = "snps,dwmac-3.40a"}, 74 75 { .compatible = "snps,dwmac-3.50a"}, 75 76 { .compatible = "snps,dwmac-3.610"}, 76 77 { .compatible = "snps,dwmac-3.70a"},
+11 -2
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
··· 218 218 readl(ioaddr + DMA_BUS_MODE + i * 4); 219 219 } 220 220 221 - static void dwmac1000_get_hw_feature(void __iomem *ioaddr, 222 - struct dma_features *dma_cap) 221 + static int dwmac1000_get_hw_feature(void __iomem *ioaddr, 222 + struct dma_features *dma_cap) 223 223 { 224 224 u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE); 225 + 226 + if (!hw_cap) { 227 + /* 0x00000000 is the value read on old hardware that does not 228 + * implement this register 229 + */ 230 + return -EOPNOTSUPP; 231 + } 225 232 226 233 dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); 227 234 dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; ··· 259 252 dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; 260 253 /* Alternate (enhanced) DESC mode */ 261 254 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 255 + 256 + return 0; 262 257 } 263 258 264 259 static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+4 -2
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
··· 347 347 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); 348 348 } 349 349 350 - static void dwmac4_get_hw_feature(void __iomem *ioaddr, 351 - struct dma_features *dma_cap) 350 + static int dwmac4_get_hw_feature(void __iomem *ioaddr, 351 + struct dma_features *dma_cap) 352 352 { 353 353 u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0); 354 354 ··· 437 437 dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; 438 438 dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; 439 439 dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5; 440 + 441 + return 0; 440 442 } 441 443 442 444 /* Enable/disable TSO feature and set MSS */
+4 -2
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
··· 371 371 return ret; 372 372 } 373 373 374 - static void dwxgmac2_get_hw_feature(void __iomem *ioaddr, 375 - struct dma_features *dma_cap) 374 + static int dwxgmac2_get_hw_feature(void __iomem *ioaddr, 375 + struct dma_features *dma_cap) 376 376 { 377 377 u32 hw_cap; 378 378 ··· 445 445 dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11; 446 446 dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9; 447 447 dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3; 448 + 449 + return 0; 448 450 } 449 451 450 452 static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+3 -3
drivers/net/ethernet/stmicro/stmmac/hwif.h
··· 203 203 int (*dma_interrupt) (void __iomem *ioaddr, 204 204 struct stmmac_extra_stats *x, u32 chan, u32 dir); 205 205 /* If supported then get the optional core features */ 206 - void (*get_hw_feature)(void __iomem *ioaddr, 207 - struct dma_features *dma_cap); 206 + int (*get_hw_feature)(void __iomem *ioaddr, 207 + struct dma_features *dma_cap); 208 208 /* Program the HW RX Watchdog */ 209 209 void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue); 210 210 void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); ··· 255 255 #define stmmac_dma_interrupt_status(__priv, __args...) \ 256 256 stmmac_do_callback(__priv, dma, dma_interrupt, __args) 257 257 #define stmmac_get_hw_feature(__priv, __args...) \ 258 - stmmac_do_void_callback(__priv, dma, get_hw_feature, __args) 258 + stmmac_do_callback(__priv, dma, get_hw_feature, __args) 259 259 #define stmmac_rx_watchdog(__priv, __args...) \ 260 260 stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) 261 261 #define stmmac_set_tx_ring_len(__priv, __args...) \
+8
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 508 508 plat->pmt = 1; 509 509 } 510 510 511 + if (of_device_is_compatible(np, "snps,dwmac-3.40a")) { 512 + plat->has_gmac = 1; 513 + plat->enh_desc = 1; 514 + plat->tx_coe = 1; 515 + plat->bugged_jumbo = 1; 516 + plat->pmt = 1; 517 + } 518 + 511 519 if (of_device_is_compatible(np, "snps,dwmac-4.00") || 512 520 of_device_is_compatible(np, "snps,dwmac-4.10a") || 513 521 of_device_is_compatible(np, "snps,dwmac-4.20a") ||
+3
drivers/net/phy/phy_device.c
··· 3125 3125 { 3126 3126 struct phy_device *phydev = to_phy_device(dev); 3127 3127 3128 + if (phydev->state == PHY_READY || !phydev->attached_dev) 3129 + return; 3130 + 3128 3131 phy_disable_interrupts(phydev); 3129 3132 } 3130 3133
+4
drivers/net/usb/Kconfig
··· 99 99 config USB_RTL8152 100 100 tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 101 101 select MII 102 + select CRC32 103 + select CRYPTO 104 + select CRYPTO_HASH 105 + select CRYPTO_SHA256 102 106 help 103 107 This option adds support for Realtek RTL8152 based USB 2.0 104 108 10/100 Ethernet adapters and RTL8153 based USB 3.0 10/100/1000
+1 -1
drivers/net/virtio_net.c
··· 406 406 * add_recvbuf_mergeable() + get_mergeable_buf_len() 407 407 */ 408 408 truesize = headroom ? PAGE_SIZE : truesize; 409 - tailroom = truesize - len - headroom; 409 + tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len); 410 410 buf = p - headroom; 411 411 412 412 len -= hdr_len;
+13
include/linux/dsa/mv88e6xxx.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 2 + * Copyright 2021 NXP 3 + */ 4 + 5 + #ifndef _NET_DSA_TAG_MV88E6XXX_H 6 + #define _NET_DSA_TAG_MV88E6XXX_H 7 + 8 + #include <linux/if_vlan.h> 9 + 10 + #define MV88E6XXX_VID_STANDALONE 0 11 + #define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1) 12 + 13 + #endif
+49
include/linux/dsa/ocelot.h
··· 5 5 #ifndef _NET_DSA_TAG_OCELOT_H 6 6 #define _NET_DSA_TAG_OCELOT_H 7 7 8 + #include <linux/kthread.h> 8 9 #include <linux/packing.h> 10 + #include <linux/skbuff.h> 11 + 12 + struct ocelot_skb_cb { 13 + struct sk_buff *clone; 14 + unsigned int ptp_class; /* valid only for clones */ 15 + u8 ptp_cmd; 16 + u8 ts_id; 17 + }; 18 + 19 + #define OCELOT_SKB_CB(skb) \ 20 + ((struct ocelot_skb_cb *)((skb)->cb)) 21 + 22 + #define IFH_TAG_TYPE_C 0 23 + #define IFH_TAG_TYPE_S 1 24 + 25 + #define IFH_REW_OP_NOOP 0x0 26 + #define IFH_REW_OP_DSCP 0x1 27 + #define IFH_REW_OP_ONE_STEP_PTP 0x2 28 + #define IFH_REW_OP_TWO_STEP_PTP 0x3 29 + #define IFH_REW_OP_ORIGIN_PTP 0x5 9 30 10 31 #define OCELOT_TAG_LEN 16 11 32 #define OCELOT_SHORT_PREFIX_LEN 4 ··· 161 140 * +------+------+------+------+------+------+------+------+ 162 141 */ 163 142 143 + struct felix_deferred_xmit_work { 144 + struct dsa_port *dp; 145 + struct sk_buff *skb; 146 + struct kthread_work work; 147 + }; 148 + 149 + struct felix_port { 150 + void (*xmit_work_fn)(struct kthread_work *work); 151 + struct kthread_worker *xmit_worker; 152 + }; 153 + 164 154 static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val) 165 155 { 166 156 packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0); ··· 245 213 static inline void ocelot_ifh_set_vid(void *injection, u64 vid) 246 214 { 247 215 packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0); 216 + } 217 + 218 + /* Determine the PTP REW_OP to use for injecting the given skb */ 219 + static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb) 220 + { 221 + struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; 222 + u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd; 223 + u32 rew_op = 0; 224 + 225 + if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) { 226 + rew_op = ptp_cmd; 227 + rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3; 228 + } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) { 229 + rew_op = ptp_cmd; 230 + } 231 + 232 + return rew_op; 248 233 } 249 234 250 235 #endif
+15 -29
include/linux/dsa/sja1105.h
··· 48 48 spinlock_t meta_lock; 49 49 unsigned long state; 50 50 u8 ts_id; 51 + /* Used on SJA1110 where meta frames are generated only for 52 + * 2-step TX timestamps 53 + */ 54 + struct sk_buff_head skb_txtstamp_queue; 51 55 }; 52 56 53 57 struct sja1105_skb_cb { ··· 73 69 bool hwts_tx_en; 74 70 }; 75 71 76 - enum sja1110_meta_tstamp { 77 - SJA1110_META_TSTAMP_TX = 0, 78 - SJA1110_META_TSTAMP_RX = 1, 79 - }; 72 + /* Timestamps are in units of 8 ns clock ticks (equivalent to 73 + * a fixed 125 MHz clock). 74 + */ 75 + #define SJA1105_TICK_NS 8 80 76 81 - #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) 82 - 83 - void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, 84 - enum sja1110_meta_tstamp dir, u64 tstamp); 85 - 86 - #else 87 - 88 - static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, 89 - u8 ts_id, enum sja1110_meta_tstamp dir, 90 - u64 tstamp) 77 + static inline s64 ns_to_sja1105_ticks(s64 ns) 91 78 { 79 + return ns / SJA1105_TICK_NS; 92 80 } 93 81 94 - #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ 95 - 96 - #if IS_ENABLED(CONFIG_NET_DSA_SJA1105) 97 - 98 - extern const struct dsa_switch_ops sja1105_switch_ops; 82 + static inline s64 sja1105_ticks_to_ns(s64 ticks) 83 + { 84 + return ticks * SJA1105_TICK_NS; 85 + } 99 86 100 87 static inline bool dsa_port_is_sja1105(struct dsa_port *dp) 101 88 { 102 - return dp->ds->ops == &sja1105_switch_ops; 89 + return true; 103 90 } 104 - 105 - #else 106 - 107 - static inline bool dsa_port_is_sja1105(struct dsa_port *dp) 108 - { 109 - return false; 110 - } 111 - 112 - #endif 113 91 114 92 #endif /* _NET_DSA_SJA1105_H */
+8 -2
include/linux/mlx5/mlx5_ifc.h
··· 9475 9475 u8 reserved_at_0[0x8]; 9476 9476 u8 local_port[0x8]; 9477 9477 u8 reserved_at_10[0x10]; 9478 + 9478 9479 u8 entropy_force_cap[0x1]; 9479 9480 u8 entropy_calc_cap[0x1]; 9480 9481 u8 entropy_gre_calc_cap[0x1]; 9481 - u8 reserved_at_23[0x1b]; 9482 + u8 reserved_at_23[0xf]; 9483 + u8 rx_ts_over_crc_cap[0x1]; 9484 + u8 reserved_at_33[0xb]; 9482 9485 u8 fcs_cap[0x1]; 9483 9486 u8 reserved_at_3f[0x1]; 9487 + 9484 9488 u8 entropy_force[0x1]; 9485 9489 u8 entropy_calc[0x1]; 9486 9490 u8 entropy_gre_calc[0x1]; 9487 - u8 reserved_at_43[0x1b]; 9491 + u8 reserved_at_43[0xf]; 9492 + u8 rx_ts_over_crc[0x1]; 9493 + u8 reserved_at_53[0xb]; 9488 9494 u8 fcs_chk[0x1]; 9489 9495 u8 reserved_at_5f[0x1]; 9490 9496 };
+4 -51
include/soc/mscc/ocelot.h
··· 89 89 /* Source PGIDs, one per physical port */ 90 90 #define PGID_SRC 80 91 91 92 - #define IFH_TAG_TYPE_C 0 93 - #define IFH_TAG_TYPE_S 1 94 - 95 - #define IFH_REW_OP_NOOP 0x0 96 - #define IFH_REW_OP_DSCP 0x1 97 - #define IFH_REW_OP_ONE_STEP_PTP 0x2 98 - #define IFH_REW_OP_TWO_STEP_PTP 0x3 99 - #define IFH_REW_OP_ORIGIN_PTP 0x5 100 - 101 92 #define OCELOT_NUM_TC 8 102 93 103 94 #define OCELOT_SPEED_2500 0 ··· 594 603 /* The VLAN ID that will be transmitted as untagged, on egress */ 595 604 struct ocelot_vlan native_vlan; 596 605 606 + unsigned int ptp_skbs_in_flight; 597 607 u8 ptp_cmd; 598 608 struct sk_buff_head tx_skbs; 599 609 u8 ts_id; 600 - spinlock_t ts_id_lock; 601 610 602 611 phy_interface_t phy_mode; 603 612 ··· 671 680 struct ptp_clock *ptp_clock; 672 681 struct ptp_clock_info ptp_info; 673 682 struct hwtstamp_config hwtstamp_config; 683 + unsigned int ptp_skbs_in_flight; 684 + /* Protects the 2-step TX timestamp ID logic */ 685 + spinlock_t ts_id_lock; 674 686 /* Protects the PTP interface state */ 675 687 struct mutex ptp_lock; 676 688 /* Protects the PTP clock */ ··· 685 691 u32 rate; /* kilobit per second */ 686 692 u32 burst; /* bytes */ 687 693 }; 688 - 689 - struct ocelot_skb_cb { 690 - struct sk_buff *clone; 691 - u8 ptp_cmd; 692 - u8 ts_id; 693 - }; 694 - 695 - #define OCELOT_SKB_CB(skb) \ 696 - ((struct ocelot_skb_cb *)((skb)->cb)) 697 694 698 695 #define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) 699 696 #define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) ··· 737 752 void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target, 738 753 u32 val, u32 reg, u32 offset); 739 754 740 - #if IS_ENABLED(CONFIG_MSCC_OCELOT_SWITCH_LIB) 741 - 742 755 /* Packet I/O */ 743 756 bool ocelot_can_inject(struct ocelot *ocelot, int grp); 744 757 void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp, 745 758 u32 rew_op, struct 
sk_buff *skb); 746 759 int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **skb); 747 760 void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp); 748 - 749 - u32 ocelot_ptp_rew_op(struct sk_buff *skb); 750 - #else 751 - 752 - static inline bool ocelot_can_inject(struct ocelot *ocelot, int grp) 753 - { 754 - return false; 755 - } 756 - 757 - static inline void ocelot_port_inject_frame(struct ocelot *ocelot, int port, 758 - int grp, u32 rew_op, 759 - struct sk_buff *skb) 760 - { 761 - } 762 - 763 - static inline int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, 764 - struct sk_buff **skb) 765 - { 766 - return -EIO; 767 - } 768 - 769 - static inline void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp) 770 - { 771 - } 772 - 773 - static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb) 774 - { 775 - return 0; 776 - } 777 - #endif 778 761 779 762 /* Hardware initialization */ 780 763 int ocelot_regfields_init(struct ocelot *ocelot,
+3
include/soc/mscc/ocelot_ptp.h
··· 13 13 #include <linux/ptp_clock_kernel.h> 14 14 #include <soc/mscc/ocelot.h> 15 15 16 + #define OCELOT_MAX_PTP_ID 63 17 + #define OCELOT_PTP_FIFO_SIZE 128 18 + 16 19 #define PTP_PIN_CFG_RSZ 0x20 17 20 #define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ 18 21 #define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
+11 -13
net/core/net-procfs.c
··· 77 77 struct rtnl_link_stats64 temp; 78 78 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); 79 79 80 - seq_printf(seq, "%9s: %16llu %12llu %4llu %6llu %4llu %5llu %10llu %9llu " 81 - "%16llu %12llu %4llu %6llu %4llu %5llu %7llu %10llu\n", 80 + seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " 81 + "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", 82 82 dev->name, stats->rx_bytes, stats->rx_packets, 83 83 stats->rx_errors, 84 84 stats->rx_dropped + stats->rx_missed_errors, ··· 103 103 static int dev_seq_show(struct seq_file *seq, void *v) 104 104 { 105 105 if (v == SEQ_START_TOKEN) 106 - seq_puts(seq, "Interface| Receive " 107 - " | Transmit\n" 108 - " | bytes packets errs drop fifo frame " 109 - "compressed multicast| bytes packets errs " 110 - " drop fifo colls carrier compressed\n"); 106 + seq_puts(seq, "Inter-| Receive " 107 + " | Transmit\n" 108 + " face |bytes packets errs drop fifo frame " 109 + "compressed multicast|bytes packets errs " 110 + "drop fifo colls carrier compressed\n"); 111 111 else 112 112 dev_seq_printf_stats(seq, v); 113 113 return 0; ··· 259 259 struct packet_type *pt = v; 260 260 261 261 if (v == SEQ_START_TOKEN) 262 - seq_puts(seq, "Type Device Function\n"); 262 + seq_puts(seq, "Type Device Function\n"); 263 263 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { 264 264 if (pt->type == htons(ETH_P_ALL)) 265 265 seq_puts(seq, "ALL "); 266 266 else 267 267 seq_printf(seq, "%04x", ntohs(pt->type)); 268 268 269 - seq_printf(seq, " %-9s %ps\n", 269 + seq_printf(seq, " %-8s %ps\n", 270 270 pt->dev ? 
pt->dev->name : "", pt->func); 271 271 } 272 272 ··· 327 327 struct netdev_hw_addr *ha; 328 328 struct net_device *dev = v; 329 329 330 - if (v == SEQ_START_TOKEN) { 331 - seq_puts(seq, "Ifindex Interface Refcount Global_use Address\n"); 330 + if (v == SEQ_START_TOKEN) 332 331 return 0; 333 - } 334 332 335 333 netif_addr_lock_bh(dev); 336 334 netdev_for_each_mc_addr(ha, dev) { 337 - seq_printf(seq, "%-7d %-9s %-8d %-10d %*phN\n", 335 + seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n", 338 336 dev->ifindex, dev->name, 339 337 ha->refcount, ha->global_use, 340 338 (int)dev->addr_len, ha->addr);
-5
net/dsa/Kconfig
··· 101 101 102 102 config NET_DSA_TAG_OCELOT 103 103 tristate "Tag driver for Ocelot family of switches, using NPI port" 104 - depends on MSCC_OCELOT_SWITCH_LIB || \ 105 - (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST) 106 104 select PACKING 107 105 help 108 106 Say Y or M if you want to enable NPI tagging for the Ocelot switches ··· 112 114 113 115 config NET_DSA_TAG_OCELOT_8021Q 114 116 tristate "Tag driver for Ocelot family of switches, using VLAN" 115 - depends on MSCC_OCELOT_SWITCH_LIB || \ 116 - (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST) 117 117 help 118 118 Say Y or M if you want to enable support for tagging frames with a 119 119 custom VLAN-based header. Frames that require timestamping, such as ··· 134 138 135 139 config NET_DSA_TAG_SJA1105 136 140 tristate "Tag driver for NXP SJA1105 switches" 137 - depends on NET_DSA_SJA1105 || !NET_DSA_SJA1105 138 141 select PACKING 139 142 help 140 143 Say Y or M if you want to enable support for tagging frames with the
+3 -1
net/dsa/dsa2.c
··· 170 170 /* Check if the bridge is still in use, otherwise it is time 171 171 * to clean it up so we can reuse this bridge_num later. 172 172 */ 173 - if (!dsa_bridge_num_find(bridge_dev)) 173 + if (dsa_bridge_num_find(bridge_dev) < 0) 174 174 clear_bit(bridge_num, &dsa_fwd_offloading_bridges); 175 175 } 176 176 ··· 811 811 if (!dsa_is_cpu_port(ds, port)) 812 812 continue; 813 813 814 + rtnl_lock(); 814 815 err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto); 816 + rtnl_unlock(); 815 817 if (err) { 816 818 dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n", 817 819 tag_ops->name, ERR_PTR(err));
+1 -1
net/dsa/switch.c
··· 168 168 if (extack._msg) 169 169 dev_err(ds->dev, "port %d: %s\n", info->port, 170 170 extack._msg); 171 - if (err && err != EOPNOTSUPP) 171 + if (err && err != -EOPNOTSUPP) 172 172 return err; 173 173 } 174 174
+9 -19
net/dsa/tag_dsa.c
··· 45 45 * 6 6 2 2 4 2 N 46 46 */ 47 47 48 + #include <linux/dsa/mv88e6xxx.h> 48 49 #include <linux/etherdevice.h> 49 50 #include <linux/list.h> 50 51 #include <linux/slab.h> ··· 130 129 u8 tag_dev, tag_port; 131 130 enum dsa_cmd cmd; 132 131 u8 *dsa_header; 133 - u16 pvid = 0; 134 - int err; 135 132 136 133 if (skb->offload_fwd_mark) { 137 134 struct dsa_switch_tree *dst = dp->ds->dst; 138 - struct net_device *br = dp->bridge_dev; 139 135 140 136 cmd = DSA_CMD_FORWARD; 141 137 ··· 142 144 */ 143 145 tag_dev = dst->last_switch + 1 + dp->bridge_num; 144 146 tag_port = 0; 145 - 146 - /* If we are offloading forwarding for a VLAN-unaware bridge, 147 - * inject packets to hardware using the bridge's pvid, since 148 - * that's where the packets ingressed from. 149 - */ 150 - if (!br_vlan_enabled(br)) { 151 - /* Safe because __dev_queue_xmit() runs under 152 - * rcu_read_lock_bh() 153 - */ 154 - err = br_vlan_get_pvid_rcu(br, &pvid); 155 - if (err) 156 - return NULL; 157 - } 158 147 } else { 159 148 cmd = DSA_CMD_FROM_CPU; 160 149 tag_dev = dp->ds->index; ··· 165 180 dsa_header[2] &= ~0x10; 166 181 } 167 182 } else { 183 + struct net_device *br = dp->bridge_dev; 184 + u16 vid; 185 + 186 + vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE; 187 + 168 188 skb_push(skb, DSA_HLEN + extra); 169 189 dsa_alloc_etype_header(skb, DSA_HLEN + extra); 170 190 171 - /* Construct untagged DSA tag. */ 191 + /* Construct DSA header from untagged frame. */ 172 192 dsa_header = dsa_etype_header_pos_tx(skb) + extra; 173 193 174 194 dsa_header[0] = (cmd << 6) | tag_dev; 175 195 dsa_header[1] = tag_port << 3; 176 - dsa_header[2] = pvid >> 8; 177 - dsa_header[3] = pvid & 0xff; 196 + dsa_header[2] = vid >> 8; 197 + dsa_header[3] = vid & 0xff; 178 198 } 179 199 180 200 return skb;
-1
net/dsa/tag_ocelot.c
··· 2 2 /* Copyright 2019 NXP 3 3 */ 4 4 #include <linux/dsa/ocelot.h> 5 - #include <soc/mscc/ocelot.h> 6 5 #include "dsa_priv.h" 7 6 8 7 static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
+27 -13
net/dsa/tag_ocelot_8021q.c
··· 9 9 * that on egress 10 10 */ 11 11 #include <linux/dsa/8021q.h> 12 - #include <soc/mscc/ocelot.h> 13 - #include <soc/mscc/ocelot_ptp.h> 12 + #include <linux/dsa/ocelot.h> 14 13 #include "dsa_priv.h" 14 + 15 + static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, 16 + struct sk_buff *skb) 17 + { 18 + struct felix_deferred_xmit_work *xmit_work; 19 + struct felix_port *felix_port = dp->priv; 20 + 21 + xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC); 22 + if (!xmit_work) 23 + return NULL; 24 + 25 + /* Calls felix_port_deferred_xmit in felix.c */ 26 + kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn); 27 + /* Increase refcount so the kfree_skb in dsa_slave_xmit 28 + * won't really free the packet. 29 + */ 30 + xmit_work->dp = dp; 31 + xmit_work->skb = skb_get(skb); 32 + 33 + kthread_queue_work(felix_port->xmit_worker, &xmit_work->work); 34 + 35 + return NULL; 36 + } 15 37 16 38 static struct sk_buff *ocelot_xmit(struct sk_buff *skb, 17 39 struct net_device *netdev) ··· 42 20 u16 tx_vid = dsa_8021q_tx_vid(dp->ds, dp->index); 43 21 u16 queue_mapping = skb_get_queue_mapping(skb); 44 22 u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); 45 - struct ocelot *ocelot = dp->ds->priv; 46 - int port = dp->index; 47 - u32 rew_op = 0; 23 + struct ethhdr *hdr = eth_hdr(skb); 48 24 49 - rew_op = ocelot_ptp_rew_op(skb); 50 - if (rew_op) { 51 - if (!ocelot_can_inject(ocelot, 0)) 52 - return NULL; 53 - 54 - ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb); 55 - return NULL; 56 - } 25 + if (ocelot_ptp_rew_op(skb) || is_link_local_ether_addr(hdr->h_dest)) 26 + return ocelot_defer_xmit(dp, skb); 57 27 58 28 return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q, 59 29 ((pcp << VLAN_PRIO_SHIFT) | tx_vid));
+43
net/dsa/tag_sja1105.c
··· 4 4 #include <linux/if_vlan.h> 5 5 #include <linux/dsa/sja1105.h> 6 6 #include <linux/dsa/8021q.h> 7 + #include <linux/skbuff.h> 7 8 #include <linux/packing.h> 8 9 #include "dsa_priv.h" 9 10 ··· 53 52 #define SJA1110_RX_TRAILER_LEN 13 54 53 #define SJA1110_TX_TRAILER_LEN 4 55 54 #define SJA1110_MAX_PADDING_LEN 15 55 + 56 + enum sja1110_meta_tstamp { 57 + SJA1110_META_TSTAMP_TX = 0, 58 + SJA1110_META_TSTAMP_RX = 1, 59 + }; 56 60 57 61 /* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */ 58 62 static inline bool sja1105_is_link_local(const struct sk_buff *skb) ··· 524 518 525 519 return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local, 526 520 is_meta); 521 + } 522 + 523 + static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, 524 + u8 ts_id, enum sja1110_meta_tstamp dir, 525 + u64 tstamp) 526 + { 527 + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; 528 + struct dsa_port *dp = dsa_to_port(ds, port); 529 + struct skb_shared_hwtstamps shwt = {0}; 530 + struct sja1105_port *sp = dp->priv; 531 + 532 + if (!dsa_port_is_sja1105(dp)) 533 + return; 534 + 535 + /* We don't care about RX timestamps on the CPU port */ 536 + if (dir == SJA1110_META_TSTAMP_RX) 537 + return; 538 + 539 + spin_lock(&sp->data->skb_txtstamp_queue.lock); 540 + 541 + skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) { 542 + if (SJA1105_SKB_CB(skb)->ts_id != ts_id) 543 + continue; 544 + 545 + __skb_unlink(skb, &sp->data->skb_txtstamp_queue); 546 + skb_match = skb; 547 + 548 + break; 549 + } 550 + 551 + spin_unlock(&sp->data->skb_txtstamp_queue.lock); 552 + 553 + if (WARN_ON(!skb_match)) 554 + return; 555 + 556 + shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); 557 + skb_complete_tx_timestamp(skb_match, &shwt); 527 558 } 528 559 529 560 static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
+11 -12
net/ipv4/icmp.c
··· 1054 1054 iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio); 1055 1055 if (!ext_hdr || !iio) 1056 1056 goto send_mal_query; 1057 - if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr)) 1057 + if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) || 1058 + ntohs(iio->extobj_hdr.length) > sizeof(_iio)) 1058 1059 goto send_mal_query; 1059 1060 ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr); 1061 + iio = skb_header_pointer(skb, sizeof(_ext_hdr), 1062 + sizeof(iio->extobj_hdr) + ident_len, &_iio); 1063 + if (!iio) 1064 + goto send_mal_query; 1065 + 1060 1066 status = 0; 1061 1067 dev = NULL; 1062 1068 switch (iio->extobj_hdr.class_type) { 1063 1069 case ICMP_EXT_ECHO_CTYPE_NAME: 1064 - iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio); 1065 1070 if (ident_len >= IFNAMSIZ) 1066 1071 goto send_mal_query; 1067 1072 memset(buff, 0, sizeof(buff)); ··· 1074 1069 dev = dev_get_by_name(net, buff); 1075 1070 break; 1076 1071 case ICMP_EXT_ECHO_CTYPE_INDEX: 1077 - iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) + 1078 - sizeof(iio->ident.ifindex), &_iio); 1079 1072 if (ident_len != sizeof(iio->ident.ifindex)) 1080 1073 goto send_mal_query; 1081 1074 dev = dev_get_by_index(net, ntohl(iio->ident.ifindex)); 1082 1075 break; 1083 1076 case ICMP_EXT_ECHO_CTYPE_ADDR: 1084 - if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) + 1077 + if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) || 1078 + ident_len != sizeof(iio->ident.addr.ctype3_hdr) + 1085 1079 iio->ident.addr.ctype3_hdr.addrlen) 1086 1080 goto send_mal_query; 1087 1081 switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) { 1088 1082 case ICMP_AFI_IP: 1089 - iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr) + 1090 - sizeof(struct in_addr), &_iio); 1091 - if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) + 1092 - sizeof(struct in_addr)) 1083 + if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct 
in_addr)) 1093 1084 goto send_mal_query; 1094 1085 dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr); 1095 1086 break; 1096 1087 #if IS_ENABLED(CONFIG_IPV6) 1097 1088 case ICMP_AFI_IP6: 1098 - iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(_iio), &_iio); 1099 - if (ident_len != sizeof(iio->ident.addr.ctype3_hdr) + 1100 - sizeof(struct in6_addr)) 1089 + if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr)) 1101 1090 goto send_mal_query; 1102 1091 dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev); 1103 1092 dev_hold(dev);
+62 -8
net/ipv6/ioam6.c
··· 770 770 data += sizeof(__be32); 771 771 } 772 772 773 + /* bit12 undefined: filled with empty value */ 774 + if (trace->type.bit12) { 775 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 776 + data += sizeof(__be32); 777 + } 778 + 779 + /* bit13 undefined: filled with empty value */ 780 + if (trace->type.bit13) { 781 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 782 + data += sizeof(__be32); 783 + } 784 + 785 + /* bit14 undefined: filled with empty value */ 786 + if (trace->type.bit14) { 787 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 788 + data += sizeof(__be32); 789 + } 790 + 791 + /* bit15 undefined: filled with empty value */ 792 + if (trace->type.bit15) { 793 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 794 + data += sizeof(__be32); 795 + } 796 + 797 + /* bit16 undefined: filled with empty value */ 798 + if (trace->type.bit16) { 799 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 800 + data += sizeof(__be32); 801 + } 802 + 803 + /* bit17 undefined: filled with empty value */ 804 + if (trace->type.bit17) { 805 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 806 + data += sizeof(__be32); 807 + } 808 + 809 + /* bit18 undefined: filled with empty value */ 810 + if (trace->type.bit18) { 811 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 812 + data += sizeof(__be32); 813 + } 814 + 815 + /* bit19 undefined: filled with empty value */ 816 + if (trace->type.bit19) { 817 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 818 + data += sizeof(__be32); 819 + } 820 + 821 + /* bit20 undefined: filled with empty value */ 822 + if (trace->type.bit20) { 823 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 824 + data += sizeof(__be32); 825 + } 826 + 827 + /* bit21 undefined: filled with empty value */ 828 + if (trace->type.bit21) { 829 + *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE); 830 + data += sizeof(__be32); 831 + } 832 + 773 833 /* opaque state snapshot */ 774 834 if 
(trace->type.bit22) { 775 835 if (!sc) { ··· 851 791 struct ioam6_schema *sc; 852 792 u8 sclen = 0; 853 793 854 - /* Skip if Overflow flag is set OR 855 - * if an unknown type (bit 12-21) is set 794 + /* Skip if Overflow flag is set 856 795 */ 857 - if (trace->overflow || 858 - trace->type.bit12 | trace->type.bit13 | trace->type.bit14 | 859 - trace->type.bit15 | trace->type.bit16 | trace->type.bit17 | 860 - trace->type.bit18 | trace->type.bit19 | trace->type.bit20 | 861 - trace->type.bit21) { 796 + if (trace->overflow) 862 797 return; 863 - } 864 798 865 799 /* NodeLen does not include Opaque State Snapshot length. We need to 866 800 * take it into account if the corresponding bit is set (bit 22) and
+5 -1
net/ipv6/ioam6_iptunnel.c
··· 75 75 u32 fields; 76 76 77 77 if (!trace->type_be32 || !trace->remlen || 78 - trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4) 78 + trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 || 79 + trace->type.bit12 | trace->type.bit13 | trace->type.bit14 | 80 + trace->type.bit15 | trace->type.bit16 | trace->type.bit17 | 81 + trace->type.bit18 | trace->type.bit19 | trace->type.bit20 | 82 + trace->type.bit21) 79 83 return false; 80 84 81 85 trace->nodelen = 0;
+15 -40
net/mptcp/protocol.c
··· 528 528 529 529 sk->sk_shutdown |= RCV_SHUTDOWN; 530 530 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 531 - set_bit(MPTCP_DATA_READY, &msk->flags); 532 531 533 532 switch (sk->sk_state) { 534 533 case TCP_ESTABLISHED: ··· 741 742 742 743 /* Wake-up the reader only for in-sequence data */ 743 744 mptcp_data_lock(sk); 744 - if (move_skbs_to_msk(msk, ssk)) { 745 - set_bit(MPTCP_DATA_READY, &msk->flags); 745 + if (move_skbs_to_msk(msk, ssk)) 746 746 sk->sk_data_ready(sk); 747 - } 747 + 748 748 mptcp_data_unlock(sk); 749 749 } 750 750 ··· 845 847 sk->sk_shutdown |= RCV_SHUTDOWN; 846 848 847 849 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 848 - set_bit(MPTCP_DATA_READY, &msk->flags); 849 850 sk->sk_data_ready(sk); 850 851 } 851 852 ··· 1756 1759 return copied ? : ret; 1757 1760 } 1758 1761 1759 - static void mptcp_wait_data(struct sock *sk, long *timeo) 1760 - { 1761 - DEFINE_WAIT_FUNC(wait, woken_wake_function); 1762 - struct mptcp_sock *msk = mptcp_sk(sk); 1763 - 1764 - add_wait_queue(sk_sleep(sk), &wait); 1765 - sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1766 - 1767 - sk_wait_event(sk, timeo, 1768 - test_bit(MPTCP_DATA_READY, &msk->flags), &wait); 1769 - 1770 - sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 1771 - remove_wait_queue(sk_sleep(sk), &wait); 1772 - } 1773 - 1774 1762 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, 1775 1763 struct msghdr *msg, 1776 1764 size_t len, int flags, ··· 2059 2077 } 2060 2078 2061 2079 pr_debug("block timeout %ld", timeo); 2062 - mptcp_wait_data(sk, &timeo); 2063 - } 2064 - 2065 - if (skb_queue_empty_lockless(&sk->sk_receive_queue) && 2066 - skb_queue_empty(&msk->receive_queue)) { 2067 - /* entire backlog drained, clear DATA_READY. */ 2068 - clear_bit(MPTCP_DATA_READY, &msk->flags); 2069 - 2070 - /* .. race-breaker: ssk might have gotten new data 2071 - * after last __mptcp_move_skbs() returned false. 
2072 - */ 2073 - if (unlikely(__mptcp_move_skbs(msk))) 2074 - set_bit(MPTCP_DATA_READY, &msk->flags); 2080 + sk_wait_data(sk, &timeo, NULL); 2075 2081 } 2076 2082 2077 2083 out_err: ··· 2068 2098 tcp_recv_timestamp(msg, sk, &tss); 2069 2099 } 2070 2100 2071 - pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d", 2072 - msk, test_bit(MPTCP_DATA_READY, &msk->flags), 2073 - skb_queue_empty_lockless(&sk->sk_receive_queue), copied); 2101 + pr_debug("msk=%p rx queue empty=%d:%d copied=%d", 2102 + msk, skb_queue_empty_lockless(&sk->sk_receive_queue), 2103 + skb_queue_empty(&msk->receive_queue), copied); 2074 2104 if (!(flags & MSG_PEEK)) 2075 2105 mptcp_rcv_space_adjust(msk, copied); 2076 2106 ··· 2338 2368 inet_sk_state_store(sk, TCP_CLOSE); 2339 2369 sk->sk_shutdown = SHUTDOWN_MASK; 2340 2370 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ 2341 - set_bit(MPTCP_DATA_READY, &msk->flags); 2342 2371 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); 2343 2372 2344 2373 mptcp_close_wake_up(sk); ··· 3354 3385 3355 3386 static __poll_t mptcp_check_readable(struct mptcp_sock *msk) 3356 3387 { 3357 - return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 3358 - 0; 3388 + /* Concurrent splices from sk_receive_queue into receive_queue will 3389 + * always show at least one non-empty queue when checked in this order. 3390 + */ 3391 + if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) && 3392 + skb_queue_empty_lockless(&msk->receive_queue)) 3393 + return 0; 3394 + 3395 + return EPOLLIN | EPOLLRDNORM; 3359 3396 } 3360 3397 3361 3398 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) ··· 3396 3421 state = inet_sk_state_load(sk); 3397 3422 pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); 3398 3423 if (state == TCP_LISTEN) 3399 - return mptcp_check_readable(msk); 3424 + return test_bit(MPTCP_DATA_READY, &msk->flags) ? 
EPOLLIN | EPOLLRDNORM : 0; 3400 3425 3401 3426 if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { 3402 3427 mask |= mptcp_check_readable(msk);
+3
net/nfc/af_nfc.c
··· 60 60 proto_tab[nfc_proto->id] = nfc_proto; 61 61 write_unlock(&proto_tab_lock); 62 62 63 + if (rc) 64 + proto_unregister(nfc_proto->proto); 65 + 63 66 return rc; 64 67 } 65 68 EXPORT_SYMBOL(nfc_proto_register);
+7 -2
net/nfc/digital_core.c
··· 277 277 static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech) 278 278 { 279 279 struct digital_tg_mdaa_params *params; 280 + int rc; 280 281 281 282 params = kzalloc(sizeof(*params), GFP_KERNEL); 282 283 if (!params) ··· 292 291 get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2); 293 292 params->sc = DIGITAL_SENSF_FELICA_SC; 294 293 295 - return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params, 296 - 500, digital_tg_recv_atr_req, NULL); 294 + rc = digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params, 295 + 500, digital_tg_recv_atr_req, NULL); 296 + if (rc) 297 + kfree(params); 298 + 299 + return rc; 297 300 } 298 301 299 302 static int digital_tg_listen_md(struct nfc_digital_dev *ddev, u8 rf_tech)
+6 -2
net/nfc/digital_technology.c
··· 465 465 skb_put_u8(skb, sel_cmd); 466 466 skb_put_u8(skb, DIGITAL_SDD_REQ_SEL_PAR); 467 467 468 - return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res, 469 - target); 468 + rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res, 469 + target); 470 + if (rc) 471 + kfree_skb(skb); 472 + 473 + return rc; 470 474 } 471 475 472 476 static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
+2
net/nfc/nci/rsp.c
··· 334 334 ndev->cur_conn_id); 335 335 if (conn_info) { 336 336 list_del(&conn_info->list); 337 + if (conn_info == ndev->rf_conn_info) 338 + ndev->rf_conn_info = NULL; 337 339 devm_kfree(&ndev->nfc_dev->dev, conn_info); 338 340 } 339 341 }
+19 -13
net/sched/sch_mqprio.c
··· 529 529 for (i = tc.offset; i < tc.offset + tc.count; i++) { 530 530 struct netdev_queue *q = netdev_get_tx_queue(dev, i); 531 531 struct Qdisc *qdisc = rtnl_dereference(q->qdisc); 532 - struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL; 533 - struct gnet_stats_queue __percpu *cpu_qstats = NULL; 534 532 535 533 spin_lock_bh(qdisc_lock(qdisc)); 536 - if (qdisc_is_percpu_stats(qdisc)) { 537 - cpu_bstats = qdisc->cpu_bstats; 538 - cpu_qstats = qdisc->cpu_qstats; 539 - } 540 534 541 - qlen = qdisc_qlen_sum(qdisc); 542 - __gnet_stats_copy_basic(NULL, &sch->bstats, 543 - cpu_bstats, &qdisc->bstats); 544 - __gnet_stats_copy_queue(&sch->qstats, 545 - cpu_qstats, 546 - &qdisc->qstats, 547 - qlen); 535 + if (qdisc_is_percpu_stats(qdisc)) { 536 + qlen = qdisc_qlen_sum(qdisc); 537 + 538 + __gnet_stats_copy_basic(NULL, &bstats, 539 + qdisc->cpu_bstats, 540 + &qdisc->bstats); 541 + __gnet_stats_copy_queue(&qstats, 542 + qdisc->cpu_qstats, 543 + &qdisc->qstats, 544 + qlen); 545 + } else { 546 + qlen += qdisc->q.qlen; 547 + bstats.bytes += qdisc->bstats.bytes; 548 + bstats.packets += qdisc->bstats.packets; 549 + qstats.backlog += qdisc->qstats.backlog; 550 + qstats.drops += qdisc->qstats.drops; 551 + qstats.requeues += qdisc->qstats.requeues; 552 + qstats.overlimits += qdisc->qstats.overlimits; 553 + } 548 554 spin_unlock_bh(qdisc_lock(qdisc)); 549 555 } 550 556
+1 -1
net/sctp/sm_make_chunk.c
··· 3697 3697 outlen = (sizeof(outreq) + stream_len) * out; 3698 3698 inlen = (sizeof(inreq) + stream_len) * in; 3699 3699 3700 - retval = sctp_make_reconf(asoc, outlen + inlen); 3700 + retval = sctp_make_reconf(asoc, SCTP_PAD4(outlen) + SCTP_PAD4(inlen)); 3701 3701 if (!retval) 3702 3702 return NULL; 3703 3703
+6 -1
net/smc/smc_cdc.c
··· 150 150 151 151 again: 152 152 link = conn->lnk; 153 + if (!smc_wr_tx_link_hold(link)) 154 + return -ENOLINK; 153 155 rc = smc_cdc_get_free_slot(conn, link, &wr_buf, NULL, &pend); 154 156 if (rc) 155 - return rc; 157 + goto put_out; 156 158 157 159 spin_lock_bh(&conn->send_lock); 158 160 if (link != conn->lnk) { ··· 162 160 spin_unlock_bh(&conn->send_lock); 163 161 smc_wr_tx_put_slot(link, 164 162 (struct smc_wr_tx_pend_priv *)pend); 163 + smc_wr_tx_link_put(link); 165 164 if (again) 166 165 return -ENOLINK; 167 166 again = true; ··· 170 167 } 171 168 rc = smc_cdc_msg_send(conn, wr_buf, pend); 172 169 spin_unlock_bh(&conn->send_lock); 170 + put_out: 171 + smc_wr_tx_link_put(link); 173 172 return rc; 174 173 } 175 174
+11 -9
net/smc/smc_core.c
··· 949 949 to_lnk = &lgr->lnk[i]; 950 950 break; 951 951 } 952 - if (!to_lnk) { 952 + if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) { 953 953 smc_lgr_terminate_sched(lgr); 954 954 return NULL; 955 955 } ··· 981 981 read_unlock_bh(&lgr->conns_lock); 982 982 /* pre-fetch buffer outside of send_lock, might sleep */ 983 983 rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend); 984 - if (rc) { 985 - smcr_link_down_cond_sched(to_lnk); 986 - return NULL; 987 - } 984 + if (rc) 985 + goto err_out; 988 986 /* avoid race with smcr_tx_sndbuf_nonempty() */ 989 987 spin_lock_bh(&conn->send_lock); 990 988 smc_switch_link_and_count(conn, to_lnk); 991 989 rc = smc_switch_cursor(smc, pend, wr_buf); 992 990 spin_unlock_bh(&conn->send_lock); 993 991 sock_put(&smc->sk); 994 - if (rc) { 995 - smcr_link_down_cond_sched(to_lnk); 996 - return NULL; 997 - } 992 + if (rc) 993 + goto err_out; 998 994 goto again; 999 995 } 1000 996 read_unlock_bh(&lgr->conns_lock); 997 + smc_wr_tx_link_put(to_lnk); 1001 998 return to_lnk; 999 + 1000 + err_out: 1001 + smcr_link_down_cond_sched(to_lnk); 1002 + smc_wr_tx_link_put(to_lnk); 1003 + return NULL; 1002 1004 } 1003 1005 1004 1006 static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
+49 -14
net/smc/smc_llc.c
··· 383 383 struct smc_wr_buf *wr_buf; 384 384 int rc; 385 385 386 + if (!smc_wr_tx_link_hold(link)) 387 + return -ENOLINK; 386 388 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 387 389 if (rc) 388 - return rc; 390 + goto put_out; 389 391 confllc = (struct smc_llc_msg_confirm_link *)wr_buf; 390 392 memset(confllc, 0, sizeof(*confllc)); 391 393 confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; ··· 404 402 confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; 405 403 /* send llc message */ 406 404 rc = smc_wr_tx_send(link, pend); 405 + put_out: 406 + smc_wr_tx_link_put(link); 407 407 return rc; 408 408 } 409 409 ··· 419 415 struct smc_link *link; 420 416 int i, rc, rtok_ix; 421 417 418 + if (!smc_wr_tx_link_hold(send_link)) 419 + return -ENOLINK; 422 420 rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend); 423 421 if (rc) 424 - return rc; 422 + goto put_out; 425 423 rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf; 426 424 memset(rkeyllc, 0, sizeof(*rkeyllc)); 427 425 rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY; ··· 450 444 (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl)); 451 445 /* send llc message */ 452 446 rc = smc_wr_tx_send(send_link, pend); 447 + put_out: 448 + smc_wr_tx_link_put(send_link); 453 449 return rc; 454 450 } 455 451 ··· 464 456 struct smc_wr_buf *wr_buf; 465 457 int rc; 466 458 459 + if (!smc_wr_tx_link_hold(link)) 460 + return -ENOLINK; 467 461 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 468 462 if (rc) 469 - return rc; 463 + goto put_out; 470 464 rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf; 471 465 memset(rkeyllc, 0, sizeof(*rkeyllc)); 472 466 rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY; ··· 477 467 rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey); 478 468 /* send llc message */ 479 469 rc = smc_wr_tx_send(link, pend); 470 + put_out: 471 + smc_wr_tx_link_put(link); 480 472 return rc; 481 473 } 482 474 ··· 492 480 struct smc_wr_buf *wr_buf; 493 481 int rc; 494 482 483 + if 
(!smc_wr_tx_link_hold(link)) 484 + return -ENOLINK; 495 485 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 496 486 if (rc) 497 - return rc; 487 + goto put_out; 498 488 addllc = (struct smc_llc_msg_add_link *)wr_buf; 499 489 500 490 memset(addllc, 0, sizeof(*addllc)); ··· 518 504 } 519 505 /* send llc message */ 520 506 rc = smc_wr_tx_send(link, pend); 507 + put_out: 508 + smc_wr_tx_link_put(link); 521 509 return rc; 522 510 } 523 511 ··· 533 517 struct smc_wr_buf *wr_buf; 534 518 int rc; 535 519 520 + if (!smc_wr_tx_link_hold(link)) 521 + return -ENOLINK; 536 522 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 537 523 if (rc) 538 - return rc; 524 + goto put_out; 539 525 delllc = (struct smc_llc_msg_del_link *)wr_buf; 540 526 541 527 memset(delllc, 0, sizeof(*delllc)); ··· 554 536 delllc->reason = htonl(reason); 555 537 /* send llc message */ 556 538 rc = smc_wr_tx_send(link, pend); 539 + put_out: 540 + smc_wr_tx_link_put(link); 557 541 return rc; 558 542 } 559 543 ··· 567 547 struct smc_wr_buf *wr_buf; 568 548 int rc; 569 549 550 + if (!smc_wr_tx_link_hold(link)) 551 + return -ENOLINK; 570 552 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 571 553 if (rc) 572 - return rc; 554 + goto put_out; 573 555 testllc = (struct smc_llc_msg_test_link *)wr_buf; 574 556 memset(testllc, 0, sizeof(*testllc)); 575 557 testllc->hd.common.type = SMC_LLC_TEST_LINK; ··· 579 557 memcpy(testllc->user_data, user_data, sizeof(testllc->user_data)); 580 558 /* send llc message */ 581 559 rc = smc_wr_tx_send(link, pend); 560 + put_out: 561 + smc_wr_tx_link_put(link); 582 562 return rc; 583 563 } 584 564 ··· 591 567 struct smc_wr_buf *wr_buf; 592 568 int rc; 593 569 594 - if (!smc_link_usable(link)) 570 + if (!smc_wr_tx_link_hold(link)) 595 571 return -ENOLINK; 596 572 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 597 573 if (rc) 598 - return rc; 574 + goto put_out; 599 575 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg)); 600 - return smc_wr_tx_send(link, pend); 576 + 
rc = smc_wr_tx_send(link, pend); 577 + put_out: 578 + smc_wr_tx_link_put(link); 579 + return rc; 601 580 } 602 581 603 582 /* schedule an llc send on link, may wait for buffers, ··· 613 586 struct smc_wr_buf *wr_buf; 614 587 int rc; 615 588 616 - if (!smc_link_usable(link)) 589 + if (!smc_wr_tx_link_hold(link)) 617 590 return -ENOLINK; 618 591 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 619 592 if (rc) 620 - return rc; 593 + goto put_out; 621 594 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg)); 622 - return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME); 595 + rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME); 596 + put_out: 597 + smc_wr_tx_link_put(link); 598 + return rc; 623 599 } 624 600 625 601 /********************************* receive ***********************************/ ··· 702 672 struct smc_buf_desc *rmb; 703 673 u8 n; 704 674 675 + if (!smc_wr_tx_link_hold(link)) 676 + return -ENOLINK; 705 677 rc = smc_llc_add_pending_send(link, &wr_buf, &pend); 706 678 if (rc) 707 - return rc; 679 + goto put_out; 708 680 addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf; 709 681 memset(addc_llc, 0, sizeof(*addc_llc)); 710 682 ··· 738 706 addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); 739 707 if (lgr->role == SMC_CLNT) 740 708 addc_llc->hd.flags |= SMC_LLC_FLAG_RESP; 741 - return smc_wr_tx_send(link, pend); 709 + rc = smc_wr_tx_send(link, pend); 710 + put_out: 711 + smc_wr_tx_link_put(link); 712 + return rc; 742 713 } 743 714 744 715 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+5 -17
net/smc/smc_tx.c
··· 496 496 /* Wakeup sndbuf consumers from any context (IRQ or process) 497 497 * since there is more data to transmit; usable snd_wnd as max transmit 498 498 */ 499 - static int _smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 499 + static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 500 500 { 501 501 struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags; 502 502 struct smc_link *link = conn->lnk; ··· 505 505 struct smc_wr_buf *wr_buf; 506 506 int rc; 507 507 508 + if (!link || !smc_wr_tx_link_hold(link)) 509 + return -ENOLINK; 508 510 rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend); 509 511 if (rc < 0) { 512 + smc_wr_tx_link_put(link); 510 513 if (rc == -EBUSY) { 511 514 struct smc_sock *smc = 512 515 container_of(conn, struct smc_sock, conn); ··· 550 547 551 548 out_unlock: 552 549 spin_unlock_bh(&conn->send_lock); 553 - return rc; 554 - } 555 - 556 - static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) 557 - { 558 - struct smc_link *link = conn->lnk; 559 - int rc = -ENOLINK; 560 - 561 - if (!link) 562 - return rc; 563 - 564 - atomic_inc(&link->wr_tx_refcnt); 565 - if (smc_link_usable(link)) 566 - rc = _smcr_tx_sndbuf_nonempty(conn); 567 - if (atomic_dec_and_test(&link->wr_tx_refcnt)) 568 - wake_up_all(&link->wr_tx_wait); 550 + smc_wr_tx_link_put(link); 569 551 return rc; 570 552 } 571 553
+14
net/smc/smc_wr.h
··· 60 60 atomic_long_set(wr_tx_id, val); 61 61 } 62 62 63 + static inline bool smc_wr_tx_link_hold(struct smc_link *link) 64 + { 65 + if (!smc_link_usable(link)) 66 + return false; 67 + atomic_inc(&link->wr_tx_refcnt); 68 + return true; 69 + } 70 + 71 + static inline void smc_wr_tx_link_put(struct smc_link *link) 72 + { 73 + if (atomic_dec_and_test(&link->wr_tx_refcnt)) 74 + wake_up_all(&link->wr_tx_wait); 75 + } 76 + 63 77 static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk) 64 78 { 65 79 wake_up_all(&lnk->wr_tx_wait);
+1 -1
net/unix/af_unix.c
··· 828 828 } 829 829 830 830 struct proto unix_dgram_proto = { 831 - .name = "UNIX-DGRAM", 831 + .name = "UNIX", 832 832 .owner = THIS_MODULE, 833 833 .obj_size = sizeof(struct unix_sock), 834 834 .close = unix_close,
+20 -4
tools/testing/selftests/net/ioam6.sh
··· 468 468 for i in {0..22} 469 469 do 470 470 ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \ 471 - prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0 471 + prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} \ 472 + dev veth0 &>/dev/null 472 473 473 - run_test "out_bit$i" "${desc/<n>/$i}" ioam-node-alpha ioam-node-beta \ 474 - db01::2 db01::1 veth0 ${bit2type[$i]} 123 474 + local cmd_res=$? 475 + local descr="${desc/<n>/$i}" 476 + 477 + if [[ $i -ge 12 && $i -le 21 ]] 478 + then 479 + if [ $cmd_res != 0 ] 480 + then 481 + npassed=$((npassed+1)) 482 + log_test_passed "$descr" 483 + else 484 + nfailed=$((nfailed+1)) 485 + log_test_failed "$descr" 486 + fi 487 + else 488 + run_test "out_bit$i" "$descr" ioam-node-alpha ioam-node-beta \ 489 + db01::2 db01::1 veth0 ${bit2type[$i]} 123 490 + fi 475 491 done 476 492 477 493 bit2size[22]=$tmp ··· 560 544 local tmp=${bit2size[22]} 561 545 bit2size[22]=$(( $tmp + ${#BETA[9]} + ((4 - (${#BETA[9]} % 4)) % 4) )) 562 546 563 - for i in {0..22} 547 + for i in {0..11} {22..22} 564 548 do 565 549 ip -netns ioam-node-alpha route change db01::/64 encap ioam6 trace \ 566 550 prealloc type ${bit2type[$i]} ns 123 size ${bit2size[$i]} dev veth0
+60 -104
tools/testing/selftests/net/ioam6_parser.c
··· 94 94 TEST_OUT_BIT9, 95 95 TEST_OUT_BIT10, 96 96 TEST_OUT_BIT11, 97 - TEST_OUT_BIT12, 98 - TEST_OUT_BIT13, 99 - TEST_OUT_BIT14, 100 - TEST_OUT_BIT15, 101 - TEST_OUT_BIT16, 102 - TEST_OUT_BIT17, 103 - TEST_OUT_BIT18, 104 - TEST_OUT_BIT19, 105 - TEST_OUT_BIT20, 106 - TEST_OUT_BIT21, 107 97 TEST_OUT_BIT22, 108 98 TEST_OUT_FULL_SUPP_TRACE, 109 99 ··· 115 125 TEST_IN_BIT9, 116 126 TEST_IN_BIT10, 117 127 TEST_IN_BIT11, 118 - TEST_IN_BIT12, 119 - TEST_IN_BIT13, 120 - TEST_IN_BIT14, 121 - TEST_IN_BIT15, 122 - TEST_IN_BIT16, 123 - TEST_IN_BIT17, 124 - TEST_IN_BIT18, 125 - TEST_IN_BIT19, 126 - TEST_IN_BIT20, 127 - TEST_IN_BIT21, 128 128 TEST_IN_BIT22, 129 129 TEST_IN_FULL_SUPP_TRACE, 130 130 ··· 178 198 return ioam6h->overflow || 179 199 ioam6h->nodelen != 2 || 180 200 ioam6h->remlen; 181 - 182 - case TEST_OUT_BIT12: 183 - case TEST_IN_BIT12: 184 - case TEST_OUT_BIT13: 185 - case TEST_IN_BIT13: 186 - case TEST_OUT_BIT14: 187 - case TEST_IN_BIT14: 188 - case TEST_OUT_BIT15: 189 - case TEST_IN_BIT15: 190 - case TEST_OUT_BIT16: 191 - case TEST_IN_BIT16: 192 - case TEST_OUT_BIT17: 193 - case TEST_IN_BIT17: 194 - case TEST_OUT_BIT18: 195 - case TEST_IN_BIT18: 196 - case TEST_OUT_BIT19: 197 - case TEST_IN_BIT19: 198 - case TEST_OUT_BIT20: 199 - case TEST_IN_BIT20: 200 - case TEST_OUT_BIT21: 201 - case TEST_IN_BIT21: 202 - return ioam6h->overflow || 203 - ioam6h->nodelen || 204 - ioam6h->remlen != 1; 205 201 206 202 case TEST_OUT_BIT22: 207 203 case TEST_IN_BIT22: ··· 277 321 } 278 322 279 323 if (ioam6h->type.bit11) { 324 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 325 + return 1; 326 + *p += sizeof(__u32); 327 + } 328 + 329 + if (ioam6h->type.bit12) { 330 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 331 + return 1; 332 + *p += sizeof(__u32); 333 + } 334 + 335 + if (ioam6h->type.bit13) { 336 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 337 + return 1; 338 + *p += sizeof(__u32); 339 + } 340 + 341 + if (ioam6h->type.bit14) { 342 + if (__be32_to_cpu(*((__u32 
*)*p)) != 0xffffffff) 343 + return 1; 344 + *p += sizeof(__u32); 345 + } 346 + 347 + if (ioam6h->type.bit15) { 348 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 349 + return 1; 350 + *p += sizeof(__u32); 351 + } 352 + 353 + if (ioam6h->type.bit16) { 354 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 355 + return 1; 356 + *p += sizeof(__u32); 357 + } 358 + 359 + if (ioam6h->type.bit17) { 360 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 361 + return 1; 362 + *p += sizeof(__u32); 363 + } 364 + 365 + if (ioam6h->type.bit18) { 366 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 367 + return 1; 368 + *p += sizeof(__u32); 369 + } 370 + 371 + if (ioam6h->type.bit19) { 372 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 373 + return 1; 374 + *p += sizeof(__u32); 375 + } 376 + 377 + if (ioam6h->type.bit20) { 378 + if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 379 + return 1; 380 + *p += sizeof(__u32); 381 + } 382 + 383 + if (ioam6h->type.bit21) { 280 384 if (__be32_to_cpu(*((__u32 *)*p)) != 0xffffffff) 281 385 return 1; 282 386 *p += sizeof(__u32); ··· 471 455 return TEST_OUT_BIT10; 472 456 if (!strcmp("out_bit11", tname)) 473 457 return TEST_OUT_BIT11; 474 - if (!strcmp("out_bit12", tname)) 475 - return TEST_OUT_BIT12; 476 - if (!strcmp("out_bit13", tname)) 477 - return TEST_OUT_BIT13; 478 - if (!strcmp("out_bit14", tname)) 479 - return TEST_OUT_BIT14; 480 - if (!strcmp("out_bit15", tname)) 481 - return TEST_OUT_BIT15; 482 - if (!strcmp("out_bit16", tname)) 483 - return TEST_OUT_BIT16; 484 - if (!strcmp("out_bit17", tname)) 485 - return TEST_OUT_BIT17; 486 - if (!strcmp("out_bit18", tname)) 487 - return TEST_OUT_BIT18; 488 - if (!strcmp("out_bit19", tname)) 489 - return TEST_OUT_BIT19; 490 - if (!strcmp("out_bit20", tname)) 491 - return TEST_OUT_BIT20; 492 - if (!strcmp("out_bit21", tname)) 493 - return TEST_OUT_BIT21; 494 458 if (!strcmp("out_bit22", tname)) 495 459 return TEST_OUT_BIT22; 496 460 if (!strcmp("out_full_supp_trace", tname)) ··· 505 
509 return TEST_IN_BIT10; 506 510 if (!strcmp("in_bit11", tname)) 507 511 return TEST_IN_BIT11; 508 - if (!strcmp("in_bit12", tname)) 509 - return TEST_IN_BIT12; 510 - if (!strcmp("in_bit13", tname)) 511 - return TEST_IN_BIT13; 512 - if (!strcmp("in_bit14", tname)) 513 - return TEST_IN_BIT14; 514 - if (!strcmp("in_bit15", tname)) 515 - return TEST_IN_BIT15; 516 - if (!strcmp("in_bit16", tname)) 517 - return TEST_IN_BIT16; 518 - if (!strcmp("in_bit17", tname)) 519 - return TEST_IN_BIT17; 520 - if (!strcmp("in_bit18", tname)) 521 - return TEST_IN_BIT18; 522 - if (!strcmp("in_bit19", tname)) 523 - return TEST_IN_BIT19; 524 - if (!strcmp("in_bit20", tname)) 525 - return TEST_IN_BIT20; 526 - if (!strcmp("in_bit21", tname)) 527 - return TEST_IN_BIT21; 528 512 if (!strcmp("in_bit22", tname)) 529 513 return TEST_IN_BIT22; 530 514 if (!strcmp("in_full_supp_trace", tname)) ··· 582 606 [TEST_OUT_BIT9] = check_ioam_header_and_data, 583 607 [TEST_OUT_BIT10] = check_ioam_header_and_data, 584 608 [TEST_OUT_BIT11] = check_ioam_header_and_data, 585 - [TEST_OUT_BIT12] = check_ioam_header, 586 - [TEST_OUT_BIT13] = check_ioam_header, 587 - [TEST_OUT_BIT14] = check_ioam_header, 588 - [TEST_OUT_BIT15] = check_ioam_header, 589 - [TEST_OUT_BIT16] = check_ioam_header, 590 - [TEST_OUT_BIT17] = check_ioam_header, 591 - [TEST_OUT_BIT18] = check_ioam_header, 592 - [TEST_OUT_BIT19] = check_ioam_header, 593 - [TEST_OUT_BIT20] = check_ioam_header, 594 - [TEST_OUT_BIT21] = check_ioam_header, 595 609 [TEST_OUT_BIT22] = check_ioam_header_and_data, 596 610 [TEST_OUT_FULL_SUPP_TRACE] = check_ioam_header_and_data, 597 611 [TEST_IN_UNDEF_NS] = check_ioam_header, ··· 599 633 [TEST_IN_BIT9] = check_ioam_header_and_data, 600 634 [TEST_IN_BIT10] = check_ioam_header_and_data, 601 635 [TEST_IN_BIT11] = check_ioam_header_and_data, 602 - [TEST_IN_BIT12] = check_ioam_header, 603 - [TEST_IN_BIT13] = check_ioam_header, 604 - [TEST_IN_BIT14] = check_ioam_header, 605 - [TEST_IN_BIT15] = check_ioam_header, 606 - 
[TEST_IN_BIT16] = check_ioam_header, 607 - [TEST_IN_BIT17] = check_ioam_header, 608 - [TEST_IN_BIT18] = check_ioam_header, 609 - [TEST_IN_BIT19] = check_ioam_header, 610 - [TEST_IN_BIT20] = check_ioam_header, 611 - [TEST_IN_BIT21] = check_ioam_header, 612 636 [TEST_IN_BIT22] = check_ioam_header_and_data, 613 637 [TEST_IN_FULL_SUPP_TRACE] = check_ioam_header_and_data, 614 638 [TEST_FWD_FULL_SUPP_TRACE] = check_ioam_header_and_data,