Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"This fixes some fallout from the net-next merge the other day, plus
some non-merge-window-related bug fixes:

1) Fix sparse warnings in bcmgenet, systemport, b53, and mt7530
(Florian Fainelli)

2) pptp does a bogus dst_release() on a route we have a single
refcount on, and attached to a socket, which needs that refcount
(Eric Dumazet)

3) UDP connected sockets on ipv6 can race with route update handling,
resulting in a pre-PMTU update route still stuck on the socket and
thus continuing to get ICMPV6_PKT_TOOBIG errors. We end up never
seeing the updated route. (Alexey Kodanev)

4) Missing list initializer(s) in TIPC (Jon Maloy)

5) Connect phy early to prevent crashes in lan78xx driver (Alexander
Graf)

6) Fix build with modular NVMEM (Arnd Bergmann)

7) netdevsim cannot mark nsim_devlink_net_ops and nsim_fib_net_ops as
__net_initdata, as these are referenced from module unload
unconditionally (Arnd Bergmann)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (23 commits)
netdevsim: remove incorrect __net_initdata annotations
sfc: remove ctpio_dmabuf_start from stats
inet: frags: fix ip6frag_low_thresh boundary
tipc: Fix namespace violation in tipc_sk_fill_sock_diag
net: avoid unneeded atomic operation in ip*_append_data()
nvmem: disallow modular CONFIG_NVMEM
net: hns3: fix length overflow when CONFIG_ARM64_64K_PAGES
nfp: use full 40 bits of the NSP buffer address
lan78xx: Connect phy early
nfp: add a separate counter for packets with CHECKSUM_COMPLETE
tipc: Fix missing list initializations in struct tipc_subscription
ipv6: udp: set dst cache for a connected sk if current not valid
ipv6: udp: convert 'connected' to bool type in udpv6_sendmsg()
ipv6: allow to cache dst for a connected sk in ip6_sk_dst_lookup_flow()
ipv6: add a wrapper for ip6_dst_store() with flowi6 checks
net: phy: marvell10g: add thermal hwmon device
pptp: remove a buggy dst release in pptp_connect()
net: dsa: mt7530: Use NULL instead of plain integer
net: dsa: b53: Fix sparse warnings in b53_mmap.c
af_unix: remove redundant lockdep class
...

+326 -120
+24 -9
drivers/net/dsa/b53/b53_mmap.c
··· 30 30 31 31 static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) 32 32 { 33 - u8 __iomem *regs = dev->priv; 33 + struct b53_mmap_priv *priv = dev->priv; 34 + void __iomem *regs = priv->regs; 34 35 35 36 *val = readb(regs + (page << 8) + reg); 36 37 ··· 40 39 41 40 static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) 42 41 { 43 - u8 __iomem *regs = dev->priv; 42 + struct b53_mmap_priv *priv = dev->priv; 43 + void __iomem *regs = priv->regs; 44 44 45 45 if (WARN_ON(reg % 2)) 46 46 return -EINVAL; ··· 56 54 57 55 static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) 58 56 { 59 - u8 __iomem *regs = dev->priv; 57 + struct b53_mmap_priv *priv = dev->priv; 58 + void __iomem *regs = priv->regs; 60 59 61 60 if (WARN_ON(reg % 4)) 62 61 return -EINVAL; ··· 72 69 73 70 static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) 74 71 { 75 - u8 __iomem *regs = dev->priv; 72 + struct b53_mmap_priv *priv = dev->priv; 73 + void __iomem *regs = priv->regs; 76 74 77 75 if (WARN_ON(reg % 2)) 78 76 return -EINVAL; ··· 111 107 112 108 static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) 113 109 { 114 - u8 __iomem *regs = dev->priv; 110 + struct b53_mmap_priv *priv = dev->priv; 111 + void __iomem *regs = priv->regs; 115 112 u32 hi, lo; 116 113 117 114 if (WARN_ON(reg % 4)) ··· 133 128 134 129 static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) 135 130 { 136 - u8 __iomem *regs = dev->priv; 131 + struct b53_mmap_priv *priv = dev->priv; 132 + void __iomem *regs = priv->regs; 137 133 138 134 writeb(value, regs + (page << 8) + reg); 139 135 ··· 144 138 static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, 145 139 u16 value) 146 140 { 147 - u8 __iomem *regs = dev->priv; 141 + struct b53_mmap_priv *priv = dev->priv; 142 + void __iomem *regs = priv->regs; 148 143 149 144 if (WARN_ON(reg % 2)) 150 145 return -EINVAL; ··· 161 154 static 
int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg, 162 155 u32 value) 163 156 { 164 - u8 __iomem *regs = dev->priv; 157 + struct b53_mmap_priv *priv = dev->priv; 158 + void __iomem *regs = priv->regs; 165 159 166 160 if (WARN_ON(reg % 4)) 167 161 return -EINVAL; ··· 231 223 static int b53_mmap_probe(struct platform_device *pdev) 232 224 { 233 225 struct b53_platform_data *pdata = pdev->dev.platform_data; 226 + struct b53_mmap_priv *priv; 234 227 struct b53_device *dev; 235 228 236 229 if (!pdata) 237 230 return -EINVAL; 238 231 239 - dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs); 232 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 233 + if (!priv) 234 + return -ENOMEM; 235 + 236 + priv->regs = pdata->regs; 237 + 238 + dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, priv); 240 239 if (!dev) 241 240 return -ENOMEM; 242 241
+3 -3
drivers/net/dsa/mt7530.c
··· 917 917 918 918 mutex_lock(&priv->reg_mutex); 919 919 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); 920 - ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); 920 + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 921 921 mutex_unlock(&priv->reg_mutex); 922 922 923 923 return ret; ··· 933 933 934 934 mutex_lock(&priv->reg_mutex); 935 935 mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); 936 - ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); 936 + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, NULL); 937 937 mutex_unlock(&priv->reg_mutex); 938 938 939 939 return ret; ··· 1293 1293 } 1294 1294 1295 1295 /* Flush the FDB table */ 1296 - ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, 0); 1296 + ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); 1297 1297 if (ret < 0) 1298 1298 return ret; 1299 1299
+6 -5
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1192 1192 u32 csum_info; 1193 1193 u8 ip_proto; 1194 1194 u16 csum_start; 1195 - u16 ip_ver; 1195 + __be16 ip_ver; 1196 1196 1197 1197 /* Re-allocate SKB if needed */ 1198 1198 if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { ··· 1211 1211 memset(tsb, 0, sizeof(*tsb)); 1212 1212 1213 1213 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1214 - ip_ver = htons(skb->protocol); 1214 + ip_ver = skb->protocol; 1215 1215 switch (ip_ver) { 1216 - case ETH_P_IP: 1216 + case htons(ETH_P_IP): 1217 1217 ip_proto = ip_hdr(skb)->protocol; 1218 1218 break; 1219 - case ETH_P_IPV6: 1219 + case htons(ETH_P_IPV6): 1220 1220 ip_proto = ipv6_hdr(skb)->nexthdr; 1221 1221 break; 1222 1222 default: ··· 1230 1230 1231 1231 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1232 1232 csum_info |= L4_LENGTH_VALID; 1233 - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) 1233 + if (ip_proto == IPPROTO_UDP && 1234 + ip_ver == htons(ETH_P_IP)) 1234 1235 csum_info |= L4_UDP; 1235 1236 } else { 1236 1237 csum_info = 0;
+6 -5
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1489 1489 struct sk_buff *new_skb; 1490 1490 u16 offset; 1491 1491 u8 ip_proto; 1492 - u16 ip_ver; 1492 + __be16 ip_ver; 1493 1493 u32 tx_csum_info; 1494 1494 1495 1495 if (unlikely(skb_headroom(skb) < sizeof(*status))) { ··· 1509 1509 status = (struct status_64 *)skb->data; 1510 1510 1511 1511 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1512 - ip_ver = htons(skb->protocol); 1512 + ip_ver = skb->protocol; 1513 1513 switch (ip_ver) { 1514 - case ETH_P_IP: 1514 + case htons(ETH_P_IP): 1515 1515 ip_proto = ip_hdr(skb)->protocol; 1516 1516 break; 1517 - case ETH_P_IPV6: 1517 + case htons(ETH_P_IPV6): 1518 1518 ip_proto = ipv6_hdr(skb)->nexthdr; 1519 1519 break; 1520 1520 default: ··· 1530 1530 */ 1531 1531 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { 1532 1532 tx_csum_info |= STATUS_TX_CSUM_LV; 1533 - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) 1533 + if (ip_proto == IPPROTO_UDP && 1534 + ip_ver == htons(ETH_P_IP)) 1534 1535 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; 1535 1536 } else { 1536 1537 tx_csum_info = 0;
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
··· 288 288 u16 page_offset; 289 289 u16 reuse_flag; 290 290 291 - u16 length; /* length of the buffer */ 291 + u32 length; /* length of the buffer */ 292 292 293 293 /* desc type, used by the ring user to mark the type of the priv data */ 294 294 u16 type;
+3 -1
drivers/net/ethernet/netronome/nfp/nfp_net.h
··· 391 391 * @rx_drops: Number of packets dropped on RX due to lack of resources 392 392 * @hw_csum_rx_ok: Counter of packets where the HW checksum was OK 393 393 * @hw_csum_rx_inner_ok: Counter of packets where the inner HW checksum was OK 394 + * @hw_csum_rx_complete: Counter of packets with CHECKSUM_COMPLETE reported 394 395 * @hw_csum_rx_error: Counter of packets with bad checksums 395 396 * @tx_sync: Seqlock for atomic updates of TX stats 396 397 * @tx_pkts: Number of Transmitted packets ··· 435 434 u64 rx_drops; 436 435 u64 hw_csum_rx_ok; 437 436 u64 hw_csum_rx_inner_ok; 438 - u64 hw_csum_rx_error; 437 + u64 hw_csum_rx_complete; 439 438 440 439 struct nfp_net_tx_ring *xdp_ring; 441 440 ··· 447 446 u64 tx_gather; 448 447 u64 tx_lso; 449 448 449 + u64 hw_csum_rx_error; 450 450 u64 rx_replace_buf_alloc_fail; 451 451 u64 tx_errors; 452 452 u64 tx_busy;
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 1406 1406 skb->ip_summed = meta->csum_type; 1407 1407 skb->csum = meta->csum; 1408 1408 u64_stats_update_begin(&r_vec->rx_sync); 1409 - r_vec->hw_csum_rx_ok++; 1409 + r_vec->hw_csum_rx_complete++; 1410 1410 u64_stats_update_end(&r_vec->rx_sync); 1411 1411 return; 1412 1412 }
+9 -7
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
··· 179 179 180 180 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) 181 181 #define NN_ET_SWITCH_STATS_LEN 9 182 - #define NN_RVEC_GATHER_STATS 8 182 + #define NN_RVEC_GATHER_STATS 9 183 183 #define NN_RVEC_PER_Q_STATS 3 184 184 185 185 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) ··· 468 468 469 469 data = nfp_pr_et(data, "hw_rx_csum_ok"); 470 470 data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); 471 + data = nfp_pr_et(data, "hw_rx_csum_complete"); 471 472 data = nfp_pr_et(data, "hw_rx_csum_err"); 472 473 data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); 473 474 data = nfp_pr_et(data, "hw_tx_csum"); ··· 494 493 data[0] = nn->r_vecs[i].rx_pkts; 495 494 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; 496 495 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; 497 - tmp[2] = nn->r_vecs[i].hw_csum_rx_error; 498 - tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail; 496 + tmp[2] = nn->r_vecs[i].hw_csum_rx_complete; 497 + tmp[3] = nn->r_vecs[i].hw_csum_rx_error; 498 + tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; 499 499 } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); 500 500 501 501 do { 502 502 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); 503 503 data[1] = nn->r_vecs[i].tx_pkts; 504 504 data[2] = nn->r_vecs[i].tx_busy; 505 - tmp[4] = nn->r_vecs[i].hw_csum_tx; 506 - tmp[5] = nn->r_vecs[i].hw_csum_tx_inner; 507 - tmp[6] = nn->r_vecs[i].tx_gather; 508 - tmp[7] = nn->r_vecs[i].tx_lso; 505 + tmp[5] = nn->r_vecs[i].hw_csum_tx; 506 + tmp[6] = nn->r_vecs[i].hw_csum_tx_inner; 507 + tmp[7] = nn->r_vecs[i].tx_gather; 508 + tmp[8] = nn->r_vecs[i].tx_lso; 509 509 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); 510 510 511 511 data += NN_RVEC_PER_Q_STATS;
+5 -4
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
··· 71 71 /* CPP address to retrieve the data from */ 72 72 #define NSP_BUFFER 0x10 73 73 #define NSP_BUFFER_CPP GENMASK_ULL(63, 40) 74 - #define NSP_BUFFER_PCIE GENMASK_ULL(39, 38) 75 - #define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0) 74 + #define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) 76 75 77 76 #define NSP_DFLT_BUFFER 0x18 77 + #define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) 78 + #define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) 78 79 79 80 #define NSP_DFLT_BUFFER_CONFIG 0x20 80 81 #define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) ··· 428 427 if (err < 0) 429 428 return err; 430 429 431 - cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8; 432 - cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg); 430 + cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; 431 + cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); 433 432 434 433 if (in_buf && in_size) { 435 434 err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
-2
drivers/net/ethernet/sfc/ef10.c
··· 1666 1666 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1667 1667 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1668 1668 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1669 - EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START), 1670 1669 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1671 1670 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1672 1671 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), ··· 1776 1777 * These bits are in the second u64 of the raw mask. 1777 1778 */ 1778 1779 #define EF10_CTPIO_STAT_MASK ( \ 1779 - (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \ 1780 1780 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1781 1781 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1782 1782 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
-1
drivers/net/ethernet/sfc/nic.h
··· 332 332 EF10_STAT_fec_corrected_symbols_lane1, 333 333 EF10_STAT_fec_corrected_symbols_lane2, 334 334 EF10_STAT_fec_corrected_symbols_lane3, 335 - EF10_STAT_ctpio_dmabuf_start, 336 335 EF10_STAT_ctpio_vi_busy_fallback, 337 336 EF10_STAT_ctpio_long_write_success, 338 337 EF10_STAT_ctpio_missing_dbell_fail,
+1 -1
drivers/net/netdevsim/devlink.c
··· 267 267 return 0; 268 268 } 269 269 270 - static struct pernet_operations nsim_devlink_net_ops __net_initdata = { 270 + static struct pernet_operations nsim_devlink_net_ops = { 271 271 .init = nsim_devlink_netns_init, 272 272 .id = &nsim_devlink_id, 273 273 .size = sizeof(bool),
+1 -1
drivers/net/netdevsim/fib.c
··· 230 230 return 0; 231 231 } 232 232 233 - static struct pernet_operations nsim_fib_net_ops __net_initdata = { 233 + static struct pernet_operations nsim_fib_net_ops = { 234 234 .init = nsim_fib_netns_init, 235 235 .id = &nsim_fib_net_id, 236 236 .size = sizeof(struct nsim_fib_data),
+182 -2
drivers/net/phy/marvell10g.c
··· 21 21 * If both the fiber and copper ports are connected, the first to gain 22 22 * link takes priority and the other port is completely locked out. 23 23 */ 24 - #include <linux/phy.h> 24 + #include <linux/ctype.h> 25 + #include <linux/hwmon.h> 25 26 #include <linux/marvell_phy.h> 27 + #include <linux/phy.h> 26 28 27 29 enum { 28 30 MV_PCS_BASE_T = 0x0000, ··· 42 40 */ 43 41 MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */ 44 42 MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */ 43 + 44 + /* Vendor2 MMD registers */ 45 + MV_V2_TEMP_CTRL = 0xf08a, 46 + MV_V2_TEMP_CTRL_MASK = 0xc000, 47 + MV_V2_TEMP_CTRL_SAMPLE = 0x0000, 48 + MV_V2_TEMP_CTRL_DISABLE = 0xc000, 49 + MV_V2_TEMP = 0xf08c, 50 + MV_V2_TEMP_UNKNOWN = 0x9600, /* unknown function */ 51 + }; 52 + 53 + struct mv3310_priv { 54 + struct device *hwmon_dev; 55 + char *hwmon_name; 45 56 }; 46 57 47 58 static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, ··· 75 60 return ret < 0 ? ret : 1; 76 61 } 77 62 63 + #ifdef CONFIG_HWMON 64 + static umode_t mv3310_hwmon_is_visible(const void *data, 65 + enum hwmon_sensor_types type, 66 + u32 attr, int channel) 67 + { 68 + if (type == hwmon_chip && attr == hwmon_chip_update_interval) 69 + return 0444; 70 + if (type == hwmon_temp && attr == hwmon_temp_input) 71 + return 0444; 72 + return 0; 73 + } 74 + 75 + static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 76 + u32 attr, int channel, long *value) 77 + { 78 + struct phy_device *phydev = dev_get_drvdata(dev); 79 + int temp; 80 + 81 + if (type == hwmon_chip && attr == hwmon_chip_update_interval) { 82 + *value = MSEC_PER_SEC; 83 + return 0; 84 + } 85 + 86 + if (type == hwmon_temp && attr == hwmon_temp_input) { 87 + temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); 88 + if (temp < 0) 89 + return temp; 90 + 91 + *value = ((temp & 0xff) - 75) * 1000; 92 + 93 + return 0; 94 + } 95 + 96 + return -EOPNOTSUPP; 97 + } 98 + 99 + static const struct hwmon_ops 
mv3310_hwmon_ops = { 100 + .is_visible = mv3310_hwmon_is_visible, 101 + .read = mv3310_hwmon_read, 102 + }; 103 + 104 + static u32 mv3310_hwmon_chip_config[] = { 105 + HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL, 106 + 0, 107 + }; 108 + 109 + static const struct hwmon_channel_info mv3310_hwmon_chip = { 110 + .type = hwmon_chip, 111 + .config = mv3310_hwmon_chip_config, 112 + }; 113 + 114 + static u32 mv3310_hwmon_temp_config[] = { 115 + HWMON_T_INPUT, 116 + 0, 117 + }; 118 + 119 + static const struct hwmon_channel_info mv3310_hwmon_temp = { 120 + .type = hwmon_temp, 121 + .config = mv3310_hwmon_temp_config, 122 + }; 123 + 124 + static const struct hwmon_channel_info *mv3310_hwmon_info[] = { 125 + &mv3310_hwmon_chip, 126 + &mv3310_hwmon_temp, 127 + NULL, 128 + }; 129 + 130 + static const struct hwmon_chip_info mv3310_hwmon_chip_info = { 131 + .ops = &mv3310_hwmon_ops, 132 + .info = mv3310_hwmon_info, 133 + }; 134 + 135 + static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) 136 + { 137 + u16 val; 138 + int ret; 139 + 140 + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP, 141 + MV_V2_TEMP_UNKNOWN); 142 + if (ret < 0) 143 + return ret; 144 + 145 + val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE; 146 + ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, 147 + MV_V2_TEMP_CTRL_MASK, val); 148 + 149 + return ret < 0 ? 
ret : 0; 150 + } 151 + 152 + static void mv3310_hwmon_disable(void *data) 153 + { 154 + struct phy_device *phydev = data; 155 + 156 + mv3310_hwmon_config(phydev, false); 157 + } 158 + 159 + static int mv3310_hwmon_probe(struct phy_device *phydev) 160 + { 161 + struct device *dev = &phydev->mdio.dev; 162 + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); 163 + int i, j, ret; 164 + 165 + priv->hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); 166 + if (!priv->hwmon_name) 167 + return -ENODEV; 168 + 169 + for (i = j = 0; priv->hwmon_name[i]; i++) { 170 + if (isalnum(priv->hwmon_name[i])) { 171 + if (i != j) 172 + priv->hwmon_name[j] = priv->hwmon_name[i]; 173 + j++; 174 + } 175 + } 176 + priv->hwmon_name[j] = '\0'; 177 + 178 + ret = mv3310_hwmon_config(phydev, true); 179 + if (ret) 180 + return ret; 181 + 182 + ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev); 183 + if (ret) 184 + return ret; 185 + 186 + priv->hwmon_dev = devm_hwmon_device_register_with_info(dev, 187 + priv->hwmon_name, phydev, 188 + &mv3310_hwmon_chip_info, NULL); 189 + 190 + return PTR_ERR_OR_ZERO(priv->hwmon_dev); 191 + } 192 + #else 193 + static inline int mv3310_hwmon_config(struct phy_device *phydev, bool enable) 194 + { 195 + return 0; 196 + } 197 + 198 + static int mv3310_hwmon_probe(struct phy_device *phydev) 199 + { 200 + return 0; 201 + } 202 + #endif 203 + 78 204 static int mv3310_probe(struct phy_device *phydev) 79 205 { 206 + struct mv3310_priv *priv; 80 207 u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN; 208 + int ret; 81 209 82 210 if (!phydev->is_c45 || 83 211 (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) 84 212 return -ENODEV; 85 213 214 + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); 215 + if (!priv) 216 + return -ENOMEM; 217 + 218 + dev_set_drvdata(&phydev->mdio.dev, priv); 219 + 220 + ret = mv3310_hwmon_probe(phydev); 221 + if (ret) 222 + return ret; 223 + 86 224 return 0; 225 + } 226 + 227 + static int 
mv3310_suspend(struct phy_device *phydev) 228 + { 229 + return 0; 230 + } 231 + 232 + static int mv3310_resume(struct phy_device *phydev) 233 + { 234 + return mv3310_hwmon_config(phydev, true); 87 235 } 88 236 89 237 static int mv3310_config_init(struct phy_device *phydev) ··· 545 367 SUPPORTED_FIBRE | 546 368 SUPPORTED_10000baseT_Full | 547 369 SUPPORTED_Backplane, 548 - .probe = mv3310_probe, 549 370 .soft_reset = gen10g_no_soft_reset, 550 371 .config_init = mv3310_config_init, 372 + .probe = mv3310_probe, 373 + .suspend = mv3310_suspend, 374 + .resume = mv3310_resume, 551 375 .config_aneg = mv3310_config_aneg, 552 376 .aneg_done = mv3310_aneg_done, 553 377 .read_status = mv3310_read_status,
-1
drivers/net/ppp/pptp.c
··· 464 464 po->chan.mtu = dst_mtu(&rt->dst); 465 465 if (!po->chan.mtu) 466 466 po->chan.mtu = PPP_MRU; 467 - ip_rt_put(rt); 468 467 po->chan.mtu -= PPTP_HEADER_OVERHEAD; 469 468 470 469 po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+18 -16
drivers/net/usb/lan78xx.c
··· 2082 2082 2083 2083 dev->fc_autoneg = phydev->autoneg; 2084 2084 2085 - phy_start(phydev); 2086 - 2087 - netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 2088 - 2089 2085 return 0; 2090 2086 2091 2087 error: ··· 2518 2522 if (ret < 0) 2519 2523 goto done; 2520 2524 2521 - ret = lan78xx_phy_init(dev); 2522 - if (ret < 0) 2523 - goto done; 2525 + phy_start(net->phydev); 2526 + 2527 + netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 2524 2528 2525 2529 /* for Link Check */ 2526 2530 if (dev->urb_intr) { ··· 2581 2585 if (timer_pending(&dev->stat_monitor)) 2582 2586 del_timer_sync(&dev->stat_monitor); 2583 2587 2584 - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); 2585 - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); 2586 - 2587 - phy_stop(net->phydev); 2588 - phy_disconnect(net->phydev); 2589 - 2590 - net->phydev = NULL; 2588 + if (net->phydev) 2589 + phy_stop(net->phydev); 2591 2590 2592 2591 clear_bit(EVENT_DEV_OPEN, &dev->flags); 2593 2592 netif_stop_queue(net); ··· 3497 3506 return; 3498 3507 3499 3508 udev = interface_to_usbdev(intf); 3500 - 3501 3509 net = dev->net; 3510 + 3511 + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); 3512 + phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); 3513 + 3514 + phy_disconnect(net->phydev); 3515 + 3502 3516 unregister_netdev(net); 3503 3517 3504 3518 cancel_delayed_work_sync(&dev->wq); ··· 3659 3663 pm_runtime_set_autosuspend_delay(&udev->dev, 3660 3664 DEFAULT_AUTOSUSPEND_DELAY); 3661 3665 3666 + ret = lan78xx_phy_init(dev); 3667 + if (ret < 0) 3668 + goto out4; 3669 + 3662 3670 return 0; 3663 3671 3672 + out4: 3673 + unregister_netdev(netdev); 3664 3674 out3: 3665 3675 lan78xx_unbind(dev, intf); 3666 3676 out2: ··· 4014 4012 4015 4013 lan78xx_reset(dev); 4016 4014 4017 - lan78xx_phy_init(dev); 4015 + phy_start(dev->net->phydev); 4018 4016 4019 4017 return lan78xx_resume(intf); 4020 4018 }
+1 -1
drivers/nvmem/Kconfig
··· 1 1 menuconfig NVMEM 2 - tristate "NVMEM Support" 2 + bool "NVMEM Support" 3 3 help 4 4 Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... 5 5
+3
include/net/ip6_route.h
··· 214 214 #endif 215 215 } 216 216 217 + void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, 218 + const struct flowi6 *fl6); 219 + 217 220 static inline bool ipv6_unicast_destination(const struct sk_buff *skb) 218 221 { 219 222 struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+2 -1
include/net/ipv6.h
··· 965 965 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, 966 966 const struct in6_addr *final_dst); 967 967 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 968 - const struct in6_addr *final_dst); 968 + const struct in6_addr *final_dst, 969 + bool connected); 969 970 struct dst_entry *ip6_blackhole_route(struct net *net, 970 971 struct dst_entry *orig_dst); 971 972
-2
net/ieee802154/6lowpan/reassembly.c
··· 411 411 } 412 412 413 413 #ifdef CONFIG_SYSCTL 414 - static long zero; 415 414 416 415 static struct ctl_table lowpan_frags_ns_ctl_table[] = { 417 416 { ··· 427 428 .maxlen = sizeof(unsigned long), 428 429 .mode = 0644, 429 430 .proc_handler = proc_doulongvec_minmax, 430 - .extra1 = &zero, 431 431 .extra2 = &init_net.ieee802154_lowpan.frags.high_thresh 432 432 }, 433 433 {
+2 -3
net/ipv4/ip_fragment.c
··· 667 667 EXPORT_SYMBOL(ip_check_defrag); 668 668 669 669 #ifdef CONFIG_SYSCTL 670 - static long zero; 670 + static int dist_min; 671 671 672 672 static struct ctl_table ip4_frags_ns_ctl_table[] = { 673 673 { ··· 684 684 .maxlen = sizeof(unsigned long), 685 685 .mode = 0644, 686 686 .proc_handler = proc_doulongvec_minmax, 687 - .extra1 = &zero, 688 687 .extra2 = &init_net.ipv4.frags.high_thresh 689 688 }, 690 689 { ··· 699 700 .maxlen = sizeof(int), 700 701 .mode = 0644, 701 702 .proc_handler = proc_dointvec_minmax, 702 - .extra1 = &zero 703 + .extra1 = &dist_min, 703 704 }, 704 705 { } 705 706 };
+2 -1
net/ipv4/ip_output.c
··· 1090 1090 length -= copy; 1091 1091 } 1092 1092 1093 - refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1093 + if (wmem_alloc_delta) 1094 + refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1094 1095 return 0; 1095 1096 1096 1097 error_efault:
+1 -8
net/ipv6/datagram.c
··· 106 106 } 107 107 } 108 108 109 - ip6_dst_store(sk, dst, 110 - ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 111 - &sk->sk_v6_daddr : NULL, 112 - #ifdef CONFIG_IPV6_SUBTREES 113 - ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 114 - &np->saddr : 115 - #endif 116 - NULL); 109 + ip6_sk_dst_store_flow(sk, dst, &fl6); 117 110 118 111 out: 119 112 fl6_sock_release(flowlabel);
+14 -4
net/ipv6/ip6_output.c
··· 1105 1105 * @sk: socket which provides the dst cache and route info 1106 1106 * @fl6: flow to lookup 1107 1107 * @final_dst: final destination address for ipsec lookup 1108 + * @connected: whether @sk is connected or not 1108 1109 * 1109 1110 * This function performs a route lookup on the given flow with the 1110 1111 * possibility of using the cached route in the socket if it is valid. 1111 1112 * It will take the socket dst lock when operating on the dst cache. 1112 1113 * As a result, this function can only be used in process context. 1113 1114 * 1115 + * In addition, for a connected socket, cache the dst in the socket 1116 + * if the current cache is not valid. 1117 + * 1114 1118 * It returns a valid dst pointer on success, or a pointer encoded 1115 1119 * error code. 1116 1120 */ 1117 1121 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, 1118 - const struct in6_addr *final_dst) 1122 + const struct in6_addr *final_dst, 1123 + bool connected) 1119 1124 { 1120 1125 struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); 1121 1126 1122 1127 dst = ip6_sk_dst_check(sk, dst, fl6); 1123 - if (!dst) 1124 - dst = ip6_dst_lookup_flow(sk, fl6, final_dst); 1128 + if (dst) 1129 + return dst; 1130 + 1131 + dst = ip6_dst_lookup_flow(sk, fl6, final_dst); 1132 + if (connected && !IS_ERR(dst)) 1133 + ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6); 1125 1134 1126 1135 return dst; 1127 1136 } ··· 1545 1536 length -= copy; 1546 1537 } 1547 1538 1548 - refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1539 + if (wmem_alloc_delta) 1540 + refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); 1549 1541 return 0; 1550 1542 1551 1543 error_efault:
-2
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 55 55 static struct inet_frags nf_frags; 56 56 57 57 #ifdef CONFIG_SYSCTL 58 - static long zero; 59 58 60 59 static struct ctl_table nf_ct_frag6_sysctl_table[] = { 61 60 { ··· 70 71 .maxlen = sizeof(unsigned long), 71 72 .mode = 0644, 72 73 .proc_handler = proc_doulongvec_minmax, 73 - .extra1 = &zero, 74 74 .extra2 = &init_net.nf_frag.frags.high_thresh 75 75 }, 76 76 {
+1 -1
net/ipv6/ping.c
··· 121 121 ipc6.tclass = np->tclass; 122 122 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 123 123 124 - dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr); 124 + dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); 125 125 if (IS_ERR(dst)) 126 126 return PTR_ERR(dst); 127 127 rt = (struct rt6_info *) dst;
-2
net/ipv6/reassembly.c
··· 548 548 }; 549 549 550 550 #ifdef CONFIG_SYSCTL 551 - static int zero; 552 551 553 552 static struct ctl_table ip6_frags_ns_ctl_table[] = { 554 553 { ··· 564 565 .maxlen = sizeof(unsigned long), 565 566 .mode = 0644, 566 567 .proc_handler = proc_doulongvec_minmax, 567 - .extra1 = &zero, 568 568 .extra2 = &init_net.ipv6.frags.high_thresh 569 569 }, 570 570 {
+17
net/ipv6/route.c
··· 2229 2229 } 2230 2230 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 2231 2231 2232 + void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst, 2233 + const struct flowi6 *fl6) 2234 + { 2235 + #ifdef CONFIG_IPV6_SUBTREES 2236 + struct ipv6_pinfo *np = inet6_sk(sk); 2237 + #endif 2238 + 2239 + ip6_dst_store(sk, dst, 2240 + ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ? 2241 + &sk->sk_v6_daddr : NULL, 2242 + #ifdef CONFIG_IPV6_SUBTREES 2243 + ipv6_addr_equal(&fl6->saddr, &np->saddr) ? 2244 + &np->saddr : 2245 + #endif 2246 + NULL); 2247 + } 2248 + 2232 2249 /* Handle redirects */ 2233 2250 struct ip6rd_flowi { 2234 2251 struct flowi6 fl6;
+7 -24
net/ipv6/udp.c
··· 1116 1116 struct dst_entry *dst; 1117 1117 struct ipcm6_cookie ipc6; 1118 1118 int addr_len = msg->msg_namelen; 1119 + bool connected = false; 1119 1120 int ulen = len; 1120 1121 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 1121 1122 int err; 1122 - int connected = 0; 1123 1123 int is_udplite = IS_UDPLITE(sk); 1124 1124 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1125 1125 struct sockcm_cookie sockc; ··· 1241 1241 fl6.fl6_dport = inet->inet_dport; 1242 1242 daddr = &sk->sk_v6_daddr; 1243 1243 fl6.flowlabel = np->flow_label; 1244 - connected = 1; 1244 + connected = true; 1245 1245 } 1246 1246 1247 1247 if (!fl6.flowi6_oif) ··· 1271 1271 } 1272 1272 if (!(opt->opt_nflen|opt->opt_flen)) 1273 1273 opt = NULL; 1274 - connected = 0; 1274 + connected = false; 1275 1275 } 1276 1276 if (!opt) { 1277 1277 opt = txopt_get(np); ··· 1293 1293 1294 1294 final_p = fl6_update_dst(&fl6, opt, &final); 1295 1295 if (final_p) 1296 - connected = 0; 1296 + connected = false; 1297 1297 1298 1298 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) { 1299 1299 fl6.flowi6_oif = np->mcast_oif; 1300 - connected = 0; 1300 + connected = false; 1301 1301 } else if (!fl6.flowi6_oif) 1302 1302 fl6.flowi6_oif = np->ucast_oif; 1303 1303 ··· 1308 1308 1309 1309 fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); 1310 1310 1311 - dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); 1311 + dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected); 1312 1312 if (IS_ERR(dst)) { 1313 1313 err = PTR_ERR(dst); 1314 1314 dst = NULL; ··· 1333 1333 err = PTR_ERR(skb); 1334 1334 if (!IS_ERR_OR_NULL(skb)) 1335 1335 err = udp_v6_send_skb(skb, &fl6); 1336 - goto release_dst; 1336 + goto out; 1337 1337 } 1338 1338 1339 1339 lock_sock(sk); ··· 1366 1366 if (err > 0) 1367 1367 err = np->recverr ? 
net_xmit_errno(err) : 0; 1368 1368 release_sock(sk); 1369 - 1370 - release_dst: 1371 - if (dst) { 1372 - if (connected) { 1373 - ip6_dst_store(sk, dst, 1374 - ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ? 1375 - &sk->sk_v6_daddr : NULL, 1376 - #ifdef CONFIG_IPV6_SUBTREES 1377 - ipv6_addr_equal(&fl6.saddr, &np->saddr) ? 1378 - &np->saddr : 1379 - #endif 1380 - NULL); 1381 - } else { 1382 - dst_release(dst); 1383 - } 1384 - dst = NULL; 1385 - } 1386 1369 1387 1370 out: 1388 1371 dst_release(dst);
+6
net/rxrpc/input.c
··· 1200 1200 !rxrpc_validate_jumbo(skb)) 1201 1201 goto bad_message; 1202 1202 break; 1203 + 1204 + /* Packet types 9-11 should just be ignored. */ 1205 + case RXRPC_PACKET_TYPE_PARAMS: 1206 + case RXRPC_PACKET_TYPE_10: 1207 + case RXRPC_PACKET_TYPE_11: 1208 + goto discard; 1203 1209 } 1204 1210 1205 1211 rcu_read_lock();
+6
net/rxrpc/protocol.h
··· 46 46 #define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */ 47 47 #define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection secutity response (CLNT->SRVR) */ 48 48 #define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */ 49 + #define RXRPC_PACKET_TYPE_PARAMS 9 /* Parameter negotiation (unspec'd, ignore) */ 50 + #define RXRPC_PACKET_TYPE_10 10 /* Ignored */ 51 + #define RXRPC_PACKET_TYPE_11 11 /* Ignored */ 49 52 #define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */ 50 53 #define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */ 51 54 ··· 81 78 (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \ 82 79 (1 << RXRPC_PACKET_TYPE_RESPONSE) | \ 83 80 /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \ 81 + (1 << RXRPC_PACKET_TYPE_PARAMS) | \ 82 + (1 << RXRPC_PACKET_TYPE_10) | \ 83 + (1 << RXRPC_PACKET_TYPE_11) | \ 84 84 (1 << RXRPC_PACKET_TYPE_VERSION)) 85 85 86 86 /*****************************************************************************/
+2 -1
net/tipc/socket.c
··· 3280 3280 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) || 3281 3281 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) || 3282 3282 nla_put_u32(skb, TIPC_NLA_SOCK_UID, 3283 - from_kuid_munged(sk_user_ns(sk), sock_i_uid(sk))) || 3283 + from_kuid_munged(sk_user_ns(NETLINK_CB(skb).sk), 3284 + sock_i_uid(sk))) || 3284 3285 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE, 3285 3286 tipc_diag_gen_cookie(sk), 3286 3287 TIPC_NLA_SOCK_PAD))
+2
net/tipc/subscr.c
··· 145 145 pr_warn("Subscription rejected, no memory\n"); 146 146 return NULL; 147 147 } 148 + INIT_LIST_HEAD(&sub->service_list); 149 + INIT_LIST_HEAD(&sub->sub_list); 148 150 sub->net = net; 149 151 sub->conid = conid; 150 152 sub->inactive = false;
-10
net/unix/af_unix.c
··· 745 745 .obj_size = sizeof(struct unix_sock), 746 746 }; 747 747 748 - /* 749 - * AF_UNIX sockets do not interact with hardware, hence they 750 - * dont trigger interrupts - so it's safe for them to have 751 - * bh-unsafe locking for their sk_receive_queue.lock. Split off 752 - * this special lock-class by reinitializing the spinlock key: 753 - */ 754 - static struct lock_class_key af_unix_sk_receive_queue_lock_key; 755 - 756 748 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) 757 749 { 758 750 struct sock *sk = NULL; ··· 759 767 goto out; 760 768 761 769 sock_init_data(sock, sk); 762 - lockdep_set_class(&sk->sk_receive_queue.lock, 763 - &af_unix_sk_receive_queue_lock_key); 764 770 765 771 sk->sk_allocation = GFP_KERNEL_ACCOUNT; 766 772 sk->sk_write_space = unix_write_space;