Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
"Hopefully this is the last batch of networking fixes for 4.14

Fingers crossed...

1) Fix stmmac to use the properly sized OF property read, from Bhadram
Varka.

2) Fix use after free in net scheduler tc action code, from Cong
Wang.

3) Fix SKB control block mangling in tcp_make_synack().

4) Use proper locking in fib_dump_info(), from Florian Westphal.

5) Fix IPG encodings in systemport driver, from Florian Fainelli.

6) Fix division by zero in NV TCP congestion control module, from
Konstantin Khlebnikov.

7) Fix use after free in nf_reject_ipv4, from Tejaswi Tanikella"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net: systemport: Correct IPG length settings
tcp: do not mangle skb->cb[] in tcp_make_synack()
fib: fib_dump_info can no longer use __in_dev_get_rtnl
stmmac: use of_property_read_u32 instead of read_u8
net_sched: hold netns refcnt for each action
net_sched: acquire RTNL in tc_action_net_exit()
net: vrf: correct FRA_L3MDEV encode type
tcp_nv: fix division by zero in tcpnv_acked()
netfilter: nf_reject_ipv4: Fix use-after-free in send_reset
netfilter: nft_set_hash: disable fast_ops for 2-len keys

+60 -50
+6 -4
drivers/net/ethernet/broadcom/bcmsysport.c
··· 1809 1809 1810 1810 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv) 1811 1811 { 1812 - u32 __maybe_unused reg; 1812 + u32 reg; 1813 1813 1814 - /* Include Broadcom tag in pad extension */ 1814 + reg = gib_readl(priv, GIB_CONTROL); 1815 + /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */ 1815 1816 if (netdev_uses_dsa(priv->netdev)) { 1816 - reg = gib_readl(priv, GIB_CONTROL); 1817 1817 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT); 1818 1818 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT; 1819 - gib_writel(priv, reg, GIB_CONTROL); 1820 1819 } 1820 + reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT); 1821 + reg |= 12 << GIB_IPG_LEN_SHIFT; 1822 + gib_writel(priv, reg, GIB_CONTROL); 1821 1823 } 1822 1824 1823 1825 static int bcm_sysport_open(struct net_device *dev)
+8 -8
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
··· 168 168 } 169 169 170 170 /* Processing RX queues common config */ 171 - if (of_property_read_u8(rx_node, "snps,rx-queues-to-use", 172 - &plat->rx_queues_to_use)) 171 + if (of_property_read_u32(rx_node, "snps,rx-queues-to-use", 172 + &plat->rx_queues_to_use)) 173 173 plat->rx_queues_to_use = 1; 174 174 175 175 if (of_property_read_bool(rx_node, "snps,rx-sched-sp")) ··· 191 191 else 192 192 plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; 193 193 194 - if (of_property_read_u8(q_node, "snps,map-to-dma-channel", 195 - &plat->rx_queues_cfg[queue].chan)) 194 + if (of_property_read_u32(q_node, "snps,map-to-dma-channel", 195 + &plat->rx_queues_cfg[queue].chan)) 196 196 plat->rx_queues_cfg[queue].chan = queue; 197 197 /* TODO: Dynamic mapping to be included in the future */ 198 198 ··· 222 222 } 223 223 224 224 /* Processing TX queues common config */ 225 - if (of_property_read_u8(tx_node, "snps,tx-queues-to-use", 226 - &plat->tx_queues_to_use)) 225 + if (of_property_read_u32(tx_node, "snps,tx-queues-to-use", 226 + &plat->tx_queues_to_use)) 227 227 plat->tx_queues_to_use = 1; 228 228 229 229 if (of_property_read_bool(tx_node, "snps,tx-sched-wrr")) ··· 244 244 if (queue >= plat->tx_queues_to_use) 245 245 break; 246 246 247 - if (of_property_read_u8(q_node, "snps,weight", 248 - &plat->tx_queues_cfg[queue].weight)) 247 + if (of_property_read_u32(q_node, "snps,weight", 248 + &plat->tx_queues_cfg[queue].weight)) 249 249 plat->tx_queues_cfg[queue].weight = 0x10 + queue; 250 250 251 251 if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+1 -1
drivers/net/vrf.c
··· 1165 1165 frh->family = family; 1166 1166 frh->action = FR_ACT_TO_TBL; 1167 1167 1168 - if (nla_put_u32(skb, FRA_L3MDEV, 1)) 1168 + if (nla_put_u8(skb, FRA_L3MDEV, 1)) 1169 1169 goto nla_put_failure; 1170 1170 1171 1171 if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
+4 -4
include/linux/stmmac.h
··· 126 126 127 127 struct stmmac_rxq_cfg { 128 128 u8 mode_to_use; 129 - u8 chan; 129 + u32 chan; 130 130 u8 pkt_route; 131 131 bool use_prio; 132 132 u32 prio; 133 133 }; 134 134 135 135 struct stmmac_txq_cfg { 136 - u8 weight; 136 + u32 weight; 137 137 u8 mode_to_use; 138 138 /* Credit Base Shaper parameters */ 139 139 u32 send_slope; ··· 168 168 int unicast_filter_entries; 169 169 int tx_fifo_size; 170 170 int rx_fifo_size; 171 - u8 rx_queues_to_use; 172 - u8 tx_queues_to_use; 171 + u32 rx_queues_to_use; 172 + u32 tx_queues_to_use; 173 173 u8 rx_sched_algorithm; 174 174 u8 tx_sched_algorithm; 175 175 struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+5 -1
include/net/act_api.h
··· 14 14 struct tcf_idrinfo { 15 15 spinlock_t lock; 16 16 struct idr action_idr; 17 + struct net *net; 17 18 }; 18 19 19 20 struct tc_action_ops; ··· 106 105 107 106 static inline 108 107 int tc_action_net_init(struct tc_action_net *tn, 109 - const struct tc_action_ops *ops) 108 + const struct tc_action_ops *ops, struct net *net) 110 109 { 111 110 int err = 0; 112 111 ··· 114 113 if (!tn->idrinfo) 115 114 return -ENOMEM; 116 115 tn->ops = ops; 116 + tn->idrinfo->net = net; 117 117 spin_lock_init(&tn->idrinfo->lock); 118 118 idr_init(&tn->idrinfo->action_idr); 119 119 return err; ··· 125 123 126 124 static inline void tc_action_net_exit(struct tc_action_net *tn) 127 125 { 126 + rtnl_lock(); 128 127 tcf_idrinfo_destroy(tn->ops, tn->idrinfo); 128 + rtnl_unlock(); 129 129 kfree(tn->idrinfo); 130 130 } 131 131
+10 -6
net/ipv4/fib_semantics.c
··· 1365 1365 nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc)) 1366 1366 goto nla_put_failure; 1367 1367 if (fi->fib_nhs == 1) { 1368 - struct in_device *in_dev; 1369 - 1370 1368 if (fi->fib_nh->nh_gw && 1371 1369 nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw)) 1372 1370 goto nla_put_failure; ··· 1372 1374 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif)) 1373 1375 goto nla_put_failure; 1374 1376 if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) { 1375 - in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev); 1377 + struct in_device *in_dev; 1378 + 1379 + rcu_read_lock(); 1380 + in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev); 1376 1381 if (in_dev && 1377 1382 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) 1378 1383 rtm->rtm_flags |= RTNH_F_DEAD; 1384 + rcu_read_unlock(); 1379 1385 } 1380 1386 if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD) 1381 1387 rtm->rtm_flags |= RTNH_F_OFFLOAD; ··· 1402 1400 goto nla_put_failure; 1403 1401 1404 1402 for_nexthops(fi) { 1405 - struct in_device *in_dev; 1406 - 1407 1403 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); 1408 1404 if (!rtnh) 1409 1405 goto nla_put_failure; 1410 1406 1411 1407 rtnh->rtnh_flags = nh->nh_flags & 0xFF; 1412 1408 if (nh->nh_flags & RTNH_F_LINKDOWN) { 1413 - in_dev = __in_dev_get_rtnl(nh->nh_dev); 1409 + struct in_device *in_dev; 1410 + 1411 + rcu_read_lock(); 1412 + in_dev = __in_dev_get_rcu(nh->nh_dev); 1414 1413 if (in_dev && 1415 1414 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) 1416 1415 rtnh->rtnh_flags |= RTNH_F_DEAD; 1416 + rcu_read_unlock(); 1417 1417 } 1418 1418 rtnh->rtnh_hops = nh->nh_weight - 1; 1419 1419 rtnh->rtnh_ifindex = nh->nh_oif;
+2
net/ipv4/netfilter/nf_reject_ipv4.c
··· 132 132 if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) 133 133 goto free_nskb; 134 134 135 + niph = ip_hdr(nskb); 136 + 135 137 /* "Never happens" */ 136 138 if (nskb->len > dst_mtu(skb_dst(nskb))) 137 139 goto free_nskb;
+1 -1
net/ipv4/tcp_nv.c
··· 252 252 253 253 /* rate in 100's bits per second */ 254 254 rate64 = ((u64)sample->in_flight) * 8000000; 255 - rate = (u32)div64_u64(rate64, (u64)(avg_rtt * 100)); 255 + rate = (u32)div64_u64(rate64, (u64)(avg_rtt ?: 1) * 100); 256 256 257 257 /* Remember the maximum rate seen during this RTT 258 258 * Note: It may be more than one RTT. This function should be
+2 -7
net/ipv4/tcp_output.c
··· 3180 3180 th->source = htons(ireq->ir_num); 3181 3181 th->dest = ireq->ir_rmt_port; 3182 3182 skb->mark = ireq->ir_mark; 3183 - /* Setting of flags are superfluous here for callers (and ECE is 3184 - * not even correctly set) 3185 - */ 3186 - tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 3187 - TCPHDR_SYN | TCPHDR_ACK); 3188 - 3189 - th->seq = htonl(TCP_SKB_CB(skb)->seq); 3183 + skb->ip_summed = CHECKSUM_PARTIAL; 3184 + th->seq = htonl(tcp_rsk(req)->snt_isn); 3190 3185 /* XXX data is queued and acked as is. No buffer/window check */ 3191 3186 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 3192 3187
-1
net/netfilter/nft_set_hash.c
··· 643 643 { 644 644 if (desc->size) { 645 645 switch (desc->klen) { 646 - case 2: 647 646 case 4: 648 647 return &nft_hash_fast_ops; 649 648 default:
+4
net/sched/act_api.c
··· 78 78 spin_lock_bh(&idrinfo->lock); 79 79 idr_remove_ext(&idrinfo->action_idr, p->tcfa_index); 80 80 spin_unlock_bh(&idrinfo->lock); 81 + put_net(idrinfo->net); 81 82 gen_kill_estimator(&p->tcfa_rate_est); 82 83 free_tcf(p); 83 84 } ··· 86 85 int __tcf_idr_release(struct tc_action *p, bool bind, bool strict) 87 86 { 88 87 int ret = 0; 88 + 89 + ASSERT_RTNL(); 89 90 90 91 if (p) { 91 92 if (bind) ··· 337 334 p->idrinfo = idrinfo; 338 335 p->ops = ops; 339 336 INIT_LIST_HEAD(&p->list); 337 + get_net(idrinfo->net); 340 338 *a = p; 341 339 return 0; 342 340 }
+1 -1
net/sched/act_bpf.c
··· 398 398 { 399 399 struct tc_action_net *tn = net_generic(net, bpf_net_id); 400 400 401 - return tc_action_net_init(tn, &act_bpf_ops); 401 + return tc_action_net_init(tn, &act_bpf_ops, net); 402 402 } 403 403 404 404 static void __net_exit bpf_exit_net(struct net *net)
+1 -1
net/sched/act_connmark.c
··· 206 206 { 207 207 struct tc_action_net *tn = net_generic(net, connmark_net_id); 208 208 209 - return tc_action_net_init(tn, &act_connmark_ops); 209 + return tc_action_net_init(tn, &act_connmark_ops, net); 210 210 } 211 211 212 212 static void __net_exit connmark_exit_net(struct net *net)
+1 -1
net/sched/act_csum.c
··· 626 626 { 627 627 struct tc_action_net *tn = net_generic(net, csum_net_id); 628 628 629 - return tc_action_net_init(tn, &act_csum_ops); 629 + return tc_action_net_init(tn, &act_csum_ops, net); 630 630 } 631 631 632 632 static void __net_exit csum_exit_net(struct net *net)
+1 -1
net/sched/act_gact.c
··· 232 232 { 233 233 struct tc_action_net *tn = net_generic(net, gact_net_id); 234 234 235 - return tc_action_net_init(tn, &act_gact_ops); 235 + return tc_action_net_init(tn, &act_gact_ops, net); 236 236 } 237 237 238 238 static void __net_exit gact_exit_net(struct net *net)
+1 -1
net/sched/act_ife.c
··· 818 818 { 819 819 struct tc_action_net *tn = net_generic(net, ife_net_id); 820 820 821 - return tc_action_net_init(tn, &act_ife_ops); 821 + return tc_action_net_init(tn, &act_ife_ops, net); 822 822 } 823 823 824 824 static void __net_exit ife_exit_net(struct net *net)
+2 -2
net/sched/act_ipt.c
··· 334 334 { 335 335 struct tc_action_net *tn = net_generic(net, ipt_net_id); 336 336 337 - return tc_action_net_init(tn, &act_ipt_ops); 337 + return tc_action_net_init(tn, &act_ipt_ops, net); 338 338 } 339 339 340 340 static void __net_exit ipt_exit_net(struct net *net) ··· 384 384 { 385 385 struct tc_action_net *tn = net_generic(net, xt_net_id); 386 386 387 - return tc_action_net_init(tn, &act_xt_ops); 387 + return tc_action_net_init(tn, &act_xt_ops, net); 388 388 } 389 389 390 390 static void __net_exit xt_exit_net(struct net *net)
+1 -1
net/sched/act_mirred.c
··· 343 343 { 344 344 struct tc_action_net *tn = net_generic(net, mirred_net_id); 345 345 346 - return tc_action_net_init(tn, &act_mirred_ops); 346 + return tc_action_net_init(tn, &act_mirred_ops, net); 347 347 } 348 348 349 349 static void __net_exit mirred_exit_net(struct net *net)
+1 -1
net/sched/act_nat.c
··· 307 307 { 308 308 struct tc_action_net *tn = net_generic(net, nat_net_id); 309 309 310 - return tc_action_net_init(tn, &act_nat_ops); 310 + return tc_action_net_init(tn, &act_nat_ops, net); 311 311 } 312 312 313 313 static void __net_exit nat_exit_net(struct net *net)
+1 -1
net/sched/act_pedit.c
··· 450 450 { 451 451 struct tc_action_net *tn = net_generic(net, pedit_net_id); 452 452 453 - return tc_action_net_init(tn, &act_pedit_ops); 453 + return tc_action_net_init(tn, &act_pedit_ops, net); 454 454 } 455 455 456 456 static void __net_exit pedit_exit_net(struct net *net)
+1 -1
net/sched/act_police.c
··· 331 331 { 332 332 struct tc_action_net *tn = net_generic(net, police_net_id); 333 333 334 - return tc_action_net_init(tn, &act_police_ops); 334 + return tc_action_net_init(tn, &act_police_ops, net); 335 335 } 336 336 337 337 static void __net_exit police_exit_net(struct net *net)
+1 -1
net/sched/act_sample.c
··· 240 240 { 241 241 struct tc_action_net *tn = net_generic(net, sample_net_id); 242 242 243 - return tc_action_net_init(tn, &act_sample_ops); 243 + return tc_action_net_init(tn, &act_sample_ops, net); 244 244 } 245 245 246 246 static void __net_exit sample_exit_net(struct net *net)
+1 -1
net/sched/act_simple.c
··· 201 201 { 202 202 struct tc_action_net *tn = net_generic(net, simp_net_id); 203 203 204 - return tc_action_net_init(tn, &act_simp_ops); 204 + return tc_action_net_init(tn, &act_simp_ops, net); 205 205 } 206 206 207 207 static void __net_exit simp_exit_net(struct net *net)
+1 -1
net/sched/act_skbedit.c
··· 238 238 { 239 239 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 240 240 241 - return tc_action_net_init(tn, &act_skbedit_ops); 241 + return tc_action_net_init(tn, &act_skbedit_ops, net); 242 242 } 243 243 244 244 static void __net_exit skbedit_exit_net(struct net *net)
+1 -1
net/sched/act_skbmod.c
··· 263 263 { 264 264 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 265 265 266 - return tc_action_net_init(tn, &act_skbmod_ops); 266 + return tc_action_net_init(tn, &act_skbmod_ops, net); 267 267 } 268 268 269 269 static void __net_exit skbmod_exit_net(struct net *net)
+1 -1
net/sched/act_tunnel_key.c
··· 322 322 { 323 323 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 324 324 325 - return tc_action_net_init(tn, &act_tunnel_key_ops); 325 + return tc_action_net_init(tn, &act_tunnel_key_ops, net); 326 326 } 327 327 328 328 static void __net_exit tunnel_key_exit_net(struct net *net)
+1 -1
net/sched/act_vlan.c
··· 269 269 { 270 270 struct tc_action_net *tn = net_generic(net, vlan_net_id); 271 271 272 - return tc_action_net_init(tn, &act_vlan_ops); 272 + return tc_action_net_init(tn, &act_vlan_ops, net); 273 273 } 274 274 275 275 static void __net_exit vlan_exit_net(struct net *net)