Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
net/core/rtnetlink.c
net/core/skbuff.c

Both conflicts were very simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

+66 -27
+4 -4
arch/sparc/net/bpf_jit_comp.c
··· 83 83 #define BNE (F2(0, 2) | CONDNE) 84 84 85 85 #ifdef CONFIG_SPARC64 86 - #define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) 86 + #define BE_PTR (F2(0, 1) | CONDE | (2 << 20)) 87 87 #else 88 - #define BNE_PTR BNE 88 + #define BE_PTR BE 89 89 #endif 90 90 91 91 #define SETHI(K, REG) \ ··· 592 592 case BPF_ANC | SKF_AD_IFINDEX: 593 593 emit_skb_loadptr(dev, r_A); 594 594 emit_cmpi(r_A, 0); 595 - emit_branch(BNE_PTR, cleanup_addr + 4); 595 + emit_branch(BE_PTR, cleanup_addr + 4); 596 596 emit_nop(); 597 597 emit_load32(r_A, struct net_device, ifindex, r_A); 598 598 break; ··· 605 605 case BPF_ANC | SKF_AD_HATYPE: 606 606 emit_skb_loadptr(dev, r_A); 607 607 emit_cmpi(r_A, 0); 608 - emit_branch(BNE_PTR, cleanup_addr + 4); 608 + emit_branch(BE_PTR, cleanup_addr + 4); 609 609 emit_nop(); 610 610 emit_load16(r_A, struct net_device, type, r_A); 611 611 break;
+7
drivers/net/ethernet/sfc/io.h
··· 66 66 #define EFX_USE_QWORD_IO 1 67 67 #endif 68 68 69 + /* Hardware issue requires that only 64-bit naturally aligned writes 70 + * are seen by hardware. It's not strictly necessary to restrict to 71 + * x86_64 arch, but done for safety since unusual write combining behaviour 72 + * can break PIO. 73 + */ 74 + #ifdef CONFIG_X86_64 69 75 /* PIO is a win only if write-combining is possible */ 70 76 #ifdef ARCH_HAS_IOREMAP_WC 71 77 #define EFX_USE_PIO 1 78 + #endif 72 79 #endif 73 80 74 81 #ifdef EFX_USE_QWORD_IO
+17 -5
drivers/net/ethernet/sfc/tx.c
··· 189 189 u8 buf[L1_CACHE_BYTES]; 190 190 }; 191 191 192 + /* Copy in explicit 64-bit writes. */ 193 + static void efx_memcpy_64(void __iomem *dest, void *src, size_t len) 194 + { 195 + u64 *src64 = src; 196 + u64 __iomem *dest64 = dest; 197 + size_t l64 = len / 8; 198 + size_t i; 199 + 200 + for (i = 0; i < l64; i++) 201 + writeq(src64[i], &dest64[i]); 202 + } 203 + 192 204 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned. 193 205 * Advances piobuf pointer. Leaves additional data in the copy buffer. 194 206 */ ··· 210 198 { 211 199 int block_len = len & ~(sizeof(copy_buf->buf) - 1); 212 200 213 - memcpy_toio(*piobuf, data, block_len); 201 + efx_memcpy_64(*piobuf, data, block_len); 214 202 *piobuf += block_len; 215 203 len -= block_len; 216 204 ··· 242 230 if (copy_buf->used < sizeof(copy_buf->buf)) 243 231 return; 244 232 245 - memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 233 + efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 246 234 *piobuf += sizeof(copy_buf->buf); 247 235 data += copy_to_buf; 248 236 len -= copy_to_buf; ··· 257 245 { 258 246 /* if there's anything in it, write the whole buffer, including junk */ 259 247 if (copy_buf->used) 260 - memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 248 + efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf)); 261 249 } 262 250 263 251 /* Traverse skb structure and copy fragments in to PIO buffer. ··· 316 304 */ 317 305 BUILD_BUG_ON(L1_CACHE_BYTES > 318 306 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); 319 - memcpy_toio(tx_queue->piobuf, skb->data, 320 - ALIGN(skb->len, L1_CACHE_BYTES)); 307 + efx_memcpy_64(tx_queue->piobuf, skb->data, 308 + ALIGN(skb->len, L1_CACHE_BYTES)); 321 309 } 322 310 323 311 EFX_POPULATE_QWORD_5(buffer->option,
-1
drivers/net/macvlan.c
··· 1204 1204 list_for_each_entry_safe(vlan, next, &port->vlans, list) 1205 1205 vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); 1206 1206 unregister_netdevice_many(&list_kill); 1207 - list_del(&list_kill); 1208 1207 break; 1209 1208 case NETDEV_PRE_TYPE_CHANGE: 1210 1209 /* Forbid underlying device to change its type. */
+6 -1
drivers/net/usb/qmi_wwan.c
··· 763 763 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 764 764 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 765 765 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 766 - {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 766 + {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 767 + {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ 768 + {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ 769 + {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)}, /* Olivetti Olicard 155 */ 770 + {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 771 + {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ 767 772 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ 768 773 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 769 774 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+4 -1
net/core/dev.c
··· 6634 6634 /** 6635 6635 * unregister_netdevice_many - unregister many devices 6636 6636 * @head: list of devices 6637 + * 6638 + * Note: As most callers use a stack allocated list_head, 6639 + * we force a list_del() to make sure stack wont be corrupted later. 6637 6640 */ 6638 6641 void unregister_netdevice_many(struct list_head *head) 6639 6642 { ··· 6646 6643 rollback_registered_many(head); 6647 6644 list_for_each_entry(dev, head, unreg_list) 6648 6645 net_set_todo(dev); 6646 + list_del(head); 6649 6647 } 6650 6648 } 6651 6649 EXPORT_SYMBOL(unregister_netdevice_many); ··· 7102 7098 } 7103 7099 } 7104 7100 unregister_netdevice_many(&dev_kill_list); 7105 - list_del(&dev_kill_list); 7106 7101 rtnl_unlock(); 7107 7102 } 7108 7103
+2 -1
net/core/rtnetlink.c
··· 1216 1216 .len = sizeof(struct ifla_vf_spoofchk) }, 1217 1217 [IFLA_VF_RATE] = { .type = NLA_BINARY, 1218 1218 .len = sizeof(struct ifla_vf_rate) }, 1219 + [IFLA_VF_LINK_STATE] = { .type = NLA_BINARY, 1220 + .len = sizeof(struct ifla_vf_link_state) }, 1219 1221 }; 1220 1222 1221 1223 static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { ··· 1772 1770 1773 1771 ops->dellink(dev, &list_kill); 1774 1772 unregister_netdevice_many(&list_kill); 1775 - list_del(&list_kill); 1776 1773 return 0; 1777 1774 } 1778 1775
+1 -1
net/core/skbuff.c
··· 2885 2885 int pos; 2886 2886 int dummy; 2887 2887 2888 + __skb_push(head_skb, doffset); 2888 2889 proto = skb_network_protocol(head_skb, &dummy); 2889 2890 if (unlikely(!proto)) 2890 2891 return ERR_PTR(-EINVAL); ··· 2893 2892 csum = !head_skb->encap_hdr_csum && 2894 2893 !!can_checksum_protocol(features, proto); 2895 2894 2896 - __skb_push(head_skb, doffset); 2897 2895 headroom = skb_headroom(head_skb); 2898 2896 pos = skb_headlen(head_skb); 2899 2897
+3 -1
net/dns_resolver/dns_query.c
··· 149 149 if (!*_result) 150 150 goto put; 151 151 152 - memcpy(*_result, upayload->data, len + 1); 152 + memcpy(*_result, upayload->data, len); 153 + (*_result)[len] = '\0'; 154 + 153 155 if (_expiry) 154 156 *_expiry = rkey->expiry; 155 157
+10 -6
net/ipv4/ip_tunnel.c
··· 268 268 __be32 remote = parms->iph.daddr; 269 269 __be32 local = parms->iph.saddr; 270 270 __be32 key = parms->i_key; 271 + __be16 flags = parms->i_flags; 271 272 int link = parms->link; 272 273 struct ip_tunnel *t = NULL; 273 274 struct hlist_head *head = ip_bucket(itn, parms); ··· 276 275 hlist_for_each_entry_rcu(t, head, hash_node) { 277 276 if (local == t->parms.iph.saddr && 278 277 remote == t->parms.iph.daddr && 279 - key == t->parms.i_key && 280 278 link == t->parms.link && 281 - type == t->dev->type) 279 + type == t->dev->type && 280 + ip_tunnel_key_match(&t->parms, flags, key)) 282 281 break; 283 282 } 284 283 return t; ··· 668 667 dev->needed_headroom = max_headroom; 669 668 670 669 if (skb_cow_head(skb, dev->needed_headroom)) { 670 + ip_rt_put(rt); 671 671 dev->stats.tx_dropped++; 672 672 kfree_skb(skb); 673 673 return; ··· 748 746 goto done; 749 747 if (p->iph.ttl) 750 748 p->iph.frag_off |= htons(IP_DF); 751 - if (!(p->i_flags&TUNNEL_KEY)) 752 - p->i_key = 0; 753 - if (!(p->o_flags&TUNNEL_KEY)) 754 - p->o_key = 0; 749 + if (!(p->i_flags & VTI_ISVTI)) { 750 + if (!(p->i_flags & TUNNEL_KEY)) 751 + p->i_key = 0; 752 + if (!(p->o_flags & TUNNEL_KEY)) 753 + p->o_key = 0; 754 + } 755 755 756 756 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type); 757 757
+7 -1
net/ipv4/ip_vti.c
··· 313 313 return -EINVAL; 314 314 } 315 315 316 - p.i_flags |= VTI_ISVTI; 316 + if (!(p.i_flags & GRE_KEY)) 317 + p.i_key = 0; 318 + if (!(p.o_flags & GRE_KEY)) 319 + p.o_key = 0; 320 + 321 + p.i_flags = VTI_ISVTI; 322 + 317 323 err = ip_tunnel_ioctl(dev, &p, cmd); 318 324 if (err) 319 325 return err;
+2 -2
net/ipv4/ipip.c
··· 149 149 150 150 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 151 151 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 152 - t->dev->ifindex, 0, IPPROTO_IPIP, 0); 152 + t->parms.link, 0, IPPROTO_IPIP, 0); 153 153 err = 0; 154 154 goto out; 155 155 } 156 156 157 157 if (type == ICMP_REDIRECT) { 158 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0, 158 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 159 159 IPPROTO_IPIP, 0); 160 160 err = 0; 161 161 goto out;
+1
net/ipv6/output_core.c
··· 78 78 if (len > IPV6_MAXPLEN) 79 79 len = 0; 80 80 ipv6_hdr(skb)->payload_len = htons(len); 81 + IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 81 82 82 83 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 83 84 skb_dst(skb)->dev, dst_output);
+2 -2
net/ipv6/sit.c
··· 560 560 561 561 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { 562 562 ipv4_update_pmtu(skb, dev_net(skb->dev), info, 563 - t->dev->ifindex, 0, IPPROTO_IPV6, 0); 563 + t->parms.link, 0, IPPROTO_IPV6, 0); 564 564 err = 0; 565 565 goto out; 566 566 } 567 567 if (type == ICMP_REDIRECT) { 568 - ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0, 568 + ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, 569 569 IPPROTO_IPV6, 0); 570 570 err = 0; 571 571 goto out;
-1
net/mac80211/iface.c
··· 1780 1780 } 1781 1781 mutex_unlock(&local->iflist_mtx); 1782 1782 unregister_netdevice_many(&unreg_list); 1783 - list_del(&unreg_list); 1784 1783 1785 1784 list_for_each_entry_safe(sdata, tmp, &wdev_list, list) { 1786 1785 list_del(&sdata->list);