Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge commit HEAD of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* HEAD:
[AX.25]: Use kzalloc
[ATM] net/atm/clip.c: fix PROC_FS=n compile
[PKT_SCHED]: act_api: Fix module leak while flushing actions
[NET]: Fix IPv4/DECnet routing rule dumping
[NET] gso: Fix up GSO packets with broken checksums
[NET] gso: Add skb_is_gso
[IRDA]: fix drivers/net/irda/ali-ircc.c:ali_ircc_init()
[ATM]: fix possible recursive locking in skb_migrate()
[ATM]: Typo in drivers/atm/Kconfig...
[TG3]: add amd8131 to "write reorder" chipsets
[NET]: Fix network device interface printk message priority

+242 -91
+1 -1
drivers/atm/Kconfig
··· 398 398 default n 399 399 help 400 400 This defers work to be done by the interrupt handler to a 401 - tasklet instead of hanlding everything at interrupt time. This 401 + tasklet instead of handling everything at interrupt time. This 402 402 may improve the responsive of the host. 403 403 404 404 config ATM_FORE200E_TX_RETRY
+1 -1
drivers/net/bnx2.c
··· 1639 1639 skb = tx_buf->skb; 1640 1640 #ifdef BCM_TSO 1641 1641 /* partial BD completions possible with TSO packets */ 1642 - if (skb_shinfo(skb)->gso_size) { 1642 + if (skb_is_gso(skb)) { 1643 1643 u16 last_idx, last_ring_idx; 1644 1644 1645 1645 last_idx = sw_cons +
+1 -1
drivers/net/chelsio/sge.c
··· 1417 1417 struct cpl_tx_pkt *cpl; 1418 1418 1419 1419 #ifdef NETIF_F_TSO 1420 - if (skb_shinfo(skb)->gso_size) { 1420 + if (skb_is_gso(skb)) { 1421 1421 int eth_type; 1422 1422 struct cpl_tx_pkt_lso *hdr; 1423 1423
+3 -4
drivers/net/e1000/e1000_main.c
··· 2524 2524 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2525 2525 int err; 2526 2526 2527 - if (skb_shinfo(skb)->gso_size) { 2527 + if (skb_is_gso(skb)) { 2528 2528 if (skb_header_cloned(skb)) { 2529 2529 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2530 2530 if (err) ··· 2649 2649 * tso gets written back prematurely before the data is fully 2650 2650 * DMA'd to the controller */ 2651 2651 if (!skb->data_len && tx_ring->last_tx_tso && 2652 - !skb_shinfo(skb)->gso_size) { 2652 + !skb_is_gso(skb)) { 2653 2653 tx_ring->last_tx_tso = 0; 2654 2654 size -= 4; 2655 2655 } ··· 2937 2937 2938 2938 #ifdef NETIF_F_TSO 2939 2939 /* Controller Erratum workaround */ 2940 - if (!skb->data_len && tx_ring->last_tx_tso && 2941 - !skb_shinfo(skb)->gso_size) 2940 + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) 2942 2941 count++; 2943 2942 #endif 2944 2943
+1 -1
drivers/net/forcedeth.c
··· 1495 1495 np->tx_skbuff[nr] = skb; 1496 1496 1497 1497 #ifdef NETIF_F_TSO 1498 - if (skb_shinfo(skb)->gso_size) 1498 + if (skb_is_gso(skb)) 1499 1499 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); 1500 1500 else 1501 1501 #endif
+2 -1
drivers/net/irda/ali-ircc.c
··· 146 146 { 147 147 ali_chip_t *chip; 148 148 chipio_t info; 149 - int ret = -ENODEV; 149 + int ret; 150 150 int cfg, cfg_base; 151 151 int reg, revision; 152 152 int i = 0; ··· 160 160 return ret; 161 161 } 162 162 163 + ret = -ENODEV; 163 164 164 165 /* Probe for all the ALi chipsets we know about */ 165 166 for (chip= chips; chip->name; chip++, i++)
+1 -1
drivers/net/ixgb/ixgb_main.c
··· 1173 1173 uint16_t ipcse, tucse, mss; 1174 1174 int err; 1175 1175 1176 - if(likely(skb_shinfo(skb)->gso_size)) { 1176 + if (likely(skb_is_gso(skb))) { 1177 1177 if (skb_header_cloned(skb)) { 1178 1178 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1179 1179 if (err)
+1 -1
drivers/net/loopback.c
··· 139 139 #endif 140 140 141 141 #ifdef LOOPBACK_TSO 142 - if (skb_shinfo(skb)->gso_size) { 142 + if (skb_is_gso(skb)) { 143 143 BUG_ON(skb->protocol != htons(ETH_P_IP)); 144 144 BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); 145 145
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 2116 2116 } 2117 2117 idx = (idx + 1) & tx->mask; 2118 2118 } while (idx != last_idx); 2119 - if (skb_shinfo(skb)->gso_size) { 2119 + if (skb_is_gso(skb)) { 2120 2120 printk(KERN_ERR 2121 2121 "myri10ge: %s: TSO but wanted to linearize?!?!?\n", 2122 2122 mgp->dev->name);
+1 -1
drivers/net/sky2.c
··· 1159 1159 count = sizeof(dma_addr_t) / sizeof(u32); 1160 1160 count += skb_shinfo(skb)->nr_frags * count; 1161 1161 1162 - if (skb_shinfo(skb)->gso_size) 1162 + if (skb_is_gso(skb)) 1163 1163 ++count; 1164 1164 1165 1165 if (skb->ip_summed == CHECKSUM_HW)
+2
drivers/net/tg3.c
··· 10078 10078 static struct pci_device_id write_reorder_chipsets[] = { 10079 10079 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 10080 10080 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 10081 + { PCI_DEVICE(PCI_VENDOR_ID_AMD, 10082 + PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 10081 10083 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 10082 10084 PCI_DEVICE_ID_VIA_8385_0) }, 10083 10085 { },
+2 -2
drivers/net/typhoon.c
··· 805 805 * If problems develop with TSO, check this first. 806 806 */ 807 807 numDesc = skb_shinfo(skb)->nr_frags + 1; 808 - if(skb_tso_size(skb)) 808 + if (skb_is_gso(skb)) 809 809 numDesc++; 810 810 811 811 /* When checking for free space in the ring, we need to also ··· 845 845 TYPHOON_TX_PF_VLAN_TAG_SHIFT); 846 846 } 847 847 848 - if(skb_tso_size(skb)) { 848 + if (skb_is_gso(skb)) { 849 849 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; 850 850 first_txd->numDesc++; 851 851
+1 -1
drivers/s390/net/qeth_main.c
··· 4457 4457 queue = card->qdio.out_qs 4458 4458 [qeth_get_priority_queue(card, skb, ipv, cast_type)]; 4459 4459 4460 - if (skb_shinfo(skb)->gso_size) 4460 + if (skb_is_gso(skb)) 4461 4461 large_send = card->options.large_send; 4462 4462 4463 4463 /*are we able to do TSO ? If so ,prepare and send it from here */
+5 -3
include/linux/netdevice.h
··· 549 549 struct net_device *); 550 550 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 551 551 int features); 552 + int (*gso_send_check)(struct sk_buff *skb); 552 553 void *af_packet_priv; 553 554 struct list_head list; 554 555 }; ··· 1002 1001 1003 1002 static inline int skb_gso_ok(struct sk_buff *skb, int features) 1004 1003 { 1005 - return net_gso_ok(features, skb_shinfo(skb)->gso_size ? 1006 - skb_shinfo(skb)->gso_type : 0); 1004 + return net_gso_ok(features, skb_shinfo(skb)->gso_type); 1007 1005 } 1008 1006 1009 1007 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 1010 1008 { 1011 - return !skb_gso_ok(skb, dev->features); 1009 + return skb_is_gso(skb) && 1010 + (!skb_gso_ok(skb, dev->features) || 1011 + unlikely(skb->ip_summed != CHECKSUM_HW)); 1012 1012 } 1013 1013 1014 1014 #endif /* __KERNEL__ */
+5
include/linux/skbuff.h
··· 1455 1455 { } 1456 1456 #endif 1457 1457 1458 + static inline int skb_is_gso(const struct sk_buff *skb) 1459 + { 1460 + return skb_shinfo(skb)->gso_size; 1461 + } 1462 + 1458 1463 #endif /* __KERNEL__ */ 1459 1464 #endif /* _LINUX_SKBUFF_H */
+2
include/net/protocol.h
··· 36 36 struct net_protocol { 37 37 int (*handler)(struct sk_buff *skb); 38 38 void (*err_handler)(struct sk_buff *skb, u32 info); 39 + int (*gso_send_check)(struct sk_buff *skb); 39 40 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 40 41 int features); 41 42 int no_policy; ··· 52 51 int type, int code, int offset, 53 52 __u32 info); 54 53 54 + int (*gso_send_check)(struct sk_buff *skb); 55 55 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 56 56 int features); 57 57
+1
include/net/tcp.h
··· 1086 1086 1087 1087 extern int tcp_v4_destroy_sock(struct sock *sk); 1088 1088 1089 + extern int tcp_v4_gso_send_check(struct sk_buff *skb); 1089 1090 extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); 1090 1091 1091 1092 #ifdef CONFIG_PROC_FS
+9 -4
net/atm/clip.c
··· 962 962 963 963 static int __init atm_clip_init(void) 964 964 { 965 - struct proc_dir_entry *p; 966 965 neigh_table_init_no_netlink(&clip_tbl); 967 966 968 967 clip_tbl_hook = &clip_tbl; ··· 971 972 972 973 setup_timer(&idle_timer, idle_timer_check, 0); 973 974 974 - p = create_proc_entry("arp", S_IRUGO, atm_proc_root); 975 - if (p) 976 - p->proc_fops = &arp_seq_fops; 975 + #ifdef CONFIG_PROC_FS 976 + { 977 + struct proc_dir_entry *p; 978 + 979 + p = create_proc_entry("arp", S_IRUGO, atm_proc_root); 980 + if (p) 981 + p->proc_fops = &arp_seq_fops; 982 + } 983 + #endif 977 984 978 985 return 0; 979 986 }
+11 -6
net/atm/ipcommon.c
··· 25 25 /* 26 26 * skb_migrate appends the list at "from" to "to", emptying "from" in the 27 27 * process. skb_migrate is atomic with respect to all other skb operations on 28 - * "from" and "to". Note that it locks both lists at the same time, so beware 29 - * of potential deadlocks. 28 + * "from" and "to". Note that it locks both lists at the same time, so to deal 29 + * with the lock ordering, the locks are taken in address order. 30 30 * 31 31 * This function should live in skbuff.c or skbuff.h. 32 32 */ 33 33 34 34 35 - void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) 35 + void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to) 36 36 { 37 37 unsigned long flags; 38 38 struct sk_buff *skb_from = (struct sk_buff *) from; 39 39 struct sk_buff *skb_to = (struct sk_buff *) to; 40 40 struct sk_buff *prev; 41 41 42 - spin_lock_irqsave(&from->lock,flags); 43 - spin_lock(&to->lock); 42 + if ((unsigned long) from < (unsigned long) to) { 43 + spin_lock_irqsave(&from->lock, flags); 44 + spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING); 45 + } else { 46 + spin_lock_irqsave(&to->lock, flags); 47 + spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING); 48 + } 44 49 prev = from->prev; 45 50 from->next->prev = to->prev; 46 51 prev->next = skb_to; ··· 56 51 from->prev = skb_from; 57 52 from->next = skb_from; 58 53 from->qlen = 0; 59 - spin_unlock_irqrestore(&from->lock,flags); 54 + spin_unlock_irqrestore(&from->lock, flags); 60 55 } 61 56 62 57
+1 -2
net/ax25/af_ax25.c
··· 486 486 { 487 487 ax25_cb *ax25; 488 488 489 - if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) 489 + if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) 490 490 return NULL; 491 491 492 - memset(ax25, 0x00, sizeof(*ax25)); 493 492 atomic_set(&ax25->refcount, 1); 494 493 495 494 skb_queue_head_init(&ax25->write_queue);
+1 -3
net/ax25/ax25_dev.c
··· 55 55 { 56 56 ax25_dev *ax25_dev; 57 57 58 - if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { 58 + if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { 59 59 printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); 60 60 return; 61 61 } 62 62 63 63 ax25_unregister_sysctl(); 64 - 65 - memset(ax25_dev, 0x00, sizeof(*ax25_dev)); 66 64 67 65 dev->ax25_ptr = ax25_dev; 68 66 ax25_dev->dev = dev;
+1 -1
net/bridge/br_forward.c
··· 35 35 int br_dev_queue_push_xmit(struct sk_buff *skb) 36 36 { 37 37 /* drop mtu oversized packets except gso */ 38 - if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size) 38 + if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) 39 39 kfree_skb(skb); 40 40 else { 41 41 #ifdef CONFIG_BRIDGE_NETFILTER
+1 -1
net/bridge/br_netfilter.c
··· 761 761 { 762 762 if (skb->protocol == htons(ETH_P_IP) && 763 763 skb->len > skb->dev->mtu && 764 - !skb_shinfo(skb)->gso_size) 764 + !skb_is_gso(skb)) 765 765 return ip_fragment(skb, br_dev_queue_push_xmit); 766 766 else 767 767 return br_dev_queue_push_xmit(skb);
+35 -7
net/core/dev.c
··· 1162 1162 unsigned int csum; 1163 1163 int ret = 0, offset = skb->h.raw - skb->data; 1164 1164 1165 - if (inward) { 1166 - skb->ip_summed = CHECKSUM_NONE; 1167 - goto out; 1165 + if (inward) 1166 + goto out_set_summed; 1167 + 1168 + if (unlikely(skb_shinfo(skb)->gso_size)) { 1169 + static int warned; 1170 + 1171 + WARN_ON(!warned); 1172 + warned = 1; 1173 + 1174 + /* Let GSO fix up the checksum. */ 1175 + goto out_set_summed; 1168 1176 } 1169 1177 1170 1178 if (skb_cloned(skb)) { ··· 1189 1181 BUG_ON(skb->csum + 2 > offset); 1190 1182 1191 1183 *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); 1184 + 1185 + out_set_summed: 1192 1186 skb->ip_summed = CHECKSUM_NONE; 1193 1187 out: 1194 1188 return ret; ··· 1211 1201 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1212 1202 struct packet_type *ptype; 1213 1203 int type = skb->protocol; 1204 + int err; 1214 1205 1215 1206 BUG_ON(skb_shinfo(skb)->frag_list); 1216 - BUG_ON(skb->ip_summed != CHECKSUM_HW); 1217 1207 1218 1208 skb->mac.raw = skb->data; 1219 1209 skb->mac_len = skb->nh.raw - skb->data; 1220 1210 __skb_pull(skb, skb->mac_len); 1221 1211 1212 + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 1213 + static int warned; 1214 + 1215 + WARN_ON(!warned); 1216 + warned = 1; 1217 + 1218 + if (skb_header_cloned(skb) && 1219 + (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 1220 + return ERR_PTR(err); 1221 + } 1222 + 1222 1223 rcu_read_lock(); 1223 1224 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1224 1225 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1226 + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { 1227 + err = ptype->gso_send_check(skb); 1228 + segs = ERR_PTR(err); 1229 + if (err || skb_gso_ok(skb, features)) 1230 + break; 1231 + __skb_push(skb, skb->data - skb->nh.raw); 1232 + } 1225 1233 segs = ptype->gso_segment(skb, features); 1226 1234 break; 1227 1235 } ··· 1755 1727 if (dev->qdisc_ingress) { 1756 1728 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); 1757 1729 
if (MAX_RED_LOOP < ttl++) { 1758 - printk("Redir loop detected Dropping packet (%s->%s)\n", 1730 + printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n", 1759 1731 skb->input_dev->name, skb->dev->name); 1760 1732 return TC_ACT_SHOT; 1761 1733 } ··· 2950 2922 /* Fix illegal SG+CSUM combinations. */ 2951 2923 if ((dev->features & NETIF_F_SG) && 2952 2924 !(dev->features & NETIF_F_ALL_CSUM)) { 2953 - printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", 2925 + printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", 2954 2926 dev->name); 2955 2927 dev->features &= ~NETIF_F_SG; 2956 2928 } ··· 2958 2930 /* TSO requires that SG is present as well. */ 2959 2931 if ((dev->features & NETIF_F_TSO) && 2960 2932 !(dev->features & NETIF_F_SG)) { 2961 - printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", 2933 + printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", 2962 2934 dev->name); 2963 2935 dev->features &= ~NETIF_F_TSO; 2964 2936 }
+2 -1
net/decnet/dn_rules.c
··· 399 399 rcu_read_lock(); 400 400 hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { 401 401 if (idx < s_idx) 402 - continue; 402 + goto next; 403 403 if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) 404 404 break; 405 + next: 405 406 idx++; 406 407 } 407 408 rcu_read_unlock();
+36
net/ipv4/af_inet.c
··· 1097 1097 1098 1098 EXPORT_SYMBOL(inet_sk_rebuild_header); 1099 1099 1100 + static int inet_gso_send_check(struct sk_buff *skb) 1101 + { 1102 + struct iphdr *iph; 1103 + struct net_protocol *ops; 1104 + int proto; 1105 + int ihl; 1106 + int err = -EINVAL; 1107 + 1108 + if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) 1109 + goto out; 1110 + 1111 + iph = skb->nh.iph; 1112 + ihl = iph->ihl * 4; 1113 + if (ihl < sizeof(*iph)) 1114 + goto out; 1115 + 1116 + if (unlikely(!pskb_may_pull(skb, ihl))) 1117 + goto out; 1118 + 1119 + skb->h.raw = __skb_pull(skb, ihl); 1120 + iph = skb->nh.iph; 1121 + proto = iph->protocol & (MAX_INET_PROTOS - 1); 1122 + err = -EPROTONOSUPPORT; 1123 + 1124 + rcu_read_lock(); 1125 + ops = rcu_dereference(inet_protos[proto]); 1126 + if (likely(ops && ops->gso_send_check)) 1127 + err = ops->gso_send_check(skb); 1128 + rcu_read_unlock(); 1129 + 1130 + out: 1131 + return err; 1132 + } 1133 + 1100 1134 static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 1101 1135 { 1102 1136 struct sk_buff *segs = ERR_PTR(-EINVAL); ··· 1196 1162 static struct net_protocol tcp_protocol = { 1197 1163 .handler = tcp_v4_rcv, 1198 1164 .err_handler = tcp_v4_err, 1165 + .gso_send_check = tcp_v4_gso_send_check, 1199 1166 .gso_segment = tcp_tso_segment, 1200 1167 .no_policy = 1, 1201 1168 }; ··· 1243 1208 static struct packet_type ip_packet_type = { 1244 1209 .type = __constant_htons(ETH_P_IP), 1245 1210 .func = ip_rcv, 1211 + .gso_send_check = inet_gso_send_check, 1246 1212 .gso_segment = inet_gso_segment, 1247 1213 }; 1248 1214
+2 -2
net/ipv4/fib_rules.c
··· 457 457 458 458 rcu_read_lock(); 459 459 hlist_for_each_entry(r, node, &fib_rules, hlist) { 460 - 461 460 if (idx < s_idx) 462 - continue; 461 + goto next; 463 462 if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, 464 463 cb->nlh->nlmsg_seq, 465 464 RTM_NEWRULE, NLM_F_MULTI) < 0) 466 465 break; 466 + next: 467 467 idx++; 468 468 } 469 469 rcu_read_unlock();
+2 -2
net/ipv4/ip_output.c
··· 209 209 return dst_output(skb); 210 210 } 211 211 #endif 212 - if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) 212 + if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) 213 213 return ip_fragment(skb, ip_finish_output2); 214 214 else 215 215 return ip_finish_output2(skb); ··· 1095 1095 while (size > 0) { 1096 1096 int i; 1097 1097 1098 - if (skb_shinfo(skb)->gso_size) 1098 + if (skb_is_gso(skb)) 1099 1099 len = size; 1100 1100 else { 1101 1101
+18
net/ipv4/tcp_ipv4.c
··· 496 496 } 497 497 } 498 498 499 + int tcp_v4_gso_send_check(struct sk_buff *skb) 500 + { 501 + struct iphdr *iph; 502 + struct tcphdr *th; 503 + 504 + if (!pskb_may_pull(skb, sizeof(*th))) 505 + return -EINVAL; 506 + 507 + iph = skb->nh.iph; 508 + th = skb->h.th; 509 + 510 + th->check = 0; 511 + th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); 512 + skb->csum = offsetof(struct tcphdr, check); 513 + skb->ip_summed = CHECKSUM_HW; 514 + return 0; 515 + } 516 + 499 517 /* 500 518 * This routine will send an RST to the other tcp. 501 519 *
+1 -1
net/ipv4/xfrm4_output.c
··· 134 134 } 135 135 #endif 136 136 137 - if (!skb_shinfo(skb)->gso_size) 137 + if (!skb_is_gso(skb)) 138 138 return xfrm4_output_finish2(skb); 139 139 140 140 skb->protocol = htons(ETH_P_IP);
+2 -2
net/ipv6/ip6_output.c
··· 147 147 148 148 int ip6_output(struct sk_buff *skb) 149 149 { 150 - if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || 150 + if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || 151 151 dst_allfrag(skb->dst)) 152 152 return ip6_fragment(skb, ip6_output2); 153 153 else ··· 229 229 skb->priority = sk->sk_priority; 230 230 231 231 mtu = dst_mtu(dst); 232 - if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { 232 + if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { 233 233 IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); 234 234 return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, 235 235 dst_output);
+66 -33
net/ipv6/ipv6_sockglue.c
··· 57 57 58 58 DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; 59 59 60 + static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, 61 + int proto) 62 + { 63 + struct inet6_protocol *ops = NULL; 64 + 65 + for (;;) { 66 + struct ipv6_opt_hdr *opth; 67 + int len; 68 + 69 + if (proto != NEXTHDR_HOP) { 70 + ops = rcu_dereference(inet6_protos[proto]); 71 + 72 + if (unlikely(!ops)) 73 + break; 74 + 75 + if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) 76 + break; 77 + } 78 + 79 + if (unlikely(!pskb_may_pull(skb, 8))) 80 + break; 81 + 82 + opth = (void *)skb->data; 83 + len = opth->hdrlen * 8 + 8; 84 + 85 + if (unlikely(!pskb_may_pull(skb, len))) 86 + break; 87 + 88 + proto = opth->nexthdr; 89 + __skb_pull(skb, len); 90 + } 91 + 92 + return ops; 93 + } 94 + 95 + static int ipv6_gso_send_check(struct sk_buff *skb) 96 + { 97 + struct ipv6hdr *ipv6h; 98 + struct inet6_protocol *ops; 99 + int err = -EINVAL; 100 + 101 + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) 102 + goto out; 103 + 104 + ipv6h = skb->nh.ipv6h; 105 + __skb_pull(skb, sizeof(*ipv6h)); 106 + err = -EPROTONOSUPPORT; 107 + 108 + rcu_read_lock(); 109 + ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 110 + if (likely(ops && ops->gso_send_check)) { 111 + skb->h.raw = skb->data; 112 + err = ops->gso_send_check(skb); 113 + } 114 + rcu_read_unlock(); 115 + 116 + out: 117 + return err; 118 + } 119 + 60 120 static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) 61 121 { 62 122 struct sk_buff *segs = ERR_PTR(-EINVAL); 63 123 struct ipv6hdr *ipv6h; 64 124 struct inet6_protocol *ops; 65 - int proto; 66 125 67 126 if (unlikely(skb_shinfo(skb)->gso_type & 68 127 ~(SKB_GSO_UDP | ··· 135 76 goto out; 136 77 137 78 ipv6h = skb->nh.ipv6h; 138 - proto = ipv6h->nexthdr; 139 79 __skb_pull(skb, sizeof(*ipv6h)); 80 + segs = ERR_PTR(-EPROTONOSUPPORT); 140 81 141 82 rcu_read_lock(); 142 - for (;;) { 143 - struct ipv6_opt_hdr *opth; 144 - int len; 145 - 146 - if (proto != 
NEXTHDR_HOP) { 147 - ops = rcu_dereference(inet6_protos[proto]); 148 - 149 - if (unlikely(!ops)) 150 - goto unlock; 151 - 152 - if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) 153 - break; 154 - } 155 - 156 - if (unlikely(!pskb_may_pull(skb, 8))) 157 - goto unlock; 158 - 159 - opth = (void *)skb->data; 160 - len = opth->hdrlen * 8 + 8; 161 - 162 - if (unlikely(!pskb_may_pull(skb, len))) 163 - goto unlock; 164 - 165 - proto = opth->nexthdr; 166 - __skb_pull(skb, len); 167 - } 168 - 169 - skb->h.raw = skb->data; 170 - if (likely(ops->gso_segment)) 83 + ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 84 + if (likely(ops && ops->gso_segment)) { 85 + skb->h.raw = skb->data; 171 86 segs = ops->gso_segment(skb, features); 172 - 173 - unlock: 87 + } 174 88 rcu_read_unlock(); 175 89 176 90 if (unlikely(IS_ERR(segs))) ··· 162 130 static struct packet_type ipv6_packet_type = { 163 131 .type = __constant_htons(ETH_P_IPV6), 164 132 .func = ipv6_rcv, 133 + .gso_send_check = ipv6_gso_send_check, 165 134 .gso_segment = ipv6_gso_segment, 166 135 }; 167 136
+19
net/ipv6/tcp_ipv6.c
··· 552 552 } 553 553 } 554 554 555 + static int tcp_v6_gso_send_check(struct sk_buff *skb) 556 + { 557 + struct ipv6hdr *ipv6h; 558 + struct tcphdr *th; 559 + 560 + if (!pskb_may_pull(skb, sizeof(*th))) 561 + return -EINVAL; 562 + 563 + ipv6h = skb->nh.ipv6h; 564 + th = skb->h.th; 565 + 566 + th->check = 0; 567 + th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, 568 + IPPROTO_TCP, 0); 569 + skb->csum = offsetof(struct tcphdr, check); 570 + skb->ip_summed = CHECKSUM_HW; 571 + return 0; 572 + } 555 573 556 574 static void tcp_v6_send_reset(struct sk_buff *skb) 557 575 { ··· 1621 1603 static struct inet6_protocol tcpv6_protocol = { 1622 1604 .handler = tcp_v6_rcv, 1623 1605 .err_handler = tcp_v6_err, 1606 + .gso_send_check = tcp_v6_gso_send_check, 1624 1607 .gso_segment = tcp_tso_segment, 1625 1608 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, 1626 1609 };
+1 -1
net/ipv6/xfrm6_output.c
··· 122 122 { 123 123 struct sk_buff *segs; 124 124 125 - if (!skb_shinfo(skb)->gso_size) 125 + if (!skb_is_gso(skb)) 126 126 return xfrm6_output_finish2(skb); 127 127 128 128 skb->protocol = htons(ETH_P_IP);
+1 -3
net/netrom/af_netrom.c
··· 1382 1382 return -1; 1383 1383 } 1384 1384 1385 - dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1385 + dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1386 1386 if (dev_nr == NULL) { 1387 1387 printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); 1388 1388 return -1; 1389 1389 } 1390 - 1391 - memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *)); 1392 1390 1393 1391 for (i = 0; i < nr_ndevs; i++) { 1394 1392 char name[IFNAMSIZ];
+1 -2
net/rose/af_rose.c
··· 1490 1490 1491 1491 rose_callsign = null_ax25_address; 1492 1492 1493 - dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1493 + dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); 1494 1494 if (dev_rose == NULL) { 1495 1495 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); 1496 1496 rc = -ENOMEM; 1497 1497 goto out_proto_unregister; 1498 1498 } 1499 1499 1500 - memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); 1501 1500 for (i = 0; i < rose_ndevs; i++) { 1502 1501 struct net_device *dev; 1503 1502 char name[IFNAMSIZ];
+1 -1
net/sched/act_api.c
··· 602 602 return err; 603 603 604 604 rtattr_failure: 605 - module_put(a->ops->owner); 606 605 nlmsg_failure: 606 + module_put(a->ops->owner); 607 607 err_out: 608 608 kfree_skb(skb); 609 609 kfree(a);