Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
[TIPC]: Removing useless casts
[IPV4]: Fix nexthop realm dumping for multipath routes
[DUMMY]: Avoid an oops when dummy_init_one() failed
[IFB] After ifb_init_one() failed, i is increased. Decrease it before the cleanup loop
[NET]: Fix reversed error test in netif_tx_trylock
[MAINTAINERS]: Mark LAPB as Orphan.
[NET]: Conversions from kmalloc+memset to k(z|c)alloc.
[NET]: sun happymeal, little pci cleanup
[IrDA]: Use alloc_skb() in IrDA TX path
[I/OAT]: Remove pci_module_init() from Intel I/OAT DMA engine
[I/OAT]: net/core/user_dma.c should #include <net/netdma.h>
[SCTP]: ADDIP: Don't use an address as source until it is ASCONF-ACKed
[SCTP]: Set chunk->data_accepted only if we are going to accept it.
[SCTP]: Verify all the paths to a peer via heartbeat before using them.
[SCTP]: Unhash the endpoint in sctp_endpoint_free().
[SCTP]: Check for NULL arg to sctp_bucket_destroy().
[PKT_SCHED] netem: Fix slab corruption with netem (2nd try)
[WAN]: Converted synclink drivers to use netif_carrier_*()
[WAN]: Cosmetic changes to N2 and C101 drivers
[WAN]: Added missing netif_dormant_off() to generic HDLC
...

+414 -471
+1 -3
MAINTAINERS
··· 1676 1676 S: Maintained 1677 1677 1678 1678 LAPB module 1679 - P: Henner Eisen 1680 - M: eis@baty.hanse.de 1681 1679 L: linux-x25@vger.kernel.org 1682 - S: Maintained 1680 + S: Orphan 1683 1681 1684 1682 LASI 53c700 driver for PARISC 1685 1683 P: James E.J. Bottomley
+10 -4
drivers/char/synclink.c
··· 1344 1344 } else 1345 1345 info->input_signal_events.dcd_down++; 1346 1346 #ifdef CONFIG_HDLC 1347 - if (info->netcount) 1348 - hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev); 1347 + if (info->netcount) { 1348 + if (status & MISCSTATUS_DCD) 1349 + netif_carrier_on(info->netdev); 1350 + else 1351 + netif_carrier_off(info->netdev); 1352 + } 1349 1353 #endif 1350 1354 } 1351 1355 if (status & MISCSTATUS_CTS_LATCHED) ··· 7848 7844 spin_lock_irqsave(&info->irq_spinlock, flags); 7849 7845 usc_get_serial_signals(info); 7850 7846 spin_unlock_irqrestore(&info->irq_spinlock, flags); 7851 - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 7852 - 7847 + if (info->serial_signals & SerialSignal_DCD) 7848 + netif_carrier_on(dev); 7849 + else 7850 + netif_carrier_off(dev); 7853 7851 return 0; 7854 7852 } 7855 7853
+10 -4
drivers/char/synclinkmp.c
··· 1752 1752 spin_lock_irqsave(&info->lock, flags); 1753 1753 get_signals(info); 1754 1754 spin_unlock_irqrestore(&info->lock, flags); 1755 - hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev); 1756 - 1755 + if (info->serial_signals & SerialSignal_DCD) 1756 + netif_carrier_on(dev); 1757 + else 1758 + netif_carrier_off(dev); 1757 1759 return 0; 1758 1760 } 1759 1761 ··· 2524 2522 } else 2525 2523 info->input_signal_events.dcd_down++; 2526 2524 #ifdef CONFIG_HDLC 2527 - if (info->netcount) 2528 - hdlc_set_carrier(status & SerialSignal_DCD, info->netdev); 2525 + if (info->netcount) { 2526 + if (status & SerialSignal_DCD) 2527 + netif_carrier_on(info->netdev); 2528 + else 2529 + netif_carrier_off(info->netdev); 2530 + } 2529 2531 #endif 2530 2532 } 2531 2533 if (status & MISCSTATUS_CTS_LATCHED)
+1 -1
drivers/dma/ioatdma.c
··· 828 828 /* if forced, worst case is that rmmod hangs */ 829 829 __unsafe(THIS_MODULE); 830 830 831 - return pci_module_init(&ioat_pci_drv); 831 + return pci_register_driver(&ioat_pci_drv); 832 832 } 833 833 834 834 module_init(ioat_init_module);
+1
drivers/net/dummy.c
··· 132 132 for (i = 0; i < numdummies && !err; i++) 133 133 err = dummy_init_one(i); 134 134 if (err) { 135 + i--; 135 136 while (--i >= 0) 136 137 dummy_free_one(i); 137 138 }
+1
drivers/net/ifb.c
··· 271 271 for (i = 0; i < numifbs && !err; i++) 272 272 err = ifb_init_one(i); 273 273 if (err) { 274 + i--; 274 275 while (--i >= 0) 275 276 ifb_free_one(i); 276 277 }
+2 -7
drivers/net/sunhme.c
··· 3255 3255 } 3256 3256 3257 3257 static struct pci_device_id happymeal_pci_ids[] = { 3258 - { 3259 - .vendor = PCI_VENDOR_ID_SUN, 3260 - .device = PCI_DEVICE_ID_SUN_HAPPYMEAL, 3261 - .subvendor = PCI_ANY_ID, 3262 - .subdevice = PCI_ANY_ID, 3263 - }, 3258 + { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, 3264 3259 { } /* Terminating entry */ 3265 3260 }; 3266 3261 ··· 3270 3275 3271 3276 static int __init happy_meal_pci_init(void) 3272 3277 { 3273 - return pci_module_init(&hme_pci_driver); 3278 + return pci_register_driver(&hme_pci_driver); 3274 3279 } 3275 3280 3276 3281 static void happy_meal_pci_exit(void)
+2 -2
drivers/net/wan/c101.c
··· 197 197 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 198 198 199 199 set_carrier(port); 200 - printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port)); 201 200 202 201 /* enable MSCI1 CDCD interrupt */ 203 202 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); ··· 448 449 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 449 450 MODULE_DESCRIPTION("Moxa C101 serial port driver"); 450 451 MODULE_LICENSE("GPL v2"); 451 - module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */ 452 + module_param(hw, charp, 0444); 453 + MODULE_PARM_DESC(hw, "irq,ram:irq,...");
+1
drivers/net/wan/hdlc_ppp.c
··· 107 107 dev->hard_header = NULL; 108 108 dev->type = ARPHRD_PPP; 109 109 dev->addr_len = 0; 110 + netif_dormant_off(dev); 110 111 return 0; 111 112 } 112 113
+1
drivers/net/wan/hdlc_raw.c
··· 82 82 dev->type = ARPHRD_RAWHDLC; 83 83 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 84 84 dev->addr_len = 0; 85 + netif_dormant_off(dev); 85 86 return 0; 86 87 } 87 88
+1
drivers/net/wan/hdlc_raw_eth.c
··· 100 100 dev->tx_queue_len = old_qlen; 101 101 memcpy(dev->dev_addr, "\x00\x01", 2); 102 102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 103 + netif_dormant_off(dev); 103 104 return 0; 104 105 } 105 106
+1
drivers/net/wan/hdlc_x25.c
··· 212 212 dev->hard_header = NULL; 213 213 dev->type = ARPHRD_X25; 214 214 dev->addr_len = 0; 215 + netif_dormant_off(dev); 215 216 return 0; 216 217 } 217 218
+2 -1
drivers/net/wan/n2.c
··· 564 564 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 565 565 MODULE_DESCRIPTION("RISCom/N2 serial port driver"); 566 566 MODULE_LICENSE("GPL v2"); 567 - module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */ 567 + module_param(hw, charp, 0444); 568 + MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
+3 -3
include/linux/netdevice.h
··· 924 924 925 925 static inline int netif_tx_trylock(struct net_device *dev) 926 926 { 927 - int err = spin_trylock(&dev->_xmit_lock); 928 - if (!err) 927 + int ok = spin_trylock(&dev->_xmit_lock); 928 + if (likely(ok)) 929 929 dev->xmit_lock_owner = smp_processor_id(); 930 - return err; 930 + return ok; 931 931 } 932 932 933 933 static inline void netif_tx_unlock(struct net_device *dev)
+1 -1
include/net/netdma.h
··· 37 37 } 38 38 39 39 int dma_skb_copy_datagram_iovec(struct dma_chan* chan, 40 - const struct sk_buff *skb, int offset, struct iovec *to, 40 + struct sk_buff *skb, int offset, struct iovec *to, 41 41 size_t len, struct dma_pinned_list *pinned_list); 42 42 43 43 #endif /* CONFIG_NET_DMA */
+6 -5
include/net/sctp/structs.h
··· 445 445 struct sctp_paramhdr param_hdr; 446 446 union sctp_addr daddr; 447 447 unsigned long sent_at; 448 + __u64 hb_nonce; 448 449 } __attribute__((packed)) sctp_sender_hb_info_t; 449 450 450 451 /* ··· 731 730 const union sctp_addr *sctp_source(const struct sctp_chunk *chunk); 732 731 733 732 /* This is a structure for holding either an IPv6 or an IPv4 address. */ 734 - /* sin_family -- AF_INET or AF_INET6 735 - * sin_port -- ordinary port number 736 - * sin_addr -- cast to either (struct in_addr) or (struct in6_addr) 737 - */ 738 733 struct sctp_sockaddr_entry { 739 734 struct list_head list; 740 735 union sctp_addr a; 736 + __u8 use_as_src; 741 737 }; 742 738 743 739 typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); ··· 982 984 */ 983 985 char cacc_saw_newack; 984 986 } cacc; 987 + 988 + /* 64-bit random number sent with heartbeat. */ 989 + __u64 hb_nonce; 985 990 }; 986 991 987 992 struct sctp_transport *sctp_transport_new(const union sctp_addr *, ··· 1139 1138 sctp_scope_t scope, gfp_t gfp, 1140 1139 int flags); 1141 1140 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1142 - gfp_t gfp); 1141 + __u8 use_as_src, gfp_t gfp); 1143 1142 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1144 1143 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1145 1144 struct sctp_sock *);
+9
include/net/sctp/user.h
··· 560 560 } __attribute__((packed, aligned(4))); 561 561 562 562 /* Peer addresses's state. */ 563 + /* UNKNOWN: Peer address passed by the upper layer in sendmsg or connect[x] 564 + * calls. 565 + * UNCONFIRMED: Peer address received in INIT/INIT-ACK address parameters. 566 + * Not yet confirmed by a heartbeat and not available for data 567 + * transfers. 568 + * ACTIVE : Peer address confirmed, active and available for data transfers. 569 + * INACTIVE: Peer address inactive and not available for data transfers. 570 + */ 563 571 enum sctp_spinfo_state { 564 572 SCTP_INACTIVE, 565 573 SCTP_ACTIVE, 574 + SCTP_UNCONFIRMED, 566 575 SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */ 567 576 }; 568 577
+1 -2
net/8021q/vlan.c
··· 542 542 * so it cannot "appear" on us. 543 543 */ 544 544 if (!grp) { /* need to add a new group */ 545 - grp = kmalloc(sizeof(struct vlan_group), GFP_KERNEL); 545 + grp = kzalloc(sizeof(struct vlan_group), GFP_KERNEL); 546 546 if (!grp) 547 547 goto out_free_unregister; 548 548 549 549 /* printk(KERN_ALERT "VLAN REGISTER: Allocated new group.\n"); */ 550 - memset(grp, 0, sizeof(struct vlan_group)); 551 550 grp->real_dev_ifindex = real_dev->ifindex; 552 551 553 552 hlist_add_head_rcu(&grp->hlist,
+2 -4
net/appletalk/ddp.c
··· 227 227 static struct atalk_iface *atif_add_device(struct net_device *dev, 228 228 struct atalk_addr *sa) 229 229 { 230 - struct atalk_iface *iface = kmalloc(sizeof(*iface), GFP_KERNEL); 230 + struct atalk_iface *iface = kzalloc(sizeof(*iface), GFP_KERNEL); 231 231 232 232 if (!iface) 233 233 goto out; 234 234 235 - memset(iface, 0, sizeof(*iface)); 236 235 dev_hold(dev); 237 236 iface->dev = dev; 238 237 dev->atalk_ptr = iface; ··· 558 559 } 559 560 560 561 if (!rt) { 561 - rt = kmalloc(sizeof(*rt), GFP_ATOMIC); 562 + rt = kzalloc(sizeof(*rt), GFP_ATOMIC); 562 563 563 564 retval = -ENOBUFS; 564 565 if (!rt) 565 566 goto out_unlock; 566 - memset(rt, 0, sizeof(*rt)); 567 567 568 568 rt->next = atalk_routes; 569 569 atalk_routes = rt;
+1 -2
net/atm/br2684.c
··· 508 508 509 509 if (copy_from_user(&be, arg, sizeof be)) 510 510 return -EFAULT; 511 - brvcc = kmalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 511 + brvcc = kzalloc(sizeof(struct br2684_vcc), GFP_KERNEL); 512 512 if (!brvcc) 513 513 return -ENOMEM; 514 - memset(brvcc, 0, sizeof(struct br2684_vcc)); 515 514 write_lock_irq(&devs_lock); 516 515 net_dev = br2684_find_dev(&be.ifspec); 517 516 if (net_dev == NULL) {
+1 -2
net/atm/clip.c
··· 929 929 struct seq_file *seq; 930 930 int rc = -EAGAIN; 931 931 932 - state = kmalloc(sizeof(*state), GFP_KERNEL); 932 + state = kzalloc(sizeof(*state), GFP_KERNEL); 933 933 if (!state) { 934 934 rc = -ENOMEM; 935 935 goto out_kfree; 936 936 } 937 - memset(state, 0, sizeof(*state)); 938 937 state->ns.neigh_sub_iter = clip_seq_sub_iter; 939 938 940 939 rc = seq_open(file, &arp_seq_ops);
+1 -2
net/atm/lec.c
··· 1811 1811 { 1812 1812 struct lec_arp_table *to_return; 1813 1813 1814 - to_return = kmalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1814 + to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC); 1815 1815 if (!to_return) { 1816 1816 printk("LEC: Arp entry kmalloc failed\n"); 1817 1817 return NULL; 1818 1818 } 1819 - memset(to_return, 0, sizeof(struct lec_arp_table)); 1820 1819 memcpy(to_return->mac_addr, mac_addr, ETH_ALEN); 1821 1820 init_timer(&to_return->timer); 1822 1821 to_return->timer.function = lec_arp_expire_arp;
+1 -2
net/atm/mpc.c
··· 258 258 { 259 259 struct mpoa_client *mpc; 260 260 261 - mpc = kmalloc(sizeof (struct mpoa_client), GFP_KERNEL); 261 + mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL); 262 262 if (mpc == NULL) 263 263 return NULL; 264 - memset(mpc, 0, sizeof(struct mpoa_client)); 265 264 rwlock_init(&mpc->ingress_lock); 266 265 rwlock_init(&mpc->egress_lock); 267 266 mpc->next = mpcs;
+1 -2
net/atm/pppoatm.c
··· 287 287 if (be.encaps != PPPOATM_ENCAPS_AUTODETECT && 288 288 be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC) 289 289 return -EINVAL; 290 - pvcc = kmalloc(sizeof(*pvcc), GFP_KERNEL); 290 + pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL); 291 291 if (pvcc == NULL) 292 292 return -ENOMEM; 293 - memset(pvcc, 0, sizeof(*pvcc)); 294 293 pvcc->atmvcc = atmvcc; 295 294 pvcc->old_push = atmvcc->push; 296 295 pvcc->old_pop = atmvcc->pop;
+1 -2
net/atm/resources.c
··· 33 33 { 34 34 struct atm_dev *dev; 35 35 36 - dev = kmalloc(sizeof(*dev), GFP_KERNEL); 36 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 37 37 if (!dev) 38 38 return NULL; 39 - memset(dev, 0, sizeof(*dev)); 40 39 dev->type = type; 41 40 dev->signal = ATM_PHY_SIG_UNKNOWN; 42 41 dev->link_rate = ATM_OC3_PCR;
+1 -3
net/ax25/sysctl_net_ax25.c
··· 203 203 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 204 204 ax25_table_size += sizeof(ctl_table); 205 205 206 - if ((ax25_table = kmalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 206 + if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 207 207 spin_unlock_bh(&ax25_dev_lock); 208 208 return; 209 209 } 210 - 211 - memset(ax25_table, 0x00, ax25_table_size); 212 210 213 211 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 214 212 ctl_table *child = kmalloc(sizeof(ax25_param_table), GFP_ATOMIC);
+2 -5
net/bridge/br_ioctl.c
··· 162 162 if (num > BR_MAX_PORTS) 163 163 num = BR_MAX_PORTS; 164 164 165 - indices = kmalloc(num*sizeof(int), GFP_KERNEL); 165 + indices = kcalloc(num, sizeof(int), GFP_KERNEL); 166 166 if (indices == NULL) 167 167 return -ENOMEM; 168 - 169 - memset(indices, 0, num*sizeof(int)); 170 168 171 169 get_port_ifindices(br, indices, num); 172 170 if (copy_to_user((void __user *)args[1], indices, num*sizeof(int))) ··· 325 327 326 328 if (args[2] >= 2048) 327 329 return -ENOMEM; 328 - indices = kmalloc(args[2]*sizeof(int), GFP_KERNEL); 330 + indices = kcalloc(args[2], sizeof(int), GFP_KERNEL); 329 331 if (indices == NULL) 330 332 return -ENOMEM; 331 333 332 - memset(indices, 0, args[2]*sizeof(int)); 333 334 args[2] = get_bridge_ifindices(indices, args[2]); 334 335 335 336 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
+1
net/core/user_dma.c
··· 29 29 #include <linux/socket.h> 30 30 #include <linux/rtnetlink.h> /* for BUG_TRAP */ 31 31 #include <net/tcp.h> 32 + #include <net/netdma.h> 32 33 33 34 #define NET_DMA_DEFAULT_COPYBREAK 4096 34 35
+2 -7
net/decnet/dn_dev.c
··· 413 413 { 414 414 struct dn_ifaddr *ifa; 415 415 416 - ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 417 - 418 - if (ifa) { 419 - memset(ifa, 0, sizeof(*ifa)); 420 - } 416 + ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); 421 417 422 418 return ifa; 423 419 } ··· 1101 1105 return NULL; 1102 1106 1103 1107 *err = -ENOBUFS; 1104 - if ((dn_db = kmalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1108 + if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1105 1109 return NULL; 1106 1110 1107 - memset(dn_db, 0, sizeof(struct dn_dev)); 1108 1111 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1109 1112 smp_wmb(); 1110 1113 dev->dn_ptr = dn_db;
+1 -2
net/decnet/dn_fib.c
··· 283 283 goto err_inval; 284 284 } 285 285 286 - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 286 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 287 287 err = -ENOBUFS; 288 288 if (fi == NULL) 289 289 goto failure; 290 - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct dn_fib_nh)); 291 290 292 291 fi->fib_protocol = r->rtm_protocol; 293 292 fi->fib_nhs = nhs;
+1 -2
net/decnet/dn_neigh.c
··· 580 580 { 581 581 struct seq_file *seq; 582 582 int rc = -ENOMEM; 583 - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 583 + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 584 584 585 585 if (!s) 586 586 goto out; 587 587 588 - memset(s, 0, sizeof(*s)); 589 588 rc = seq_open(file, &dn_neigh_seq_ops); 590 589 if (rc) 591 590 goto out_kfree;
+1 -2
net/decnet/dn_rules.c
··· 151 151 } 152 152 } 153 153 154 - new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 154 + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); 155 155 if (!new_r) 156 156 return -ENOMEM; 157 - memset(new_r, 0, sizeof(*new_r)); 158 157 159 158 if (rta[RTA_SRC-1]) 160 159 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
+3 -8
net/decnet/dn_table.c
··· 158 158 break; 159 159 } 160 160 161 - ht = kmalloc(new_divisor*sizeof(struct dn_fib_node*), GFP_KERNEL); 162 - 161 + ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL); 163 162 if (ht == NULL) 164 163 return; 165 164 166 - memset(ht, 0, new_divisor*sizeof(struct dn_fib_node *)); 167 165 write_lock_bh(&dn_fib_tables_lock); 168 166 old_ht = dz->dz_hash; 169 167 dz->dz_hash = ht; ··· 182 184 static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 183 185 { 184 186 int i; 185 - struct dn_zone *dz = kmalloc(sizeof(struct dn_zone), GFP_KERNEL); 187 + struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); 186 188 if (!dz) 187 189 return NULL; 188 190 189 - memset(dz, 0, sizeof(struct dn_zone)); 190 191 if (z) { 191 192 dz->dz_divisor = 16; 192 193 dz->dz_hashmask = 0x0F; ··· 194 197 dz->dz_hashmask = 0; 195 198 } 196 199 197 - dz->dz_hash = kmalloc(dz->dz_divisor*sizeof(struct dn_fib_node *), GFP_KERNEL); 198 - 200 + dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); 199 201 if (!dz->dz_hash) { 200 202 kfree(dz); 201 203 return NULL; 202 204 } 203 205 204 - memset(dz->dz_hash, 0, dz->dz_divisor*sizeof(struct dn_fib_node*)); 205 206 dz->dz_order = z; 206 207 dz->dz_mask = dnet_make_mask(z); 207 208
+1 -2
net/econet/af_econet.c
··· 673 673 edev = dev->ec_ptr; 674 674 if (edev == NULL) { 675 675 /* Magic up a new one. */ 676 - edev = kmalloc(sizeof(struct ec_device), GFP_KERNEL); 676 + edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL); 677 677 if (edev == NULL) { 678 678 err = -ENOMEM; 679 679 break; 680 680 } 681 - memset(edev, 0, sizeof(struct ec_device)); 682 681 dev->ec_ptr = edev; 683 682 } else 684 683 net2dev_map[edev->net] = NULL;
+1 -2
net/ieee80211/ieee80211_crypt.c
··· 110 110 unsigned long flags; 111 111 struct ieee80211_crypto_alg *alg; 112 112 113 - alg = kmalloc(sizeof(*alg), GFP_KERNEL); 113 + alg = kzalloc(sizeof(*alg), GFP_KERNEL); 114 114 if (alg == NULL) 115 115 return -ENOMEM; 116 116 117 - memset(alg, 0, sizeof(*alg)); 118 117 alg->ops = ops; 119 118 120 119 spin_lock_irqsave(&ieee80211_crypto_lock, flags);
+1 -2
net/ieee80211/ieee80211_crypt_ccmp.c
··· 76 76 { 77 77 struct ieee80211_ccmp_data *priv; 78 78 79 - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 79 + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 80 80 if (priv == NULL) 81 81 goto fail; 82 - memset(priv, 0, sizeof(*priv)); 83 82 priv->key_idx = key_idx; 84 83 85 84 priv->tfm = crypto_alloc_tfm("aes", 0);
+1 -2
net/ieee80211/ieee80211_crypt_wep.c
··· 39 39 { 40 40 struct prism2_wep_data *priv; 41 41 42 - priv = kmalloc(sizeof(*priv), GFP_ATOMIC); 42 + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); 43 43 if (priv == NULL) 44 44 goto fail; 45 - memset(priv, 0, sizeof(*priv)); 46 45 priv->key_idx = keyidx; 47 46 48 47 priv->tfm = crypto_alloc_tfm("arc4", 0);
+2 -5
net/ieee80211/ieee80211_wx.c
··· 369 369 struct ieee80211_crypt_data *new_crypt; 370 370 371 371 /* take WEP into use */ 372 - new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data), 372 + new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), 373 373 GFP_KERNEL); 374 374 if (new_crypt == NULL) 375 375 return -ENOMEM; 376 - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 377 376 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 378 377 if (!new_crypt->ops) { 379 378 request_module("ieee80211_crypt_wep"); ··· 615 616 616 617 ieee80211_crypt_delayed_deinit(ieee, crypt); 617 618 618 - new_crypt = (struct ieee80211_crypt_data *) 619 - kmalloc(sizeof(*new_crypt), GFP_KERNEL); 619 + new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL); 620 620 if (new_crypt == NULL) { 621 621 ret = -ENOMEM; 622 622 goto done; 623 623 } 624 - memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); 625 624 new_crypt->ops = ops; 626 625 if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) 627 626 new_crypt->priv = new_crypt->ops->init(idx);
+1 -2
net/ieee80211/softmac/ieee80211softmac_io.c
··· 96 96 if(size > IEEE80211_DATA_LEN) 97 97 return NULL; 98 98 /* Allocate the frame */ 99 - data = kmalloc(size, GFP_ATOMIC); 100 - memset(data, 0, size); 99 + data = kzalloc(size, GFP_ATOMIC); 101 100 return data; 102 101 } 103 102
+1 -3
net/ipv4/ah4.c
··· 215 215 if (x->encap) 216 216 goto error; 217 217 218 - ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); 218 + ahp = kzalloc(sizeof(*ahp), GFP_KERNEL); 219 219 if (ahp == NULL) 220 220 return -ENOMEM; 221 - 222 - memset(ahp, 0, sizeof(*ahp)); 223 221 224 222 ahp->key = x->aalg->alg_key; 225 223 ahp->key_len = (x->aalg->alg_key_len+7)/8;
+1 -2
net/ipv4/arp.c
··· 1372 1372 { 1373 1373 struct seq_file *seq; 1374 1374 int rc = -ENOMEM; 1375 - struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1375 + struct neigh_seq_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1376 1376 1377 1377 if (!s) 1378 1378 goto out; 1379 1379 1380 - memset(s, 0, sizeof(*s)); 1381 1380 rc = seq_open(file, &arp_seq_ops); 1382 1381 if (rc) 1383 1382 goto out_kfree;
+2 -4
net/ipv4/devinet.c
··· 93 93 94 94 static struct in_ifaddr *inet_alloc_ifa(void) 95 95 { 96 - struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL); 96 + struct in_ifaddr *ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); 97 97 98 98 if (ifa) { 99 - memset(ifa, 0, sizeof(*ifa)); 100 99 INIT_RCU_HEAD(&ifa->rcu_head); 101 100 } 102 101 ··· 139 140 140 141 ASSERT_RTNL(); 141 142 142 - in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); 143 + in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL); 143 144 if (!in_dev) 144 145 goto out; 145 - memset(in_dev, 0, sizeof(*in_dev)); 146 146 INIT_RCU_HEAD(&in_dev->rcu_head); 147 147 memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); 148 148 in_dev->cnf.sysctl = NULL;
+1 -3
net/ipv4/esp4.c
··· 316 316 if (x->ealg == NULL) 317 317 goto error; 318 318 319 - esp = kmalloc(sizeof(*esp), GFP_KERNEL); 319 + esp = kzalloc(sizeof(*esp), GFP_KERNEL); 320 320 if (esp == NULL) 321 321 return -ENOMEM; 322 - 323 - memset(esp, 0, sizeof(*esp)); 324 322 325 323 if (x->aalg) { 326 324 struct xfrm_algo_desc *aalg_desc;
+2 -4
net/ipv4/fib_hash.c
··· 204 204 fn_new_zone(struct fn_hash *table, int z) 205 205 { 206 206 int i; 207 - struct fn_zone *fz = kmalloc(sizeof(struct fn_zone), GFP_KERNEL); 207 + struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL); 208 208 if (!fz) 209 209 return NULL; 210 210 211 - memset(fz, 0, sizeof(struct fn_zone)); 212 211 if (z) { 213 212 fz->fz_divisor = 16; 214 213 } else { ··· 1045 1046 { 1046 1047 struct seq_file *seq; 1047 1048 int rc = -ENOMEM; 1048 - struct fib_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1049 + struct fib_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1049 1050 1050 1051 if (!s) 1051 1052 goto out; ··· 1056 1057 1057 1058 seq = file->private_data; 1058 1059 seq->private = s; 1059 - memset(s, 0, sizeof(*s)); 1060 1060 out: 1061 1061 return rc; 1062 1062 out_kfree:
+1 -2
net/ipv4/fib_rules.c
··· 196 196 } 197 197 } 198 198 199 - new_r = kmalloc(sizeof(*new_r), GFP_KERNEL); 199 + new_r = kzalloc(sizeof(*new_r), GFP_KERNEL); 200 200 if (!new_r) 201 201 return -ENOMEM; 202 - memset(new_r, 0, sizeof(*new_r)); 203 202 204 203 if (rta[RTA_SRC-1]) 205 204 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
+9 -6
net/ipv4/fib_semantics.c
··· 709 709 goto failure; 710 710 } 711 711 712 - fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 712 + fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); 713 713 if (fi == NULL) 714 714 goto failure; 715 715 fib_info_cnt++; 716 - memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh)); 717 716 718 717 fi->fib_protocol = r->rtm_protocol; 719 718 ··· 961 962 rtm->rtm_protocol = fi->fib_protocol; 962 963 if (fi->fib_priority) 963 964 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 964 - #ifdef CONFIG_NET_CLS_ROUTE 965 - if (fi->fib_nh[0].nh_tclassid) 966 - RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); 967 - #endif 968 965 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 969 966 goto rtattr_failure; 970 967 if (fi->fib_prefsrc) ··· 970 975 RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw); 971 976 if (fi->fib_nh->nh_oif) 972 977 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 978 + #ifdef CONFIG_NET_CLS_ROUTE 979 + if (fi->fib_nh[0].nh_tclassid) 980 + RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid); 981 + #endif 973 982 } 974 983 #ifdef CONFIG_IP_ROUTE_MULTIPATH 975 984 if (fi->fib_nhs > 1) { ··· 992 993 nhp->rtnh_ifindex = nh->nh_oif; 993 994 if (nh->nh_gw) 994 995 RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw); 996 + #ifdef CONFIG_NET_CLS_ROUTE 997 + if (nh->nh_tclassid) 998 + RTA_PUT(skb, RTA_FLOW, 4, &nh->nh_tclassid); 999 + #endif 995 1000 nhp->rtnh_len = skb->tail - (unsigned char*)nhp; 996 1001 } endfor_nexthops(fi); 997 1002 mp_head->rta_type = RTA_MULTIPATH;
+4 -8
net/ipv4/igmp.c
··· 1028 1028 * for deleted items allows change reports to use common code with 1029 1029 * non-deleted or query-response MCA's. 1030 1030 */ 1031 - pmc = kmalloc(sizeof(*pmc), GFP_KERNEL); 1031 + pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1032 1032 if (!pmc) 1033 1033 return; 1034 - memset(pmc, 0, sizeof(*pmc)); 1035 1034 spin_lock_bh(&im->lock); 1036 1035 pmc->interface = im->interface; 1037 1036 in_dev_hold(in_dev); ··· 1528 1529 psf_prev = psf; 1529 1530 } 1530 1531 if (!psf) { 1531 - psf = kmalloc(sizeof(*psf), GFP_ATOMIC); 1532 + psf = kzalloc(sizeof(*psf), GFP_ATOMIC); 1532 1533 if (!psf) 1533 1534 return -ENOBUFS; 1534 - memset(psf, 0, sizeof(*psf)); 1535 1535 psf->sf_inaddr = *psfsrc; 1536 1536 if (psf_prev) { 1537 1537 psf_prev->sf_next = psf; ··· 2378 2380 { 2379 2381 struct seq_file *seq; 2380 2382 int rc = -ENOMEM; 2381 - struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2383 + struct igmp_mc_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 2382 2384 2383 2385 if (!s) 2384 2386 goto out; ··· 2388 2390 2389 2391 seq = file->private_data; 2390 2392 seq->private = s; 2391 - memset(s, 0, sizeof(*s)); 2392 2393 out: 2393 2394 return rc; 2394 2395 out_kfree: ··· 2552 2555 { 2553 2556 struct seq_file *seq; 2554 2557 int rc = -ENOMEM; 2555 - struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 2558 + struct igmp_mcf_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 2556 2559 2557 2560 if (!s) 2558 2561 goto out; ··· 2562 2565 2563 2566 seq = file->private_data; 2564 2567 seq->private = s; 2565 - memset(s, 0, sizeof(*s)); 2566 2568 out: 2567 2569 return rc; 2568 2570 out_kfree:
+1 -2
net/ipv4/inet_diag.c
··· 909 909 sizeof(struct inet_diag_handler *)); 910 910 int err = -ENOMEM; 911 911 912 - inet_diag_table = kmalloc(inet_diag_table_size, GFP_KERNEL); 912 + inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL); 913 913 if (!inet_diag_table) 914 914 goto out; 915 915 916 - memset(inet_diag_table, 0, inet_diag_table_size); 917 916 idiagnl = netlink_kernel_create(NETLINK_INET_DIAG, 0, inet_diag_rcv, 918 917 THIS_MODULE); 919 918 if (idiagnl == NULL)
-1
net/ipv4/ip_gre.c
··· 617 617 skb->mac.raw = skb->nh.raw; 618 618 skb->nh.raw = __pskb_pull(skb, offset); 619 619 skb_postpull_rcsum(skb, skb->h.raw, offset); 620 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 621 620 skb->pkt_type = PACKET_HOST; 622 621 #ifdef CONFIG_NET_IPGRE_BROADCAST 623 622 if (MULTICAST(iph->daddr)) {
-1
net/ipv4/ip_options.c
··· 256 256 257 257 if (!opt) { 258 258 opt = &(IPCB(skb)->opt); 259 - memset(opt, 0, sizeof(struct ip_options)); 260 259 iph = skb->nh.raw; 261 260 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 262 261 optptr = iph + sizeof(struct iphdr);
+1 -2
net/ipv4/ipcomp.c
··· 410 410 goto out; 411 411 412 412 err = -ENOMEM; 413 - ipcd = kmalloc(sizeof(*ipcd), GFP_KERNEL); 413 + ipcd = kzalloc(sizeof(*ipcd), GFP_KERNEL); 414 414 if (!ipcd) 415 415 goto out; 416 416 417 - memset(ipcd, 0, sizeof(*ipcd)); 418 417 x->props.header_len = 0; 419 418 if (x->props.mode) 420 419 x->props.header_len += sizeof(struct iphdr);
-1
net/ipv4/ipip.c
··· 487 487 488 488 skb->mac.raw = skb->nh.raw; 489 489 skb->nh.raw = skb->data; 490 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 491 490 skb->protocol = htons(ETH_P_IP); 492 491 skb->pkt_type = PACKET_HOST; 493 492
-2
net/ipv4/ipmr.c
··· 1461 1461 skb_pull(skb, (u8*)encap - skb->data); 1462 1462 skb->nh.iph = (struct iphdr *)skb->data; 1463 1463 skb->dev = reg_dev; 1464 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 1465 1464 skb->protocol = htons(ETH_P_IP); 1466 1465 skb->ip_summed = 0; 1467 1466 skb->pkt_type = PACKET_HOST; ··· 1516 1517 skb_pull(skb, (u8*)encap - skb->data); 1517 1518 skb->nh.iph = (struct iphdr *)skb->data; 1518 1519 skb->dev = reg_dev; 1519 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 1520 1520 skb->protocol = htons(ETH_P_IP); 1521 1521 skb->ip_summed = 0; 1522 1522 skb->pkt_type = PACKET_HOST;
+3 -7
net/ipv4/ipvs/ip_vs_ctl.c
··· 735 735 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 736 736 return -EINVAL; 737 737 738 - dest = kmalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 738 + dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 739 739 if (dest == NULL) { 740 740 IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n"); 741 741 return -ENOMEM; 742 742 } 743 - memset(dest, 0, sizeof(struct ip_vs_dest)); 744 743 745 744 dest->protocol = svc->protocol; 746 745 dest->vaddr = svc->addr; ··· 1049 1050 goto out_mod_dec; 1050 1051 } 1051 1052 1052 - svc = (struct ip_vs_service *) 1053 - kmalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1053 + svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1054 1054 if (svc == NULL) { 1055 1055 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1056 1056 ret = -ENOMEM; 1057 1057 goto out_err; 1058 1058 } 1059 - memset(svc, 0, sizeof(struct ip_vs_service)); 1060 1059 1061 1060 /* I'm the first user of the service */ 1062 1061 atomic_set(&svc->usecnt, 1); ··· 1794 1797 { 1795 1798 struct seq_file *seq; 1796 1799 int rc = -ENOMEM; 1797 - struct ip_vs_iter *s = kmalloc(sizeof(*s), GFP_KERNEL); 1800 + struct ip_vs_iter *s = kzalloc(sizeof(*s), GFP_KERNEL); 1798 1801 1799 1802 if (!s) 1800 1803 goto out; ··· 1805 1808 1806 1809 seq = file->private_data; 1807 1810 seq->private = s; 1808 - memset(s, 0, sizeof(*s)); 1809 1811 out: 1810 1812 return rc; 1811 1813 out_kfree:
+1 -2
net/ipv4/ipvs/ip_vs_est.c
··· 123 123 { 124 124 struct ip_vs_estimator *est; 125 125 126 - est = kmalloc(sizeof(*est), GFP_KERNEL); 126 + est = kzalloc(sizeof(*est), GFP_KERNEL); 127 127 if (est == NULL) 128 128 return -ENOMEM; 129 129 130 - memset(est, 0, sizeof(*est)); 131 130 est->stats = stats; 132 131 est->last_conns = stats->conns; 133 132 est->cps = stats->cps<<10;
+1 -2
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 172 172 struct clusterip_config *c; 173 173 char buffer[16]; 174 174 175 - c = kmalloc(sizeof(*c), GFP_ATOMIC); 175 + c = kzalloc(sizeof(*c), GFP_ATOMIC); 176 176 if (!c) 177 177 return NULL; 178 178 179 - memset(c, 0, sizeof(*c)); 180 179 c->dev = dev; 181 180 c->clusterip = ip; 182 181 memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
+1 -2
net/ipv4/tcp_ipv4.c
··· 1640 1640 if (unlikely(afinfo == NULL)) 1641 1641 return -EINVAL; 1642 1642 1643 - s = kmalloc(sizeof(*s), GFP_KERNEL); 1643 + s = kzalloc(sizeof(*s), GFP_KERNEL); 1644 1644 if (!s) 1645 1645 return -ENOMEM; 1646 - memset(s, 0, sizeof(*s)); 1647 1646 s->family = afinfo->family; 1648 1647 s->seq_ops.start = tcp_seq_start; 1649 1648 s->seq_ops.next = tcp_seq_next;
+1 -2
net/ipv4/udp.c
··· 1468 1468 struct udp_seq_afinfo *afinfo = PDE(inode)->data; 1469 1469 struct seq_file *seq; 1470 1470 int rc = -ENOMEM; 1471 - struct udp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1471 + struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1472 1472 1473 1473 if (!s) 1474 1474 goto out; 1475 - memset(s, 0, sizeof(*s)); 1476 1475 s->family = afinfo->family; 1477 1476 s->seq_ops.start = udp_seq_start; 1478 1477 s->seq_ops.next = udp_seq_next;
-1
net/ipv4/xfrm4_mode_tunnel.c
··· 92 92 skb->mac.raw = memmove(skb->data - skb->mac_len, 93 93 skb->mac.raw, skb->mac_len); 94 94 skb->nh.raw = skb->data; 95 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 96 95 err = 0; 97 96 98 97 out:
+1 -2
net/ipv6/ip6_tunnel.c
··· 567 567 568 568 int opt_len = sizeof(*opt) + 8; 569 569 570 - if (!(opt = kmalloc(opt_len, GFP_ATOMIC))) { 570 + if (!(opt = kzalloc(opt_len, GFP_ATOMIC))) { 571 571 return NULL; 572 572 } 573 - memset(opt, 0, opt_len); 574 573 opt->tot_len = opt_len; 575 574 opt->dst0opt = (struct ipv6_opt_hdr *) (opt + 1); 576 575 opt->opt_nflen = 8;
-1
net/ipv6/sit.c
··· 380 380 secpath_reset(skb); 381 381 skb->mac.raw = skb->nh.raw; 382 382 skb->nh.raw = skb->data; 383 - memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); 384 383 IPCB(skb)->flags = 0; 385 384 skb->protocol = htons(ETH_P_IPV6); 386 385 skb->pkt_type = PACKET_HOST;
+1 -1
net/irda/af_irda.c
··· 308 308 309 309 IRDA_ASSERT(self != NULL, return;); 310 310 311 - skb = dev_alloc_skb(64); 311 + skb = alloc_skb(64, GFP_ATOMIC); 312 312 if (skb == NULL) { 313 313 IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", 314 314 __FUNCTION__);
+1 -3
net/irda/ircomm/ircomm_core.c
··· 115 115 116 116 IRDA_ASSERT(ircomm != NULL, return NULL;); 117 117 118 - self = kmalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 118 + self = kzalloc(sizeof(struct ircomm_cb), GFP_ATOMIC); 119 119 if (self == NULL) 120 120 return NULL; 121 - 122 - memset(self, 0, sizeof(struct ircomm_cb)); 123 121 124 122 self->notify = *notify; 125 123 self->magic = IRCOMM_MAGIC;
+2 -2
net/irda/ircomm/ircomm_lmp.c
··· 81 81 82 82 /* Any userdata supplied? */ 83 83 if (userdata == NULL) { 84 - tx_skb = dev_alloc_skb(64); 84 + tx_skb = alloc_skb(64, GFP_ATOMIC); 85 85 if (!tx_skb) 86 86 return -ENOMEM; 87 87 ··· 115 115 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ ); 116 116 117 117 if (!userdata) { 118 - tx_skb = dev_alloc_skb(64); 118 + tx_skb = alloc_skb(64, GFP_ATOMIC); 119 119 if (!tx_skb) 120 120 return -ENOMEM; 121 121
+1 -1
net/irda/ircomm/ircomm_param.c
··· 121 121 122 122 skb = self->ctrl_skb; 123 123 if (!skb) { 124 - skb = dev_alloc_skb(256); 124 + skb = alloc_skb(256, GFP_ATOMIC); 125 125 if (!skb) { 126 126 spin_unlock_irqrestore(&self->spinlock, flags); 127 127 return -ENOMEM;
+4 -4
net/irda/ircomm/ircomm_tty.c
··· 379 379 self = hashbin_lock_find(ircomm_tty, line, NULL); 380 380 if (!self) { 381 381 /* No, so make new instance */ 382 - self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 382 + self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); 383 383 if (self == NULL) { 384 384 IRDA_ERROR("%s(), kmalloc failed!\n", __FUNCTION__); 385 385 return -ENOMEM; 386 386 } 387 - memset(self, 0, sizeof(struct ircomm_tty_cb)); 388 387 389 388 self->magic = IRCOMM_TTY_MAGIC; 390 389 self->flow = FLOW_STOP; ··· 758 759 } 759 760 } else { 760 761 /* Prepare a full sized frame */ 761 - skb = dev_alloc_skb(self->max_data_size+ 762 - self->max_header_size); 762 + skb = alloc_skb(self->max_data_size+ 763 + self->max_header_size, 764 + GFP_ATOMIC); 763 765 if (!skb) { 764 766 spin_unlock_irqrestore(&self->spinlock, flags); 765 767 return -ENOBUFS;
+1 -3
net/irda/irda_device.c
··· 401 401 } 402 402 403 403 /* Allocate dongle info for this instance */ 404 - dongle = kmalloc(sizeof(dongle_t), GFP_KERNEL); 404 + dongle = kzalloc(sizeof(dongle_t), GFP_KERNEL); 405 405 if (!dongle) 406 406 goto out; 407 - 408 - memset(dongle, 0, sizeof(dongle_t)); 409 407 410 408 /* Bind the registration info to this particular instance */ 411 409 dongle->issue = reg;
+5 -4
net/irda/iriap.c
··· 345 345 IRDA_ASSERT(self != NULL, return;); 346 346 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 347 347 348 - tx_skb = dev_alloc_skb(64); 348 + tx_skb = alloc_skb(64, GFP_ATOMIC); 349 349 if (tx_skb == NULL) { 350 350 IRDA_DEBUG(0, "%s(), Could not allocate an sk_buff of length %d\n", 351 351 __FUNCTION__, 64); ··· 396 396 attr_len = strlen(attr); /* Up to IAS_MAX_ATTRIBNAME = 60 */ 397 397 398 398 skb_len = self->max_header_size+2+name_len+1+attr_len+4; 399 - tx_skb = dev_alloc_skb(skb_len); 399 + tx_skb = alloc_skb(skb_len, GFP_ATOMIC); 400 400 if (!tx_skb) 401 401 return -ENOMEM; 402 402 ··· 562 562 * value. We add 32 bytes because of the 6 bytes for the frame and 563 563 * max 5 bytes for the value coding. 564 564 */ 565 - tx_skb = dev_alloc_skb(value->len + self->max_header_size + 32); 565 + tx_skb = alloc_skb(value->len + self->max_header_size + 32, 566 + GFP_ATOMIC); 566 567 if (!tx_skb) 567 568 return; 568 569 ··· 701 700 IRDA_ASSERT(self != NULL, return;); 702 701 IRDA_ASSERT(self->magic == IAS_MAGIC, return;); 703 702 704 - tx_skb = dev_alloc_skb(64); 703 + tx_skb = alloc_skb(64, GFP_ATOMIC); 705 704 if (!tx_skb) 706 705 return; 707 706
+1 -1
net/irda/iriap_event.c
··· 365 365 366 366 switch (event) { 367 367 case IAP_LM_CONNECT_INDICATION: 368 - tx_skb = dev_alloc_skb(64); 368 + tx_skb = alloc_skb(64, GFP_ATOMIC); 369 369 if (tx_skb == NULL) { 370 370 IRDA_WARNING("%s: unable to malloc!\n", __FUNCTION__); 371 371 return;
+8 -16
net/irda/irias_object.c
··· 82 82 83 83 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__); 84 84 85 - obj = kmalloc(sizeof(struct ias_object), GFP_ATOMIC); 85 + obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); 86 86 if (obj == NULL) { 87 87 IRDA_WARNING("%s(), Unable to allocate object!\n", 88 88 __FUNCTION__); 89 89 return NULL; 90 90 } 91 - memset(obj, 0, sizeof( struct ias_object)); 92 91 93 92 obj->magic = IAS_OBJECT_MAGIC; 94 93 obj->name = strndup(name, IAS_MAX_CLASSNAME); ··· 345 346 IRDA_ASSERT(obj->magic == IAS_OBJECT_MAGIC, return;); 346 347 IRDA_ASSERT(name != NULL, return;); 347 348 348 - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 349 + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 349 350 if (attrib == NULL) { 350 351 IRDA_WARNING("%s: Unable to allocate attribute!\n", 351 352 __FUNCTION__); 352 353 return; 353 354 } 354 - memset(attrib, 0, sizeof( struct ias_attrib)); 355 355 356 356 attrib->magic = IAS_ATTRIB_MAGIC; 357 357 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 380 382 IRDA_ASSERT(name != NULL, return;); 381 383 IRDA_ASSERT(octets != NULL, return;); 382 384 383 - attrib = kmalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 385 + attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); 384 386 if (attrib == NULL) { 385 387 IRDA_WARNING("%s: Unable to allocate attribute!\n", 386 388 __FUNCTION__); 387 389 return; 388 390 } 389 - memset(attrib, 0, sizeof( struct ias_attrib)); 390 391 391 392 attrib->magic = IAS_ATTRIB_MAGIC; 392 393 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 413 416 IRDA_ASSERT(name != NULL, return;); 414 417 IRDA_ASSERT(value != NULL, return;); 415 418 416 - attrib = kmalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 419 + attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); 417 420 if (attrib == NULL) { 418 421 IRDA_WARNING("%s: Unable to allocate attribute!\n", 419 422 __FUNCTION__); 420 423 return; 421 424 } 422 - memset(attrib, 0, sizeof( struct ias_attrib)); 423 425 424 426 attrib->magic = IAS_ATTRIB_MAGIC; 
425 427 attrib->name = strndup(name, IAS_MAX_ATTRIBNAME); ··· 439 443 { 440 444 struct ias_value *value; 441 445 442 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 446 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 443 447 if (value == NULL) { 444 448 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 445 449 return NULL; 446 450 } 447 - memset(value, 0, sizeof(struct ias_value)); 448 451 449 452 value->type = IAS_INTEGER; 450 453 value->len = 4; ··· 464 469 { 465 470 struct ias_value *value; 466 471 467 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 472 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 468 473 if (value == NULL) { 469 474 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 470 475 return NULL; 471 476 } 472 - memset( value, 0, sizeof( struct ias_value)); 473 477 474 478 value->type = IAS_STRING; 475 479 value->charset = CS_ASCII; ··· 489 495 { 490 496 struct ias_value *value; 491 497 492 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 498 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 493 499 if (value == NULL) { 494 500 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 495 501 return NULL; 496 502 } 497 - memset(value, 0, sizeof(struct ias_value)); 498 503 499 504 value->type = IAS_OCT_SEQ; 500 505 /* Check length */ ··· 515 522 { 516 523 struct ias_value *value; 517 524 518 - value = kmalloc(sizeof(struct ias_value), GFP_ATOMIC); 525 + value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); 519 526 if (value == NULL) { 520 527 IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__); 521 528 return NULL; 522 529 } 523 - memset(value, 0, sizeof(struct ias_value)); 524 530 525 531 value->type = IAS_MISSING; 526 532 value->len = 0;
+8 -8
net/irda/irlan/irlan_common.c
··· 636 636 IRDA_ASSERT(self != NULL, return;); 637 637 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 638 638 639 - skb = dev_alloc_skb(64); 639 + skb = alloc_skb(64, GFP_ATOMIC); 640 640 if (!skb) 641 641 return; 642 642 ··· 668 668 IRDA_ASSERT(self != NULL, return;); 669 669 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 670 670 671 - skb = dev_alloc_skb(64); 671 + skb = alloc_skb(64, GFP_ATOMIC); 672 672 if (!skb) 673 673 return; 674 674 ··· 704 704 if (self->client.tsap_ctrl == NULL) 705 705 return; 706 706 707 - skb = dev_alloc_skb(64); 707 + skb = alloc_skb(64, GFP_ATOMIC); 708 708 if (!skb) 709 709 return; 710 710 ··· 739 739 IRDA_ASSERT(self != NULL, return;); 740 740 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 741 741 742 - skb = dev_alloc_skb(128); 742 + skb = alloc_skb(128, GFP_ATOMIC); 743 743 if (!skb) 744 744 return; 745 745 ··· 777 777 IRDA_ASSERT(self != NULL, return;); 778 778 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 779 779 780 - skb = dev_alloc_skb(128); 780 + skb = alloc_skb(128, GFP_ATOMIC); 781 781 if (!skb) 782 782 return; 783 783 ··· 816 816 IRDA_ASSERT(self != NULL, return;); 817 817 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 818 818 819 - skb = dev_alloc_skb(128); 819 + skb = alloc_skb(128, GFP_ATOMIC); 820 820 if (!skb) 821 821 return; 822 822 ··· 856 856 IRDA_ASSERT(self != NULL, return;); 857 857 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 858 858 859 - skb = dev_alloc_skb(128); 859 + skb = alloc_skb(128, GFP_ATOMIC); 860 860 if (!skb) 861 861 return; 862 862 ··· 891 891 IRDA_ASSERT(self != NULL, return;); 892 892 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 893 893 894 - skb = dev_alloc_skb(64); 894 + skb = alloc_skb(64, GFP_ATOMIC); 895 895 if (!skb) 896 896 return; 897 897
+1 -1
net/irda/irlan/irlan_provider.c
··· 296 296 IRDA_ASSERT(self != NULL, return;); 297 297 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 298 298 299 - skb = dev_alloc_skb(128); 299 + skb = alloc_skb(128, GFP_ATOMIC); 300 300 if (!skb) 301 301 return; 302 302
+3 -5
net/irda/irlap.c
··· 116 116 IRDA_DEBUG(4, "%s()\n", __FUNCTION__); 117 117 118 118 /* Initialize the irlap structure. */ 119 - self = kmalloc(sizeof(struct irlap_cb), GFP_KERNEL); 119 + self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); 120 120 if (self == NULL) 121 121 return NULL; 122 122 123 - memset(self, 0, sizeof(struct irlap_cb)); 124 123 self->magic = LAP_MAGIC; 125 124 126 125 /* Make a binding between the layers */ ··· 881 882 /* Change speed now, or just piggyback speed on frames */ 882 883 if (now) { 883 884 /* Send down empty frame to trigger speed change */ 884 - skb = dev_alloc_skb(0); 885 + skb = alloc_skb(0, GFP_ATOMIC); 885 886 if (skb) 886 887 irlap_queue_xmit(self, skb); 887 888 } ··· 1221 1222 { 1222 1223 struct seq_file *seq; 1223 1224 int rc = -ENOMEM; 1224 - struct irlap_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); 1225 + struct irlap_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); 1225 1226 1226 1227 if (!s) 1227 1228 goto out; ··· 1237 1238 1238 1239 seq = file->private_data; 1239 1240 seq->private = s; 1240 - memset(s, 0, sizeof(*s)); 1241 1241 out: 1242 1242 return rc; 1243 1243 out_kfree:
+9 -10
net/irda/irlap_frame.c
··· 117 117 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 118 118 119 119 /* Allocate frame */ 120 - tx_skb = dev_alloc_skb(64); 120 + tx_skb = alloc_skb(64, GFP_ATOMIC); 121 121 if (!tx_skb) 122 122 return; 123 123 ··· 210 210 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 211 211 212 212 /* Allocate frame */ 213 - tx_skb = dev_alloc_skb(64); 213 + tx_skb = alloc_skb(64, GFP_ATOMIC); 214 214 if (!tx_skb) 215 215 return; 216 216 ··· 250 250 IRDA_ASSERT(self != NULL, return;); 251 251 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 252 252 253 - tx_skb = dev_alloc_skb(32); 253 + tx_skb = alloc_skb(32, GFP_ATOMIC); 254 254 if (!tx_skb) 255 255 return; 256 256 ··· 282 282 IRDA_ASSERT(self != NULL, return;); 283 283 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 284 284 285 - tx_skb = dev_alloc_skb(16); 285 + tx_skb = alloc_skb(16, GFP_ATOMIC); 286 286 if (!tx_skb) 287 287 return; 288 288 ··· 315 315 IRDA_ASSERT(self->magic == LAP_MAGIC, return;); 316 316 IRDA_ASSERT(discovery != NULL, return;); 317 317 318 - tx_skb = dev_alloc_skb(64); 318 + tx_skb = alloc_skb(64, GFP_ATOMIC); 319 319 if (!tx_skb) 320 320 return; 321 321 ··· 422 422 return; 423 423 } 424 424 425 - if ((discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 425 + if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { 426 426 IRDA_WARNING("%s: kmalloc failed!\n", __FUNCTION__); 427 427 return; 428 428 } 429 - memset(discovery, 0, sizeof(discovery_t)); 430 429 431 430 discovery->data.daddr = info->daddr; 432 431 discovery->data.saddr = self->saddr; ··· 575 576 struct sk_buff *tx_skb; 576 577 __u8 *frame; 577 578 578 - tx_skb = dev_alloc_skb(16); 579 + tx_skb = alloc_skb(16, GFP_ATOMIC); 579 580 if (!tx_skb) 580 581 return; 581 582 ··· 600 601 struct sk_buff *tx_skb; 601 602 __u8 *frame; 602 603 603 - tx_skb = dev_alloc_skb(16); 604 + tx_skb = alloc_skb(16, GFP_ATOMIC); 604 605 if (!tx_skb) 605 606 return; 606 607 ··· 1214 1215 struct test_frame *frame; 1215 1216 __u8 *info; 
1216 1217 1217 - tx_skb = dev_alloc_skb(cmd->len+sizeof(struct test_frame)); 1218 + tx_skb = alloc_skb(cmd->len+sizeof(struct test_frame), GFP_ATOMIC); 1218 1219 if (!tx_skb) 1219 1220 return; 1220 1221
+4 -7
net/irda/irlmp.c
··· 78 78 { 79 79 IRDA_DEBUG(1, "%s()\n", __FUNCTION__); 80 80 /* Initialize the irlmp structure. */ 81 - irlmp = kmalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 81 + irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); 82 82 if (irlmp == NULL) 83 83 return -ENOMEM; 84 - memset(irlmp, 0, sizeof(struct irlmp_cb)); 85 84 86 85 irlmp->magic = LMP_MAGIC; 87 86 ··· 159 160 return NULL; 160 161 161 162 /* Allocate new instance of a LSAP connection */ 162 - self = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 163 + self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); 163 164 if (self == NULL) { 164 165 IRDA_ERROR("%s: can't allocate memory\n", __FUNCTION__); 165 166 return NULL; 166 167 } 167 - memset(self, 0, sizeof(struct lsap_cb)); 168 168 169 169 self->magic = LMP_LSAP_MAGIC; 170 170 self->slsap_sel = slsap_sel; ··· 286 288 /* 287 289 * Allocate new instance of a LSAP connection 288 290 */ 289 - lap = kmalloc(sizeof(struct lap_cb), GFP_KERNEL); 291 + lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); 290 292 if (lap == NULL) { 291 293 IRDA_ERROR("%s: unable to kmalloc\n", __FUNCTION__); 292 294 return; 293 295 } 294 - memset(lap, 0, sizeof(struct lap_cb)); 295 296 296 297 lap->irlap = irlap; 297 298 lap->magic = LMP_LAP_MAGIC; ··· 392 395 393 396 /* Any userdata? */ 394 397 if (tx_skb == NULL) { 395 - tx_skb = dev_alloc_skb(64); 398 + tx_skb = alloc_skb(64, GFP_ATOMIC); 396 399 if (!tx_skb) 397 400 return -ENOMEM; 398 401
+1 -2
net/irda/irnet/irnet_ppp.c
··· 476 476 #endif /* SECURE_DEVIRNET */ 477 477 478 478 /* Allocate a private structure for this IrNET instance */ 479 - ap = kmalloc(sizeof(*ap), GFP_KERNEL); 479 + ap = kzalloc(sizeof(*ap), GFP_KERNEL); 480 480 DABORT(ap == NULL, -ENOMEM, FS_ERROR, "Can't allocate struct irnet...\n"); 481 481 482 482 /* initialize the irnet structure */ 483 - memset(ap, 0, sizeof(*ap)); 484 483 ap->file = file; 485 484 486 485 /* PPP channel setup */
+9 -11
net/irda/irttp.c
··· 85 85 */ 86 86 int __init irttp_init(void) 87 87 { 88 - irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); 88 + irttp = kzalloc(sizeof(struct irttp_cb), GFP_KERNEL); 89 89 if (irttp == NULL) 90 90 return -ENOMEM; 91 - memset(irttp, 0, sizeof(struct irttp_cb)); 92 91 93 92 irttp->magic = TTP_MAGIC; 94 93 ··· 305 306 IRDA_DEBUG(2, "%s(), fragmenting ...\n", __FUNCTION__); 306 307 307 308 /* Make new segment */ 308 - frag = dev_alloc_skb(self->max_seg_size+self->max_header_size); 309 + frag = alloc_skb(self->max_seg_size+self->max_header_size, 310 + GFP_ATOMIC); 309 311 if (!frag) 310 312 return; 311 313 ··· 389 389 return NULL; 390 390 } 391 391 392 - self = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 392 + self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); 393 393 if (self == NULL) { 394 394 IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __FUNCTION__); 395 395 return NULL; 396 396 } 397 - memset(self, 0, sizeof(struct tsap_cb)); 398 397 spin_lock_init(&self->lock); 399 398 400 399 /* Initialise todo timer */ ··· 804 805 self->send_credit, self->avail_credit, self->remote_credit); 805 806 806 807 /* Give credit to peer */ 807 - tx_skb = dev_alloc_skb(64); 808 + tx_skb = alloc_skb(64, GFP_ATOMIC); 808 809 if (!tx_skb) 809 810 return; 810 811 ··· 1093 1094 1094 1095 /* Any userdata supplied? */ 1095 1096 if (userdata == NULL) { 1096 - tx_skb = dev_alloc_skb(64); 1097 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1097 1098 if (!tx_skb) 1098 1099 return -ENOMEM; 1099 1100 ··· 1341 1342 1342 1343 /* Any userdata supplied? 
*/ 1343 1344 if (userdata == NULL) { 1344 - tx_skb = dev_alloc_skb(64); 1345 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1345 1346 if (!tx_skb) 1346 1347 return -ENOMEM; 1347 1348 ··· 1540 1541 1541 1542 if (!userdata) { 1542 1543 struct sk_buff *tx_skb; 1543 - tx_skb = dev_alloc_skb(64); 1544 + tx_skb = alloc_skb(64, GFP_ATOMIC); 1544 1545 if (!tx_skb) 1545 1546 return -ENOMEM; 1546 1547 ··· 1875 1876 int rc = -ENOMEM; 1876 1877 struct irttp_iter_state *s; 1877 1878 1878 - s = kmalloc(sizeof(*s), GFP_KERNEL); 1879 + s = kzalloc(sizeof(*s), GFP_KERNEL); 1879 1880 if (!s) 1880 1881 goto out; 1881 1882 ··· 1885 1886 1886 1887 seq = file->private_data; 1887 1888 seq->private = s; 1888 - memset(s, 0, sizeof(*s)); 1889 1889 out: 1890 1890 return rc; 1891 1891 out_kfree:
+1 -3
net/lapb/lapb_iface.c
··· 115 115 */ 116 116 static struct lapb_cb *lapb_create_cb(void) 117 117 { 118 - struct lapb_cb *lapb = kmalloc(sizeof(*lapb), GFP_ATOMIC); 118 + struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); 119 119 120 120 121 121 if (!lapb) 122 122 goto out; 123 - 124 - memset(lapb, 0x00, sizeof(*lapb)); 125 123 126 124 skb_queue_head_init(&lapb->write_queue); 127 125 skb_queue_head_init(&lapb->ack_queue);
+1 -2
net/llc/llc_core.c
··· 33 33 */ 34 34 static struct llc_sap *llc_sap_alloc(void) 35 35 { 36 - struct llc_sap *sap = kmalloc(sizeof(*sap), GFP_ATOMIC); 36 + struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC); 37 37 38 38 if (sap) { 39 - memset(sap, 0, sizeof(*sap)); 40 39 sap->state = LLC_SAP_STATE_ACTIVE; 41 40 memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); 42 41 rwlock_init(&sap->sk_list.lock);
+4 -9
net/netlink/af_netlink.c
··· 562 562 if (err) 563 563 return err; 564 564 565 - nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL); 565 + nlk->groups = kzalloc(NLGRPSZ(groups), GFP_KERNEL); 566 566 if (nlk->groups == NULL) 567 567 return -ENOMEM; 568 - memset(nlk->groups, 0, NLGRPSZ(groups)); 569 568 nlk->ngroups = groups; 570 569 return 0; 571 570 } ··· 1392 1393 struct sock *sk; 1393 1394 struct netlink_sock *nlk; 1394 1395 1395 - cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1396 + cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1396 1397 if (cb == NULL) 1397 1398 return -ENOBUFS; 1398 1399 1399 - memset(cb, 0, sizeof(*cb)); 1400 1400 cb->dump = dump; 1401 1401 cb->done = done; 1402 1402 cb->nlh = nlh; ··· 1666 1668 struct nl_seq_iter *iter; 1667 1669 int err; 1668 1670 1669 - iter = kmalloc(sizeof(*iter), GFP_KERNEL); 1671 + iter = kzalloc(sizeof(*iter), GFP_KERNEL); 1670 1672 if (!iter) 1671 1673 return -ENOMEM; 1672 1674 ··· 1676 1678 return err; 1677 1679 } 1678 1680 1679 - memset(iter, 0, sizeof(*iter)); 1680 1681 seq = file->private_data; 1681 1682 seq->private = iter; 1682 1683 return 0; ··· 1744 1747 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) 1745 1748 netlink_skb_parms_too_large(); 1746 1749 1747 - nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); 1750 + nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 1748 1751 if (!nl_table) { 1749 1752 enomem: 1750 1753 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); 1751 1754 return -ENOMEM; 1752 1755 } 1753 - 1754 - memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS); 1755 1756 1756 1757 if (num_physpages >= (128 * 1024)) 1757 1758 max = num_physpages >> (21 - PAGE_SHIFT);
+2 -4
net/rxrpc/connection.c
··· 58 58 _enter("%p",peer); 59 59 60 60 /* allocate and initialise a connection record */ 61 - conn = kmalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 61 + conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL); 62 62 if (!conn) { 63 63 _leave(" = -ENOMEM"); 64 64 return -ENOMEM; 65 65 } 66 66 67 - memset(conn, 0, sizeof(struct rxrpc_connection)); 68 67 atomic_set(&conn->usage, 1); 69 68 70 69 INIT_LIST_HEAD(&conn->link); ··· 534 535 return -EINVAL; 535 536 } 536 537 537 - msg = kmalloc(sizeof(struct rxrpc_message), alloc_flags); 538 + msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags); 538 539 if (!msg) { 539 540 _leave(" = -ENOMEM"); 540 541 return -ENOMEM; 541 542 } 542 543 543 - memset(msg, 0, sizeof(*msg)); 544 544 atomic_set(&msg->usage, 1); 545 545 546 546 INIT_LIST_HEAD(&msg->link);
+1 -2
net/rxrpc/peer.c
··· 58 58 _enter("%p,%08x", trans, ntohl(addr)); 59 59 60 60 /* allocate and initialise a peer record */ 61 - peer = kmalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 61 + peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL); 62 62 if (!peer) { 63 63 _leave(" = -ENOMEM"); 64 64 return -ENOMEM; 65 65 } 66 66 67 - memset(peer, 0, sizeof(struct rxrpc_peer)); 68 67 atomic_set(&peer->usage, 1); 69 68 70 69 INIT_LIST_HEAD(&peer->link);
+2 -4
net/rxrpc/transport.c
··· 68 68 69 69 _enter("%hu", port); 70 70 71 - trans = kmalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 71 + trans = kzalloc(sizeof(struct rxrpc_transport), GFP_KERNEL); 72 72 if (!trans) 73 73 return -ENOMEM; 74 74 75 - memset(trans, 0, sizeof(struct rxrpc_transport)); 76 75 atomic_set(&trans->usage, 1); 77 76 INIT_LIST_HEAD(&trans->services); 78 77 INIT_LIST_HEAD(&trans->link); ··· 311 312 312 313 _enter(""); 313 314 314 - msg = kmalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 315 + msg = kzalloc(sizeof(struct rxrpc_message), GFP_KERNEL); 315 316 if (!msg) { 316 317 _leave(" = -ENOMEM"); 317 318 return -ENOMEM; 318 319 } 319 320 320 - memset(msg, 0, sizeof(*msg)); 321 321 atomic_set(&msg->usage, 1); 322 322 list_add_tail(&msg->link,msgq); 323 323
+3 -6
net/sched/act_api.c
··· 312 312 } 313 313 314 314 *err = -ENOMEM; 315 - a = kmalloc(sizeof(*a), GFP_KERNEL); 315 + a = kzalloc(sizeof(*a), GFP_KERNEL); 316 316 if (a == NULL) 317 317 goto err_mod; 318 - memset(a, 0, sizeof(*a)); 319 318 320 319 /* backward compatibility for policer */ 321 320 if (name == NULL) ··· 491 492 index = *(int *)RTA_DATA(tb[TCA_ACT_INDEX - 1]); 492 493 493 494 *err = -ENOMEM; 494 - a = kmalloc(sizeof(struct tc_action), GFP_KERNEL); 495 + a = kzalloc(sizeof(struct tc_action), GFP_KERNEL); 495 496 if (a == NULL) 496 497 return NULL; 497 - memset(a, 0, sizeof(struct tc_action)); 498 498 499 499 *err = -EINVAL; 500 500 a->ops = tc_lookup_action(tb[TCA_ACT_KIND - 1]); ··· 529 531 { 530 532 struct tc_action *act; 531 533 532 - act = kmalloc(sizeof(*act), GFP_KERNEL); 534 + act = kzalloc(sizeof(*act), GFP_KERNEL); 533 535 if (act == NULL) { 534 536 printk("create_a: failed to alloc!\n"); 535 537 return NULL; 536 538 } 537 - memset(act, 0, sizeof(*act)); 538 539 act->order = i; 539 540 return act; 540 541 }
+1 -2
net/sched/act_pedit.c
··· 209 209 s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key); 210 210 211 211 /* netlink spinlocks held above us - must use ATOMIC */ 212 - opt = kmalloc(s, GFP_ATOMIC); 212 + opt = kzalloc(s, GFP_ATOMIC); 213 213 if (opt == NULL) 214 214 return -ENOBUFS; 215 - memset(opt, 0, s); 216 215 217 216 memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 218 217 opt->index = p->index;
+2 -4
net/sched/act_police.c
··· 196 196 return ret; 197 197 } 198 198 199 - p = kmalloc(sizeof(*p), GFP_KERNEL); 199 + p = kzalloc(sizeof(*p), GFP_KERNEL); 200 200 if (p == NULL) 201 201 return -ENOMEM; 202 - memset(p, 0, sizeof(*p)); 203 202 204 203 ret = ACT_P_CREATED; 205 204 p->refcnt = 1; ··· 428 429 return p; 429 430 } 430 431 431 - p = kmalloc(sizeof(*p), GFP_KERNEL); 432 + p = kzalloc(sizeof(*p), GFP_KERNEL); 432 433 if (p == NULL) 433 434 return NULL; 434 435 435 - memset(p, 0, sizeof(*p)); 436 436 p->refcnt = 1; 437 437 spin_lock_init(&p->lock); 438 438 p->stats_lock = &p->lock;
+2 -4
net/sched/cls_basic.c
··· 178 178 179 179 err = -ENOBUFS; 180 180 if (head == NULL) { 181 - head = kmalloc(sizeof(*head), GFP_KERNEL); 181 + head = kzalloc(sizeof(*head), GFP_KERNEL); 182 182 if (head == NULL) 183 183 goto errout; 184 184 185 - memset(head, 0, sizeof(*head)); 186 185 INIT_LIST_HEAD(&head->flist); 187 186 tp->root = head; 188 187 } 189 188 190 - f = kmalloc(sizeof(*f), GFP_KERNEL); 189 + f = kzalloc(sizeof(*f), GFP_KERNEL); 191 190 if (f == NULL) 192 191 goto errout; 193 - memset(f, 0, sizeof(*f)); 194 192 195 193 err = -EINVAL; 196 194 if (handle)
+2 -4
net/sched/cls_fw.c
··· 267 267 return -EINVAL; 268 268 269 269 if (head == NULL) { 270 - head = kmalloc(sizeof(struct fw_head), GFP_KERNEL); 270 + head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); 271 271 if (head == NULL) 272 272 return -ENOBUFS; 273 - memset(head, 0, sizeof(*head)); 274 273 275 274 tcf_tree_lock(tp); 276 275 tp->root = head; 277 276 tcf_tree_unlock(tp); 278 277 } 279 278 280 - f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL); 279 + f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); 281 280 if (f == NULL) 282 281 return -ENOBUFS; 283 - memset(f, 0, sizeof(*f)); 284 282 285 283 f->id = handle; 286 284
+3 -6
net/sched/cls_route.c
··· 396 396 h1 = to_hash(nhandle); 397 397 if ((b = head->table[h1]) == NULL) { 398 398 err = -ENOBUFS; 399 - b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL); 399 + b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); 400 400 if (b == NULL) 401 401 goto errout; 402 - memset(b, 0, sizeof(*b)); 403 402 404 403 tcf_tree_lock(tp); 405 404 head->table[h1] = b; ··· 474 475 475 476 err = -ENOBUFS; 476 477 if (head == NULL) { 477 - head = kmalloc(sizeof(struct route4_head), GFP_KERNEL); 478 + head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); 478 479 if (head == NULL) 479 480 goto errout; 480 - memset(head, 0, sizeof(struct route4_head)); 481 481 482 482 tcf_tree_lock(tp); 483 483 tp->root = head; 484 484 tcf_tree_unlock(tp); 485 485 } 486 486 487 - f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL); 487 + f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); 488 488 if (f == NULL) 489 489 goto errout; 490 - memset(f, 0, sizeof(*f)); 491 490 492 491 err = route4_set_parms(tp, base, f, handle, head, tb, 493 492 tca[TCA_RATE-1], 1);
+3 -6
net/sched/cls_rsvp.h
··· 240 240 { 241 241 struct rsvp_head *data; 242 242 243 - data = kmalloc(sizeof(struct rsvp_head), GFP_KERNEL); 243 + data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); 244 244 if (data) { 245 - memset(data, 0, sizeof(struct rsvp_head)); 246 245 tp->root = data; 247 246 return 0; 248 247 } ··· 445 446 goto errout2; 446 447 447 448 err = -ENOBUFS; 448 - f = kmalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 449 + f = kzalloc(sizeof(struct rsvp_filter), GFP_KERNEL); 449 450 if (f == NULL) 450 451 goto errout2; 451 452 452 - memset(f, 0, sizeof(*f)); 453 453 h2 = 16; 454 454 if (tb[TCA_RSVP_SRC-1]) { 455 455 err = -EINVAL; ··· 530 532 /* No session found. Create new one. */ 531 533 532 534 err = -ENOBUFS; 533 - s = kmalloc(sizeof(struct rsvp_session), GFP_KERNEL); 535 + s = kzalloc(sizeof(struct rsvp_session), GFP_KERNEL); 534 536 if (s == NULL) 535 537 goto errout; 536 - memset(s, 0, sizeof(*s)); 537 538 memcpy(s->dst, dst, sizeof(s->dst)); 538 539 539 540 if (pinfo) {
+4 -8
net/sched/cls_tcindex.c
··· 148 148 struct tcindex_data *p; 149 149 150 150 DPRINTK("tcindex_init(tp %p)\n",tp); 151 - p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL); 151 + p = kzalloc(sizeof(struct tcindex_data),GFP_KERNEL); 152 152 if (!p) 153 153 return -ENOMEM; 154 154 155 - memset(p, 0, sizeof(*p)); 156 155 p->mask = 0xffff; 157 156 p->hash = DEFAULT_HASH_SIZE; 158 157 p->fall_through = 1; ··· 295 296 err = -ENOMEM; 296 297 if (!cp.perfect && !cp.h) { 297 298 if (valid_perfect_hash(&cp)) { 298 - cp.perfect = kmalloc(cp.hash * sizeof(*r), GFP_KERNEL); 299 + cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); 299 300 if (!cp.perfect) 300 301 goto errout; 301 - memset(cp.perfect, 0, cp.hash * sizeof(*r)); 302 302 balloc = 1; 303 303 } else { 304 - cp.h = kmalloc(cp.hash * sizeof(f), GFP_KERNEL); 304 + cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); 305 305 if (!cp.h) 306 306 goto errout; 307 - memset(cp.h, 0, cp.hash * sizeof(f)); 308 307 balloc = 2; 309 308 } 310 309 } ··· 313 316 r = tcindex_lookup(&cp, handle) ? : &new_filter_result; 314 317 315 318 if (r == &new_filter_result) { 316 - f = kmalloc(sizeof(*f), GFP_KERNEL); 319 + f = kzalloc(sizeof(*f), GFP_KERNEL); 317 320 if (!f) 318 321 goto errout_alloc; 319 - memset(f, 0, sizeof(*f)); 320 322 } 321 323 322 324 if (tb[TCA_TCINDEX_CLASSID-1]) {
+5 -10
net/sched/cls_u32.c
··· 307 307 if (tp_c->q == tp->q) 308 308 break; 309 309 310 - root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL); 310 + root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL); 311 311 if (root_ht == NULL) 312 312 return -ENOBUFS; 313 313 314 - memset(root_ht, 0, sizeof(*root_ht)); 315 314 root_ht->divisor = 0; 316 315 root_ht->refcnt++; 317 316 root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000; 318 317 root_ht->prio = tp->prio; 319 318 320 319 if (tp_c == NULL) { 321 - tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL); 320 + tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL); 322 321 if (tp_c == NULL) { 323 322 kfree(root_ht); 324 323 return -ENOBUFS; 325 324 } 326 - memset(tp_c, 0, sizeof(*tp_c)); 327 325 tp_c->q = tp->q; 328 326 tp_c->next = u32_list; 329 327 u32_list = tp_c; ··· 569 571 if (handle == 0) 570 572 return -ENOMEM; 571 573 } 572 - ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 574 + ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); 573 575 if (ht == NULL) 574 576 return -ENOBUFS; 575 - memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*)); 576 577 ht->tp_c = tp_c; 577 578 ht->refcnt = 0; 578 579 ht->divisor = divisor; ··· 614 617 615 618 s = RTA_DATA(tb[TCA_U32_SEL-1]); 616 619 617 - n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 620 + n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL); 618 621 if (n == NULL) 619 622 return -ENOBUFS; 620 623 621 - memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key)); 622 624 #ifdef CONFIG_CLS_U32_PERF 623 - n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 625 + n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); 624 626 if (n->pf == NULL) { 625 627 kfree(n); 626 628 return -ENOBUFS; 627 629 } 628 - memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64)); 629 630 #endif 630 631 631 632 memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+1 -2
net/sched/em_meta.c
··· 773 773 TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX) 774 774 goto errout; 775 775 776 - meta = kmalloc(sizeof(*meta), GFP_KERNEL); 776 + meta = kzalloc(sizeof(*meta), GFP_KERNEL); 777 777 if (meta == NULL) 778 778 goto errout; 779 - memset(meta, 0, sizeof(*meta)); 780 779 781 780 memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left)); 782 781 memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
+1 -2
net/sched/ematch.c
··· 321 321 list_len = RTA_PAYLOAD(rt_list); 322 322 matches_len = tree_hdr->nmatches * sizeof(*em); 323 323 324 - tree->matches = kmalloc(matches_len, GFP_KERNEL); 324 + tree->matches = kzalloc(matches_len, GFP_KERNEL); 325 325 if (tree->matches == NULL) 326 326 goto errout; 327 - memset(tree->matches, 0, matches_len); 328 327 329 328 /* We do not use rtattr_parse_nested here because the maximum 330 329 * number of attributes is unknown. This saves us the allocation
+1 -2
net/sched/estimator.c
··· 139 139 if (parm->interval < -2 || parm->interval > 3) 140 140 return -EINVAL; 141 141 142 - est = kmalloc(sizeof(*est), GFP_KERNEL); 142 + est = kzalloc(sizeof(*est), GFP_KERNEL); 143 143 if (est == NULL) 144 144 return -ENOBUFS; 145 145 146 - memset(est, 0, sizeof(*est)); 147 146 est->interval = parm->interval + 2; 148 147 est->stats = stats; 149 148 est->stats_lock = stats_lock;
+1 -2
net/sched/sch_cbq.c
··· 1926 1926 } 1927 1927 1928 1928 err = -ENOBUFS; 1929 - cl = kmalloc(sizeof(*cl), GFP_KERNEL); 1929 + cl = kzalloc(sizeof(*cl), GFP_KERNEL); 1930 1930 if (cl == NULL) 1931 1931 goto failure; 1932 - memset(cl, 0, sizeof(*cl)); 1933 1932 cl->R_tab = rtab; 1934 1933 rtab = NULL; 1935 1934 cl->refcnt = 1;
+1 -2
net/sched/sch_generic.c
··· 432 432 size = QDISC_ALIGN(sizeof(*sch)); 433 433 size += ops->priv_size + (QDISC_ALIGNTO - 1); 434 434 435 - p = kmalloc(size, GFP_KERNEL); 435 + p = kzalloc(size, GFP_KERNEL); 436 436 if (!p) 437 437 goto errout; 438 - memset(p, 0, size); 439 438 sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); 440 439 sch->padded = (char *) sch - (char *) p; 441 440
+1 -2
net/sched/sch_gred.c
··· 406 406 struct gred_sched_data *q; 407 407 408 408 if (table->tab[dp] == NULL) { 409 - table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL); 409 + table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); 410 410 if (table->tab[dp] == NULL) 411 411 return -ENOMEM; 412 - memset(table->tab[dp], 0, sizeof(*q)); 413 412 } 414 413 415 414 q = table->tab[dp];
+1 -2
net/sched/sch_hfsc.c
··· 1123 1123 if (rsc == NULL && fsc == NULL) 1124 1124 return -EINVAL; 1125 1125 1126 - cl = kmalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1126 + cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL); 1127 1127 if (cl == NULL) 1128 1128 return -ENOBUFS; 1129 - memset(cl, 0, sizeof(struct hfsc_class)); 1130 1129 1131 1130 if (rsc != NULL) 1132 1131 hfsc_change_rsc(cl, rsc, 0);
+1 -2
net/sched/sch_htb.c
··· 1559 1559 goto failure; 1560 1560 } 1561 1561 err = -ENOBUFS; 1562 - if ((cl = kmalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1562 + if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) 1563 1563 goto failure; 1564 1564 1565 - memset(cl, 0, sizeof(*cl)); 1566 1565 cl->refcnt = 1; 1567 1566 INIT_LIST_HEAD(&cl->sibling); 1568 1567 INIT_LIST_HEAD(&cl->hlist);
+3 -1
net/sched/sch_netem.c
··· 148 148 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) 149 149 { 150 150 struct netem_sched_data *q = qdisc_priv(sch); 151 - struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; 151 + /* We don't fill cb now as skb_unshare() may invalidate it */ 152 + struct netem_skb_cb *cb; 152 153 struct sk_buff *skb2; 153 154 int ret; 154 155 int count = 1; ··· 201 200 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 202 201 } 203 202 203 + cb = (struct netem_skb_cb *)skb->cb; 204 204 if (q->gap == 0 /* not doing reordering */ 205 205 || q->counter < q->gap /* inside last reordering gap */ 206 206 || q->reorder < get_crandom(&q->reorder_cor)) {
+17 -10
net/sctp/associola.c
··· 441 441 /* If the primary path is changing, assume that the 442 442 * user wants to use this new path. 443 443 */ 444 - if (transport->state != SCTP_INACTIVE) 444 + if ((transport->state == SCTP_ACTIVE) || 445 + (transport->state == SCTP_UNKNOWN)) 445 446 asoc->peer.active_path = transport; 446 447 447 448 /* ··· 533 532 port = addr->v4.sin_port; 534 533 535 534 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ", 536 - " port: %d state:%s\n", 535 + " port: %d state:%d\n", 537 536 asoc, 538 537 addr, 539 538 addr->v4.sin_port, 540 - peer_state == SCTP_UNKNOWN?"UNKNOWN":"ACTIVE"); 539 + peer_state); 541 540 542 541 /* Set the port if it has not been set yet. */ 543 542 if (0 == asoc->peer.port) ··· 546 545 /* Check to see if this is a duplicate. */ 547 546 peer = sctp_assoc_lookup_paddr(asoc, addr); 548 547 if (peer) { 549 - if (peer_state == SCTP_ACTIVE && 550 - peer->state == SCTP_UNKNOWN) 551 - peer->state = SCTP_ACTIVE; 548 + if (peer->state == SCTP_UNKNOWN) { 549 + if (peer_state == SCTP_ACTIVE) 550 + peer->state = SCTP_ACTIVE; 551 + if (peer_state == SCTP_UNCONFIRMED) 552 + peer->state = SCTP_UNCONFIRMED; 553 + } 552 554 return peer; 553 555 } 554 556 ··· 743 739 list_for_each(pos, &asoc->peer.transport_addr_list) { 744 740 t = list_entry(pos, struct sctp_transport, transports); 745 741 746 - if (t->state == SCTP_INACTIVE) 742 + if ((t->state == SCTP_INACTIVE) || 743 + (t->state == SCTP_UNCONFIRMED)) 747 744 continue; 748 745 if (!first || t->last_time_heard > first->last_time_heard) { 749 746 second = first; ··· 764 759 * [If the primary is active but not most recent, bump the most 765 760 * recently used transport.] 
766 761 */ 767 - if (asoc->peer.primary_path->state != SCTP_INACTIVE && 762 + if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || 763 + (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && 768 764 first != asoc->peer.primary_path) { 769 765 second = first; 770 766 first = asoc->peer.primary_path; ··· 1060 1054 transports); 1061 1055 if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) 1062 1056 sctp_assoc_add_peer(asoc, &trans->ipaddr, 1063 - GFP_ATOMIC, SCTP_ACTIVE); 1057 + GFP_ATOMIC, trans->state); 1064 1058 } 1065 1059 1066 1060 asoc->ctsn_ack_point = asoc->next_tsn - 1; ··· 1100 1094 1101 1095 /* Try to find an active transport. */ 1102 1096 1103 - if (t->state != SCTP_INACTIVE) { 1097 + if ((t->state == SCTP_ACTIVE) || 1098 + (t->state == SCTP_UNKNOWN)) { 1104 1099 break; 1105 1100 } else { 1106 1101 /* Keep track of the next transport in case
+5 -3
net/sctp/bind_addr.c
··· 146 146 147 147 /* Add an address to the bind address list in the SCTP_bind_addr structure. */ 148 148 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 149 - gfp_t gfp) 149 + __u8 use_as_src, gfp_t gfp) 150 150 { 151 151 struct sctp_sockaddr_entry *addr; 152 152 ··· 162 162 */ 163 163 if (!addr->a.v4.sin_port) 164 164 addr->a.v4.sin_port = bp->port; 165 + 166 + addr->use_as_src = use_as_src; 165 167 166 168 INIT_LIST_HEAD(&addr->list); 167 169 list_add_tail(&addr->list, &bp->address_list); ··· 276 274 } 277 275 278 276 af->from_addr_param(&addr, rawaddr, port, 0); 279 - retval = sctp_add_bind_addr(bp, &addr, gfp); 277 + retval = sctp_add_bind_addr(bp, &addr, 1, gfp); 280 278 if (retval) { 281 279 /* Can't finish building the list, clean up. */ 282 280 sctp_bind_addr_clean(bp); ··· 369 367 (((AF_INET6 == addr->sa.sa_family) && 370 368 (flags & SCTP_ADDR6_ALLOWED) && 371 369 (flags & SCTP_ADDR6_PEERSUPP)))) 372 - error = sctp_add_bind_addr(dest, addr, gfp); 370 + error = sctp_add_bind_addr(dest, addr, 1, gfp); 373 371 } 374 372 375 373 return error;
+6 -5
net/sctp/endpointola.c
··· 158 158 void sctp_endpoint_free(struct sctp_endpoint *ep) 159 159 { 160 160 ep->base.dead = 1; 161 + 162 + ep->base.sk->sk_state = SCTP_SS_CLOSED; 163 + 164 + /* Unlink this endpoint, so we can't find it again! */ 165 + sctp_unhash_endpoint(ep); 166 + 161 167 sctp_endpoint_put(ep); 162 168 } 163 169 ··· 171 165 static void sctp_endpoint_destroy(struct sctp_endpoint *ep) 172 166 { 173 167 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); 174 - 175 - ep->base.sk->sk_state = SCTP_SS_CLOSED; 176 - 177 - /* Unlink this endpoint, so we can't find it again! */ 178 - sctp_unhash_endpoint(ep); 179 168 180 169 /* Free up the HMAC transform. */ 181 170 sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
+2 -1
net/sctp/ipv6.c
··· 290 290 sctp_read_lock(addr_lock); 291 291 list_for_each(pos, &bp->address_list) { 292 292 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 293 - if ((laddr->a.sa.sa_family == AF_INET6) && 293 + if ((laddr->use_as_src) && 294 + (laddr->a.sa.sa_family == AF_INET6) && 294 295 (scope <= sctp_scope(&laddr->a))) { 295 296 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); 296 297 if (!baddr || (matchlen < bmatchlen)) {
+6 -3
net/sctp/outqueue.c
··· 691 691 692 692 if (!new_transport) { 693 693 new_transport = asoc->peer.active_path; 694 - } else if (new_transport->state == SCTP_INACTIVE) { 694 + } else if ((new_transport->state == SCTP_INACTIVE) || 695 + (new_transport->state == SCTP_UNCONFIRMED)) { 695 696 /* If the chunk is Heartbeat or Heartbeat Ack, 696 697 * send it to chunk->transport, even if it's 697 698 * inactive. ··· 849 848 */ 850 849 new_transport = chunk->transport; 851 850 if (!new_transport || 852 - new_transport->state == SCTP_INACTIVE) 851 + ((new_transport->state == SCTP_INACTIVE) || 852 + (new_transport->state == SCTP_UNCONFIRMED))) 853 853 new_transport = asoc->peer.active_path; 854 854 855 855 /* Change packets if necessary. */ ··· 1466 1464 /* Mark the destination transport address as 1467 1465 * active if it is not so marked. 1468 1466 */ 1469 - if (transport->state == SCTP_INACTIVE) { 1467 + if ((transport->state == SCTP_INACTIVE) || 1468 + (transport->state == SCTP_UNCONFIRMED)) { 1470 1469 sctp_assoc_control_transport( 1471 1470 transport->asoc, 1472 1471 transport,
+5 -2
net/sctp/protocol.c
··· 240 240 (((AF_INET6 == addr->a.sa.sa_family) && 241 241 (copy_flags & SCTP_ADDR6_ALLOWED) && 242 242 (copy_flags & SCTP_ADDR6_PEERSUPP)))) { 243 - error = sctp_add_bind_addr(bp, &addr->a, 243 + error = sctp_add_bind_addr(bp, &addr->a, 1, 244 244 GFP_ATOMIC); 245 245 if (error) 246 246 goto end_copy; ··· 486 486 list_for_each(pos, &bp->address_list) { 487 487 laddr = list_entry(pos, struct sctp_sockaddr_entry, 488 488 list); 489 + if (!laddr->use_as_src) 490 + continue; 489 491 sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); 490 492 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 491 493 goto out_unlock; ··· 508 506 list_for_each(pos, &bp->address_list) { 509 507 laddr = list_entry(pos, struct sctp_sockaddr_entry, list); 510 508 511 - if (AF_INET == laddr->a.sa.sa_family) { 509 + if ((laddr->use_as_src) && 510 + (AF_INET == laddr->a.sa.sa_family)) { 512 511 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 513 512 if (!ip_route_output_key(&rt, &fl)) { 514 513 dst = &rt->u.dst;
+10 -4
net/sctp/sm_make_chunk.c
··· 1493 1493 1494 1494 /* Also, add the destination address. */ 1495 1495 if (list_empty(&retval->base.bind_addr.address_list)) { 1496 - sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1496 + sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1, 1497 1497 GFP_ATOMIC); 1498 1498 } 1499 1499 ··· 2017 2017 af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); 2018 2018 scope = sctp_scope(peer_addr); 2019 2019 if (sctp_in_scope(&addr, scope)) 2020 - if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_ACTIVE)) 2020 + if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) 2021 2021 return 0; 2022 2022 break; 2023 2023 ··· 2418 2418 * Due to Resource Shortage'. 2419 2419 */ 2420 2420 2421 - peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_ACTIVE); 2421 + peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); 2422 2422 if (!peer) 2423 2423 return SCTP_ERROR_RSRC_LOW; 2424 2424 ··· 2565 2565 union sctp_addr_param *addr_param; 2566 2566 struct list_head *pos; 2567 2567 struct sctp_transport *transport; 2568 + struct sctp_sockaddr_entry *saddr; 2568 2569 int retval = 0; 2569 2570 2570 2571 addr_param = (union sctp_addr_param *) ··· 2579 2578 case SCTP_PARAM_ADD_IP: 2580 2579 sctp_local_bh_disable(); 2581 2580 sctp_write_lock(&asoc->base.addr_lock); 2582 - retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); 2581 + list_for_each(pos, &bp->address_list) { 2582 + saddr = list_entry(pos, struct sctp_sockaddr_entry, list); 2583 + if (sctp_cmp_addr_exact(&saddr->a, &addr)) 2584 + saddr->use_as_src = 1; 2585 + } 2583 2586 sctp_write_unlock(&asoc->base.addr_lock); 2584 2587 sctp_local_bh_enable(); 2585 2588 break; ··· 2596 2591 list_for_each(pos, &asoc->peer.transport_addr_list) { 2597 2592 transport = list_entry(pos, struct sctp_transport, 2598 2593 transports); 2594 + dst_release(transport->dst); 2599 2595 sctp_transport_route(transport, NULL, 2600 2596 sctp_sk(asoc->base.sk)); 2601 2597 }
+10 -2
net/sctp/sm_sideeffect.c
··· 430 430 /* The check for association's overall error counter exceeding the 431 431 * threshold is done in the state function. 432 432 */ 433 - asoc->overall_error_count++; 433 + /* When probing UNCONFIRMED addresses, the association overall 434 + * error count is NOT incremented 435 + */ 436 + if (transport->state != SCTP_UNCONFIRMED) 437 + asoc->overall_error_count++; 434 438 435 439 if (transport->state != SCTP_INACTIVE && 436 440 (transport->error_count++ >= transport->pathmaxrxt)) { ··· 614 610 /* Mark the destination transport address as active if it is not so 615 611 * marked. 616 612 */ 617 - if (t->state == SCTP_INACTIVE) 613 + if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) 618 614 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, 619 615 SCTP_HEARTBEAT_SUCCESS); 620 616 ··· 624 620 */ 625 621 hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; 626 622 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 623 + 624 + /* Update the heartbeat timer. */ 625 + if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 626 + sctp_transport_hold(t); 627 627 } 628 628 629 629 /* Helper function to do a transport reset at the expiry of the hearbeat
+7 -1
net/sctp/sm_statefuns.c
··· 846 846 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); 847 847 hbinfo.daddr = transport->ipaddr; 848 848 hbinfo.sent_at = jiffies; 849 + hbinfo.hb_nonce = transport->hb_nonce; 849 850 850 851 /* Send a heartbeat to our peer. */ 851 852 paylen = sizeof(sctp_sender_hb_info_t); ··· 1048 1047 } 1049 1048 return SCTP_DISPOSITION_DISCARD; 1050 1049 } 1050 + 1051 + /* Validate the 64-bit random nonce. */ 1052 + if (hbinfo->hb_nonce != link->hb_nonce) 1053 + return SCTP_DISPOSITION_DISCARD; 1051 1054 1052 1055 max_interval = link->hbinterval + link->rto; 1053 1056 ··· 5283 5278 datalen -= sizeof(sctp_data_chunk_t); 5284 5279 5285 5280 deliver = SCTP_CMD_CHUNK_ULP; 5286 - chunk->data_accepted = 1; 5287 5281 5288 5282 /* Think about partial delivery. */ 5289 5283 if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { ··· 5360 5356 */ 5361 5357 if (SCTP_CMD_CHUNK_ULP == deliver) 5362 5358 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); 5359 + 5360 + chunk->data_accepted = 1; 5363 5361 5364 5362 /* Note: Some chunks may get overcounted (if we drop) or overcounted 5365 5363 * if we renege and the chunk arrives again.
+60 -16
net/sctp/socket.c
··· 369 369 370 370 /* Use GFP_ATOMIC since BHs are disabled. */ 371 371 addr->v4.sin_port = ntohs(addr->v4.sin_port); 372 - ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); 372 + ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC); 373 373 addr->v4.sin_port = htons(addr->v4.sin_port); 374 374 sctp_write_unlock(&ep->base.addr_lock); 375 375 sctp_local_bh_enable(); ··· 491 491 struct sctp_chunk *chunk; 492 492 struct sctp_sockaddr_entry *laddr; 493 493 union sctp_addr *addr; 494 + union sctp_addr saveaddr; 494 495 void *addr_buf; 495 496 struct sctp_af *af; 496 497 struct list_head *pos; ··· 559 558 } 560 559 561 560 retval = sctp_send_asconf(asoc, chunk); 561 + if (retval) 562 + goto out; 562 563 563 - /* FIXME: After sending the add address ASCONF chunk, we 564 - * cannot append the address to the association's binding 565 - * address list, because the new address may be used as the 566 - * source of a message sent to the peer before the ASCONF 567 - * chunk is received by the peer. So we should wait until 568 - * ASCONF_ACK is received. 564 + /* Add the new addresses to the bind address list with 565 + * use_as_src set to 0. 
569 566 */ 567 + sctp_local_bh_disable(); 568 + sctp_write_lock(&asoc->base.addr_lock); 569 + addr_buf = addrs; 570 + for (i = 0; i < addrcnt; i++) { 571 + addr = (union sctp_addr *)addr_buf; 572 + af = sctp_get_af_specific(addr->v4.sin_family); 573 + memcpy(&saveaddr, addr, af->sockaddr_len); 574 + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); 575 + retval = sctp_add_bind_addr(bp, &saveaddr, 0, 576 + GFP_ATOMIC); 577 + addr_buf += af->sockaddr_len; 578 + } 579 + sctp_write_unlock(&asoc->base.addr_lock); 580 + sctp_local_bh_enable(); 570 581 } 571 582 572 583 out: ··· 689 676 struct sctp_sock *sp; 690 677 struct sctp_endpoint *ep; 691 678 struct sctp_association *asoc; 679 + struct sctp_transport *transport; 692 680 struct sctp_bind_addr *bp; 693 681 struct sctp_chunk *chunk; 694 682 union sctp_addr *laddr; 683 + union sctp_addr saveaddr; 695 684 void *addr_buf; 696 685 struct sctp_af *af; 697 - struct list_head *pos; 686 + struct list_head *pos, *pos1; 687 + struct sctp_sockaddr_entry *saddr; 698 688 int i; 699 689 int retval = 0; 700 690 ··· 764 748 goto out; 765 749 } 766 750 767 - retval = sctp_send_asconf(asoc, chunk); 768 - 769 - /* FIXME: After sending the delete address ASCONF chunk, we 770 - * cannot remove the addresses from the association's bind 771 - * address list, because there maybe some packet send to 772 - * the delete addresses, so we should wait until ASCONF_ACK 773 - * packet is received. 751 + /* Reset use_as_src flag for the addresses in the bind address 752 + * list that are to be deleted. 
774 753 */ 754 + sctp_local_bh_disable(); 755 + sctp_write_lock(&asoc->base.addr_lock); 756 + addr_buf = addrs; 757 + for (i = 0; i < addrcnt; i++) { 758 + laddr = (union sctp_addr *)addr_buf; 759 + af = sctp_get_af_specific(laddr->v4.sin_family); 760 + memcpy(&saveaddr, laddr, af->sockaddr_len); 761 + saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); 762 + list_for_each(pos1, &bp->address_list) { 763 + saddr = list_entry(pos1, 764 + struct sctp_sockaddr_entry, 765 + list); 766 + if (sctp_cmp_addr_exact(&saddr->a, &saveaddr)) 767 + saddr->use_as_src = 0; 768 + } 769 + addr_buf += af->sockaddr_len; 770 + } 771 + sctp_write_unlock(&asoc->base.addr_lock); 772 + sctp_local_bh_enable(); 773 + 774 + /* Update the route and saddr entries for all the transports 775 + * as some of the addresses in the bind address list are 776 + * about to be deleted and cannot be used as source addresses. 777 + */ 778 + list_for_each(pos1, &asoc->peer.transport_addr_list) { 779 + transport = list_entry(pos1, struct sctp_transport, 780 + transports); 781 + dst_release(transport->dst); 782 + sctp_transport_route(transport, NULL, 783 + sctp_sk(asoc->base.sk)); 784 + } 785 + 786 + retval = sctp_send_asconf(asoc, chunk); 775 787 } 776 788 out: 777 789 return retval; ··· 5021 4977 /* Caller must hold hashbucket lock for this tb with local BH disabled */ 5022 4978 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) 5023 4979 { 5024 - if (hlist_empty(&pp->owner)) { 4980 + if (pp && hlist_empty(&pp->owner)) { 5025 4981 if (pp->next) 5026 4982 pp->next->pprev = pp->pprev; 5027 4983 *(pp->pprev) = pp->next;
+7 -2
net/sctp/transport.c
··· 49 49 */ 50 50 51 51 #include <linux/types.h> 52 + #include <linux/random.h> 52 53 #include <net/sctp/sctp.h> 53 54 #include <net/sctp/sm.h> 54 55 ··· 86 85 87 86 peer->init_sent_count = 0; 88 87 89 - peer->state = SCTP_ACTIVE; 90 88 peer->param_flags = SPP_HB_DISABLE | 91 89 SPP_PMTUD_ENABLE | 92 90 SPP_SACKDELAY_ENABLE; ··· 108 108 init_timer(&peer->hb_timer); 109 109 peer->hb_timer.function = sctp_generate_heartbeat_event; 110 110 peer->hb_timer.data = (unsigned long)peer; 111 + 112 + /* Initialize the 64-bit random nonce sent with heartbeat. */ 113 + get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); 111 114 112 115 atomic_set(&peer->refcnt, 1); 113 116 peer->dead = 0; ··· 520 517 unsigned long sctp_transport_timeout(struct sctp_transport *t) 521 518 { 522 519 unsigned long timeout; 523 - timeout = t->hbinterval + t->rto + sctp_jitter(t->rto); 520 + timeout = t->rto + sctp_jitter(t->rto); 521 + if (t->state != SCTP_UNCONFIRMED) 522 + timeout += t->hbinterval; 524 523 timeout += jiffies; 525 524 return timeout; 526 525 }
+3 -6
net/sunrpc/auth_gss/auth_gss.c
··· 225 225 { 226 226 struct gss_cl_ctx *ctx; 227 227 228 - ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); 228 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 229 229 if (ctx != NULL) { 230 - memset(ctx, 0, sizeof(*ctx)); 231 230 ctx->gc_proc = RPC_GSS_PROC_DATA; 232 231 ctx->gc_seq = 1; /* NetApp 6.4R1 doesn't accept seq. no. 0 */ 233 232 spin_lock_init(&ctx->gc_seq_lock); ··· 390 391 { 391 392 struct gss_upcall_msg *gss_msg; 392 393 393 - gss_msg = kmalloc(sizeof(*gss_msg), GFP_KERNEL); 394 + gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL); 394 395 if (gss_msg != NULL) { 395 - memset(gss_msg, 0, sizeof(*gss_msg)); 396 396 INIT_LIST_HEAD(&gss_msg->list); 397 397 rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq"); 398 398 init_waitqueue_head(&gss_msg->waitqueue); ··· 774 776 dprintk("RPC: gss_create_cred for uid %d, flavor %d\n", 775 777 acred->uid, auth->au_flavor); 776 778 777 - if (!(cred = kmalloc(sizeof(*cred), GFP_KERNEL))) 779 + if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL))) 778 780 goto out_err; 779 781 780 - memset(cred, 0, sizeof(*cred)); 781 782 atomic_set(&cred->gc_count, 1); 782 783 cred->gc_uid = acred->uid; 783 784 /*
+1 -2
net/sunrpc/auth_gss/gss_krb5_mech.c
··· 129 129 const void *end = (const void *)((const char *)p + len); 130 130 struct krb5_ctx *ctx; 131 131 132 - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 132 + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 133 133 goto out_err; 134 - memset(ctx, 0, sizeof(*ctx)); 135 134 136 135 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 137 136 if (IS_ERR(p))
+1 -2
net/sunrpc/auth_gss/gss_mech_switch.c
··· 237 237 struct gss_api_mech *mech, 238 238 struct gss_ctx **ctx_id) 239 239 { 240 - if (!(*ctx_id = kmalloc(sizeof(**ctx_id), GFP_KERNEL))) 240 + if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) 241 241 return GSS_S_FAILURE; 242 - memset(*ctx_id, 0, sizeof(**ctx_id)); 243 242 (*ctx_id)->mech_type = gss_mech_get(mech); 244 243 245 244 return mech->gm_ops
+1 -2
net/sunrpc/auth_gss/gss_spkm3_mech.c
··· 152 152 const void *end = (const void *)((const char *)p + len); 153 153 struct spkm3_ctx *ctx; 154 154 155 - if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL))) 155 + if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 156 156 goto out_err; 157 - memset(ctx, 0, sizeof(*ctx)); 158 157 159 158 p = simple_get_netobj(p, end, &ctx->ctx_id); 160 159 if (IS_ERR(p))
+1 -2
net/sunrpc/auth_gss/gss_spkm3_token.c
··· 90 90 int 91 91 decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen) 92 92 { 93 - if (!(out->data = kmalloc(explen,GFP_KERNEL))) 93 + if (!(out->data = kzalloc(explen,GFP_KERNEL))) 94 94 return 0; 95 95 out->len = explen; 96 - memset(out->data, 0, explen); 97 96 memcpy(out->data, in, enclen); 98 97 return 1; 99 98 }
+1 -2
net/sunrpc/clnt.c
··· 125 125 goto out_err; 126 126 127 127 err = -ENOMEM; 128 - clnt = kmalloc(sizeof(*clnt), GFP_KERNEL); 128 + clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 129 129 if (!clnt) 130 130 goto out_err; 131 - memset(clnt, 0, sizeof(*clnt)); 132 131 atomic_set(&clnt->cl_users, 0); 133 132 atomic_set(&clnt->cl_count, 1); 134 133 clnt->cl_parent = clnt;
+1 -6
net/sunrpc/stats.c
··· 114 114 */ 115 115 struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) 116 116 { 117 - unsigned int ops = clnt->cl_maxproc; 118 - size_t size = ops * sizeof(struct rpc_iostats); 119 117 struct rpc_iostats *new; 120 - 121 - new = kmalloc(size, GFP_KERNEL); 122 - if (new) 123 - memset(new, 0 , size); 118 + new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); 124 119 return new; 125 120 } 126 121 EXPORT_SYMBOL(rpc_alloc_iostats);
+2 -4
net/sunrpc/svc.c
··· 32 32 int vers; 33 33 unsigned int xdrsize; 34 34 35 - if (!(serv = kmalloc(sizeof(*serv), GFP_KERNEL))) 35 + if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) 36 36 return NULL; 37 - memset(serv, 0, sizeof(*serv)); 38 37 serv->sv_name = prog->pg_name; 39 38 serv->sv_program = prog; 40 39 serv->sv_nrthreads = 1; ··· 158 159 struct svc_rqst *rqstp; 159 160 int error = -ENOMEM; 160 161 161 - rqstp = kmalloc(sizeof(*rqstp), GFP_KERNEL); 162 + rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL); 162 163 if (!rqstp) 163 164 goto out; 164 165 165 - memset(rqstp, 0, sizeof(*rqstp)); 166 166 init_waitqueue_head(&rqstp->rq_wait); 167 167 168 168 if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
+1 -2
net/sunrpc/svcsock.c
··· 1322 1322 struct sock *inet; 1323 1323 1324 1324 dprintk("svc: svc_setup_socket %p\n", sock); 1325 - if (!(svsk = kmalloc(sizeof(*svsk), GFP_KERNEL))) { 1325 + if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { 1326 1326 *errp = -ENOMEM; 1327 1327 return NULL; 1328 1328 } 1329 - memset(svsk, 0, sizeof(*svsk)); 1330 1329 1331 1330 inet = sock->sk; 1332 1331
+1 -2
net/sunrpc/xprt.c
··· 908 908 struct rpc_xprt *xprt; 909 909 struct rpc_rqst *req; 910 910 911 - if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 911 + if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) 912 912 return ERR_PTR(-ENOMEM); 913 - memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ 914 913 915 914 xprt->addr = *ap; 916 915
+2 -4
net/sunrpc/xprtsock.c
··· 1276 1276 1277 1277 xprt->max_reqs = xprt_udp_slot_table_entries; 1278 1278 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1279 - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1279 + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1280 1280 if (xprt->slot == NULL) 1281 1281 return -ENOMEM; 1282 - memset(xprt->slot, 0, slot_table_size); 1283 1282 1284 1283 xprt->prot = IPPROTO_UDP; 1285 1284 xprt->port = xs_get_random_port(); ··· 1317 1318 1318 1319 xprt->max_reqs = xprt_tcp_slot_table_entries; 1319 1320 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1320 - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); 1321 + xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1321 1322 if (xprt->slot == NULL) 1322 1323 return -ENOMEM; 1323 - memset(xprt->slot, 0, slot_table_size); 1324 1324 1325 1325 xprt->prot = IPPROTO_TCP; 1326 1326 xprt->port = xs_get_random_port();
+2 -4
net/tipc/bearer.c
··· 665 665 int res; 666 666 667 667 write_lock_bh(&tipc_net_lock); 668 - tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC); 669 - media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC); 668 + tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC); 669 + media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC); 670 670 if (tipc_bearers && media_list) { 671 - memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer)); 672 - memset(media_list, 0, MAX_MEDIA * sizeof(struct media)); 673 671 res = TIPC_OK; 674 672 } else { 675 673 kfree(tipc_bearers);
+2 -6
net/tipc/cluster.c
··· 57 57 struct _zone *z_ptr; 58 58 struct cluster *c_ptr; 59 59 int max_nodes; 60 - int alloc; 61 60 62 - c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); 61 + c_ptr = kzalloc(sizeof(*c_ptr), GFP_ATOMIC); 63 62 if (c_ptr == NULL) { 64 63 warn("Cluster creation failure, no memory\n"); 65 64 return NULL; 66 65 } 67 - memset(c_ptr, 0, sizeof(*c_ptr)); 68 66 69 67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 70 68 if (in_own_cluster(addr)) 71 69 max_nodes = LOWEST_SLAVE + tipc_max_slaves; 72 70 else 73 71 max_nodes = tipc_max_nodes + 1; 74 - alloc = sizeof(void *) * (max_nodes + 1); 75 72 76 - c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); 73 + c_ptr->nodes = kcalloc(max_nodes + 1, sizeof(void*), GFP_ATOMIC); 77 74 if (c_ptr->nodes == NULL) { 78 75 warn("Cluster creation failure, no memory for node area\n"); 79 76 kfree(c_ptr); 80 77 return NULL; 81 78 } 82 - memset(c_ptr->nodes, 0, alloc); 83 79 84 80 if (in_own_cluster(addr)) 85 81 tipc_local_nodes = c_ptr->nodes;
+1 -1
net/tipc/discover.c
··· 295 295 { 296 296 struct link_req *req; 297 297 298 - req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC); 298 + req = kmalloc(sizeof(*req), GFP_ATOMIC); 299 299 if (!req) 300 300 return NULL; 301 301
+1 -2
net/tipc/link.c
··· 417 417 struct tipc_msg *msg; 418 418 char *if_name; 419 419 420 - l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC); 420 + l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC); 421 421 if (!l_ptr) { 422 422 warn("Link creation failed, no memory\n"); 423 423 return NULL; 424 424 } 425 - memset(l_ptr, 0, sizeof(*l_ptr)); 426 425 427 426 l_ptr->addr = peer; 428 427 if_name = strchr(b_ptr->publ.name, ':') + 1;
+4 -12
net/tipc/name_table.c
··· 117 117 u32 scope, u32 node, u32 port_ref, 118 118 u32 key) 119 119 { 120 - struct publication *publ = 121 - (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); 120 + struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); 122 121 if (publ == NULL) { 123 122 warn("Publication creation failure, no memory\n"); 124 123 return NULL; 125 124 } 126 125 127 - memset(publ, 0, sizeof(*publ)); 128 126 publ->type = type; 129 127 publ->lower = lower; 130 128 publ->upper = upper; ··· 142 144 143 145 static struct sub_seq *tipc_subseq_alloc(u32 cnt) 144 146 { 145 - u32 sz = cnt * sizeof(struct sub_seq); 146 - struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC); 147 - 148 - if (sseq) 149 - memset(sseq, 0, sz); 147 + struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); 150 148 return sseq; 151 149 } 152 150 ··· 154 160 155 161 static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 156 162 { 157 - struct name_seq *nseq = 158 - (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC); 163 + struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); 159 164 struct sub_seq *sseq = tipc_subseq_alloc(1); 160 165 161 166 if (!nseq || !sseq) { ··· 164 171 return NULL; 165 172 } 166 173 167 - memset(nseq, 0, sizeof(*nseq)); 168 174 spin_lock_init(&nseq->lock); 169 175 nseq->type = type; 170 176 nseq->sseqs = sseq; ··· 1052 1060 { 1053 1061 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1054 1062 1055 - table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC); 1063 + table.types = kmalloc(array_size, GFP_ATOMIC); 1056 1064 if (!table.types) 1057 1065 return -ENOMEM; 1058 1066
+1 -4
net/tipc/net.c
··· 160 160 161 161 static int net_init(void) 162 162 { 163 - u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1); 164 - 165 163 memset(&tipc_net, 0, sizeof(tipc_net)); 166 - tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 164 + tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC); 167 165 if (!tipc_net.zones) { 168 166 return -ENOMEM; 169 167 } 170 - memset(tipc_net.zones, 0, sz); 171 168 return TIPC_OK; 172 169 } 173 170
+2 -3
net/tipc/port.c
··· 226 226 struct tipc_msg *msg; 227 227 u32 ref; 228 228 229 - p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC); 229 + p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC); 230 230 if (!p_ptr) { 231 231 warn("Port creation failed, no memory\n"); 232 232 return 0; 233 233 } 234 - memset(p_ptr, 0, sizeof(*p_ptr)); 235 234 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock); 236 235 if (!ref) { 237 236 warn("Port creation failed, reference table exhausted\n"); ··· 1057 1058 struct port *p_ptr; 1058 1059 u32 ref; 1059 1060 1060 - up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 + up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1061 1062 if (!up_ptr) { 1062 1063 warn("Port creation failed, no memory\n"); 1063 1064 return -ENOMEM;
+1 -1
net/tipc/ref.c
··· 79 79 while (sz < requested_size) { 80 80 sz <<= 1; 81 81 } 82 - table = (struct reference *)vmalloc(sz * sizeof(struct reference)); 82 + table = vmalloc(sz * sizeof(*table)); 83 83 if (table == NULL) 84 84 return -ENOMEM; 85 85
+1 -2
net/tipc/subscr.c
··· 393 393 394 394 /* Create subscriber object */ 395 395 396 - subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC); 396 + subscriber = kzalloc(sizeof(struct subscriber), GFP_ATOMIC); 397 397 if (subscriber == NULL) { 398 398 warn("Subscriber rejected, no memory\n"); 399 399 return; 400 400 } 401 - memset(subscriber, 0, sizeof(struct subscriber)); 402 401 INIT_LIST_HEAD(&subscriber->subscription_list); 403 402 INIT_LIST_HEAD(&subscriber->subscriber_list); 404 403 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
+1 -2
net/tipc/user_reg.c
··· 82 82 83 83 spin_lock_bh(&reg_lock); 84 84 if (!users) { 85 - users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC); 85 + users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC); 86 86 if (users) { 87 - memset(users, 0, USER_LIST_SIZE); 88 87 for (i = 1; i <= MAX_USERID; i++) { 89 88 users[i].next = i - 1; 90 89 }
+1 -2
net/tipc/zone.c
··· 52 52 return NULL; 53 53 } 54 54 55 - z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 55 + z_ptr = kzalloc(sizeof(*z_ptr), GFP_ATOMIC); 56 56 if (!z_ptr) { 57 57 warn("Zone creation failed, insufficient memory\n"); 58 58 return NULL; 59 59 } 60 60 61 - memset(z_ptr, 0, sizeof(*z_ptr)); 62 61 z_num = tipc_zone(addr); 63 62 z_ptr->addr = tipc_addr(z_num, 0, 0); 64 63 tipc_net.zones[z_num] = z_ptr;
+1 -2
net/unix/af_unix.c
··· 663 663 goto out; 664 664 665 665 err = -ENOMEM; 666 - addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); 666 + addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); 667 667 if (!addr) 668 668 goto out; 669 669 670 - memset(addr, 0, sizeof(*addr) + sizeof(short) + 16); 671 670 addr->name->sun_family = AF_UNIX; 672 671 atomic_set(&addr->refcnt, 1); 673 672
+3 -6
net/wanrouter/af_wanpipe.c
··· 370 370 * used by the ioctl call to read call information 371 371 * and to execute commands. 372 372 */ 373 - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 373 + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) { 374 374 wanpipe_kill_sock_irq (newsk); 375 375 release_device(dev); 376 376 return -ENOMEM; 377 377 } 378 - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); 379 378 memcpy(mbox_ptr,skb->data,skb->len); 380 379 381 380 /* Register the lcn on which incoming call came ··· 506 507 if ((sk = sk_alloc(PF_WANPIPE, GFP_ATOMIC, &wanpipe_proto, 1)) == NULL) 507 508 return NULL; 508 509 509 - if ((wan_opt = kmalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { 510 + if ((wan_opt = kzalloc(sizeof(struct wanpipe_opt), GFP_ATOMIC)) == NULL) { 510 511 sk_free(sk); 511 512 return NULL; 512 513 } 513 - memset(wan_opt, 0x00, sizeof(struct wanpipe_opt)); 514 514 515 515 wp_sk(sk) = wan_opt; 516 516 ··· 2009 2011 2010 2012 dev_put(dev); 2011 2013 2012 - if ((mbox_ptr = kmalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) 2014 + if ((mbox_ptr = kzalloc(sizeof(mbox_cmd_t), GFP_ATOMIC)) == NULL) 2013 2015 return -ENOMEM; 2014 2016 2015 - memset(mbox_ptr, 0, sizeof(mbox_cmd_t)); 2016 2017 wp_sk(sk)->mbox = mbox_ptr; 2017 2018 2018 2019 wanpipe_link_driver(dev,sk);
+3 -6
net/wanrouter/wanmain.c
··· 642 642 643 643 if (cnf->config_id == WANCONFIG_MPPP) { 644 644 #ifdef CONFIG_WANPIPE_MULTPPP 645 - pppdev = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); 645 + pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); 646 646 err = -ENOBUFS; 647 647 if (pppdev == NULL) 648 648 goto out; 649 - memset(pppdev, 0, sizeof(struct ppp_device)); 650 - pppdev->dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 649 + pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 651 650 if (pppdev->dev == NULL) { 652 651 kfree(pppdev); 653 652 err = -ENOBUFS; 654 653 goto out; 655 654 } 656 - memset(pppdev->dev, 0, sizeof(struct net_device)); 657 655 err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); 658 656 dev = pppdev->dev; 659 657 #else ··· 661 663 goto out; 662 664 #endif 663 665 } else { 664 - dev = kmalloc(sizeof(struct net_device), GFP_KERNEL); 666 + dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 665 667 err = -ENOBUFS; 666 668 if (dev == NULL) 667 669 goto out; 668 - memset(dev, 0, sizeof(struct net_device)); 669 670 err = wandev->new_if(wandev, dev, cnf); 670 671 } 671 672
+1 -2
net/xfrm/xfrm_policy.c
··· 307 307 { 308 308 struct xfrm_policy *policy; 309 309 310 - policy = kmalloc(sizeof(struct xfrm_policy), gfp); 310 + policy = kzalloc(sizeof(struct xfrm_policy), gfp); 311 311 312 312 if (policy) { 313 - memset(policy, 0, sizeof(struct xfrm_policy)); 314 313 atomic_set(&policy->refcnt, 1); 315 314 rwlock_init(&policy->lock); 316 315 init_timer(&policy->timer);
+1 -2
net/xfrm/xfrm_state.c
··· 194 194 { 195 195 struct xfrm_state *x; 196 196 197 - x = kmalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 197 + x = kzalloc(sizeof(struct xfrm_state), GFP_ATOMIC); 198 198 199 199 if (x) { 200 - memset(x, 0, sizeof(struct xfrm_state)); 201 200 atomic_set(&x->refcnt, 1); 202 201 atomic_set(&x->tunnel_users, 0); 203 202 INIT_LIST_HEAD(&x->bydst);