Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] Transform kmem_cache_alloc()+memset(0) -> kmem_cache_zalloc().

Replace appropriate pairs of "kmem_cache_alloc()" + "memset(0)" with the
corresponding "kmem_cache_zalloc()" call.

Signed-off-by: Robert P. J. Day <rpjday@mindspring.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Roland McGrath <roland@redhat.com>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Greg KH <greg@kroah.com>
Acked-by: Joel Becker <Joel.Becker@oracle.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Jan Kara <jack@ucw.cz>
Cc: Michael Halcrow <mhalcrow@us.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: James Morris <jmorris@namei.org>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Robert P. J. Day and committed by Linus Torvalds.
c3762229 1b135431

+48 -103
+4 -9
arch/ia64/ia32/binfmt_elf32.c
··· 91 91 * it with privilege level 3 because the IVE uses non-privileged accesses to these 92 92 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. 93 93 */ 94 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 94 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 95 95 if (vma) { 96 - memset(vma, 0, sizeof(*vma)); 97 96 vma->vm_mm = current->mm; 98 97 vma->vm_start = IA32_GDT_OFFSET; 99 98 vma->vm_end = vma->vm_start + PAGE_SIZE; ··· 116 117 * code is locked in specific gate page, which is pointed by pretcode 117 118 * when setup_frame_ia32 118 119 */ 119 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 120 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 120 121 if (vma) { 121 - memset(vma, 0, sizeof(*vma)); 122 122 vma->vm_mm = current->mm; 123 123 vma->vm_start = IA32_GATE_OFFSET; 124 124 vma->vm_end = vma->vm_start + PAGE_SIZE; ··· 140 142 * Install LDT as anonymous memory. This gives us all-zero segment descriptors 141 143 * until a task modifies them via modify_ldt(). 142 144 */ 143 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 145 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 144 146 if (vma) { 145 - memset(vma, 0, sizeof(*vma)); 146 147 vma->vm_mm = current->mm; 147 148 vma->vm_start = IA32_LDT_OFFSET; 148 149 vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); ··· 211 214 bprm->loader += stack_base; 212 215 bprm->exec += stack_base; 213 216 214 - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 217 + mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 215 218 if (!mpnt) 216 219 return -ENOMEM; 217 - 218 - memset(mpnt, 0, sizeof(*mpnt)); 219 220 220 221 down_write(&current->mm->mmap_sem); 221 222 {
+1 -2
arch/ia64/kernel/perfmon.c
··· 2301 2301 DPRINT(("smpl_buf @%p\n", smpl_buf)); 2302 2302 2303 2303 /* allocate vma */ 2304 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 2304 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 2305 2305 if (!vma) { 2306 2306 DPRINT(("Cannot allocate vma\n")); 2307 2307 goto error_kmem; 2308 2308 } 2309 - memset(vma, 0, sizeof(*vma)); 2310 2309 2311 2310 /* 2312 2311 * partially initialize the vma for the sampling buffer
+2 -4
arch/ia64/mm/init.c
··· 176 176 * the problem. When the process attempts to write to the register backing store 177 177 * for the first time, it will get a SEGFAULT in this case. 178 178 */ 179 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 179 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 180 180 if (vma) { 181 - memset(vma, 0, sizeof(*vma)); 182 181 vma->vm_mm = current->mm; 183 182 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 184 183 vma->vm_end = vma->vm_start + PAGE_SIZE; ··· 194 195 195 196 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ 196 197 if (!(current->personality & MMAP_PAGE_ZERO)) { 197 - vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 198 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 198 199 if (vma) { 199 - memset(vma, 0, sizeof(*vma)); 200 200 vma->vm_mm = current->mm; 201 201 vma->vm_end = PAGE_SIZE; 202 202 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
+1 -3
arch/x86_64/ia32/ia32_binfmt.c
··· 300 300 bprm->loader += stack_base; 301 301 bprm->exec += stack_base; 302 302 303 - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 303 + mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 304 304 if (!mpnt) 305 305 return -ENOMEM; 306 - 307 - memset(mpnt, 0, sizeof(*mpnt)); 308 306 309 307 down_write(&mm->mmap_sem); 310 308 {
+1 -2
drivers/infiniband/hw/ehca/ehca_cq.c
··· 134 134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 135 135 return ERR_PTR(-EINVAL); 136 136 137 - my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL); 137 + my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL); 138 138 if (!my_cq) { 139 139 ehca_err(device, "Out of memory for ehca_cq struct device=%p", 140 140 device); 141 141 return ERR_PTR(-ENOMEM); 142 142 } 143 143 144 - memset(my_cq, 0, sizeof(struct ehca_cq)); 145 144 memset(&param, 0, sizeof(struct ehca_alloc_cq_parms)); 146 145 147 146 spin_lock_init(&my_cq->spinlock);
+2 -4
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 53 53 { 54 54 struct ehca_mr *me; 55 55 56 - me = kmem_cache_alloc(mr_cache, GFP_KERNEL); 56 + me = kmem_cache_zalloc(mr_cache, GFP_KERNEL); 57 57 if (me) { 58 - memset(me, 0, sizeof(struct ehca_mr)); 59 58 spin_lock_init(&me->mrlock); 60 59 } else 61 60 ehca_gen_err("alloc failed"); ··· 71 72 { 72 73 struct ehca_mw *me; 73 74 74 - me = kmem_cache_alloc(mw_cache, GFP_KERNEL); 75 + me = kmem_cache_zalloc(mw_cache, GFP_KERNEL); 75 76 if (me) { 76 - memset(me, 0, sizeof(struct ehca_mw)); 77 77 spin_lock_init(&me->mwlock); 78 78 } else 79 79 ehca_gen_err("alloc failed");
+1 -2
drivers/infiniband/hw/ehca/ehca_pd.c
··· 50 50 { 51 51 struct ehca_pd *pd; 52 52 53 - pd = kmem_cache_alloc(pd_cache, GFP_KERNEL); 53 + pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL); 54 54 if (!pd) { 55 55 ehca_err(device, "device=%p context=%p out of memory", 56 56 device, context); 57 57 return ERR_PTR(-ENOMEM); 58 58 } 59 59 60 - memset(pd, 0, sizeof(struct ehca_pd)); 61 60 pd->ownpid = current->tgid; 62 61 63 62 /*
+1 -2
drivers/infiniband/hw/ehca/ehca_qp.c
··· 450 450 if (pd->uobject && udata) 451 451 context = pd->uobject->context; 452 452 453 - my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL); 453 + my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL); 454 454 if (!my_qp) { 455 455 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); 456 456 return ERR_PTR(-ENOMEM); 457 457 } 458 458 459 - memset(my_qp, 0, sizeof(struct ehca_qp)); 460 459 memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms)); 461 460 spin_lock_init(&my_qp->spinlock_s); 462 461 spin_lock_init(&my_qp->spinlock_r);
+1 -2
drivers/scsi/aic94xx/aic94xx_hwi.c
··· 1052 1052 struct asd_ascb *ascb; 1053 1053 unsigned long flags; 1054 1054 1055 - ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags); 1055 + ascb = kmem_cache_zalloc(asd_ascb_cache, gfp_flags); 1056 1056 1057 1057 if (ascb) { 1058 - memset(ascb, 0, sizeof(*ascb)); 1059 1058 ascb->dma_scb.size = sizeof(struct scb); 1060 1059 ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, 1061 1060 gfp_flags,
+1 -2
drivers/scsi/scsi_lib.c
··· 388 388 int err = 0; 389 389 int write = (data_direction == DMA_TO_DEVICE); 390 390 391 - sioc = kmem_cache_alloc(scsi_io_context_cache, gfp); 391 + sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp); 392 392 if (!sioc) 393 393 return DRIVER_ERROR << 24; 394 - memset(sioc, 0, sizeof(*sioc)); 395 394 396 395 req = blk_get_request(sdev->request_queue, write, gfp); 397 396 if (!req)
+1 -2
drivers/usb/host/hc_crisv10.c
··· 2163 2163 2164 2164 maxlen = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); 2165 2165 2166 - sb_desc = (USB_SB_Desc_t*)kmem_cache_alloc(usb_desc_cache, SLAB_FLAG); 2166 + sb_desc = kmem_cache_zalloc(usb_desc_cache, SLAB_FLAG); 2167 2167 assert(sb_desc != NULL); 2168 - memset(sb_desc, 0, sizeof(USB_SB_Desc_t)); 2169 2168 2170 2169 2171 2170 if (usb_pipeout(urb->pipe)) {
+1 -3
drivers/usb/host/uhci-q.c
··· 624 624 { 625 625 struct urb_priv *urbp; 626 626 627 - urbp = kmem_cache_alloc(uhci_up_cachep, GFP_ATOMIC); 627 + urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC); 628 628 if (!urbp) 629 629 return NULL; 630 - 631 - memset((void *)urbp, 0, sizeof(*urbp)); 632 630 633 631 urbp->urb = urb; 634 632 urb->hcpriv = urbp;
+1 -2
fs/aio.c
··· 211 211 if ((unsigned long)nr_events > aio_max_nr) 212 212 return ERR_PTR(-EAGAIN); 213 213 214 - ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL); 214 + ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); 215 215 if (!ctx) 216 216 return ERR_PTR(-ENOMEM); 217 217 218 - memset(ctx, 0, sizeof(*ctx)); 219 218 ctx->max_reqs = nr_events; 220 219 mm = ctx->mm = current->mm; 221 220 atomic_inc(&mm->mm_count);
+1 -2
fs/configfs/dir.c
··· 72 72 { 73 73 struct configfs_dirent * sd; 74 74 75 - sd = kmem_cache_alloc(configfs_dir_cachep, GFP_KERNEL); 75 + sd = kmem_cache_zalloc(configfs_dir_cachep, GFP_KERNEL); 76 76 if (!sd) 77 77 return NULL; 78 78 79 - memset(sd, 0, sizeof(*sd)); 80 79 atomic_set(&sd->s_count, 1); 81 80 INIT_LIST_HEAD(&sd->s_links); 82 81 INIT_LIST_HEAD(&sd->s_children);
+1 -3
fs/dlm/memory.c
··· 76 76 { 77 77 struct dlm_lkb *lkb; 78 78 79 - lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL); 80 - if (lkb) 81 - memset(lkb, 0, sizeof(*lkb)); 79 + lkb = kmem_cache_zalloc(lkb_cache, GFP_KERNEL); 82 80 return lkb; 83 81 } 84 82
+1 -2
fs/dquot.c
··· 600 600 { 601 601 struct dquot *dquot; 602 602 603 - dquot = kmem_cache_alloc(dquot_cachep, GFP_NOFS); 603 + dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS); 604 604 if(!dquot) 605 605 return NODQUOT; 606 606 607 - memset((caddr_t)dquot, 0, sizeof(struct dquot)); 608 607 mutex_init(&dquot->dq_lock); 609 608 INIT_LIST_HEAD(&dquot->dq_free); 610 609 INIT_LIST_HEAD(&dquot->dq_inuse);
+2 -2
fs/ecryptfs/crypto.c
··· 1332 1332 goto out; 1333 1333 } 1334 1334 /* Released in this function */ 1335 - page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, GFP_USER); 1335 + page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER); 1336 1336 if (!page_virt) { 1337 1337 ecryptfs_printk(KERN_ERR, "Out of memory\n"); 1338 1338 rc = -ENOMEM; 1339 1339 goto out; 1340 1340 } 1341 - memset(page_virt, 0, PAGE_CACHE_SIZE); 1341 + 1342 1342 rc = ecryptfs_write_headers_virt(page_virt, crypt_stat, 1343 1343 ecryptfs_dentry); 1344 1344 if (unlikely(rc)) {
+1 -2
fs/ecryptfs/file.c
··· 251 251 int lower_flags; 252 252 253 253 /* Released in ecryptfs_release or end of function if failure */ 254 - file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL); 254 + file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); 255 255 ecryptfs_set_file_private(file, file_info); 256 256 if (!file_info) { 257 257 ecryptfs_printk(KERN_ERR, ··· 259 259 rc = -ENOMEM; 260 260 goto out; 261 261 } 262 - memset(file_info, 0, sizeof(*file_info)); 263 262 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 264 263 crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat; 265 264 mount_crypt_stat = &ecryptfs_superblock_to_private(
+2 -3
fs/ecryptfs/inode.c
··· 361 361 goto out; 362 362 } 363 363 /* Released in this function */ 364 - page_virt = 365 - (char *)kmem_cache_alloc(ecryptfs_header_cache_2, 364 + page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, 366 365 GFP_USER); 367 366 if (!page_virt) { 368 367 rc = -ENOMEM; ··· 369 370 "Cannot ecryptfs_kmalloc a page\n"); 370 371 goto out_dput; 371 372 } 372 - memset(page_virt, 0, PAGE_CACHE_SIZE); 373 + 373 374 rc = ecryptfs_read_header_region(page_virt, lower_dentry, nd->mnt); 374 375 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; 375 376 if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED))
+1 -3
fs/ecryptfs/keystore.c
··· 207 207 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or 208 208 * at end of function upon failure */ 209 209 auth_tok_list_item = 210 - kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL); 210 + kmem_cache_zalloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL); 211 211 if (!auth_tok_list_item) { 212 212 ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n"); 213 213 rc = -ENOMEM; 214 214 goto out; 215 215 } 216 - memset(auth_tok_list_item, 0, 217 - sizeof(struct ecryptfs_auth_tok_list_item)); 218 216 (*new_auth_tok) = &auth_tok_list_item->auth_tok; 219 217 220 218 /* check for body size - one to two bytes */
+2 -6
fs/ecryptfs/main.c
··· 378 378 379 379 /* Released in ecryptfs_put_super() */ 380 380 ecryptfs_set_superblock_private(sb, 381 - kmem_cache_alloc(ecryptfs_sb_info_cache, 381 + kmem_cache_zalloc(ecryptfs_sb_info_cache, 382 382 GFP_KERNEL)); 383 383 if (!ecryptfs_superblock_to_private(sb)) { 384 384 ecryptfs_printk(KERN_WARNING, "Out of memory\n"); 385 385 rc = -ENOMEM; 386 386 goto out; 387 387 } 388 - memset(ecryptfs_superblock_to_private(sb), 0, 389 - sizeof(struct ecryptfs_sb_info)); 390 388 sb->s_op = &ecryptfs_sops; 391 389 /* Released through deactivate_super(sb) from get_sb_nodev */ 392 390 sb->s_root = d_alloc(NULL, &(const struct qstr) { ··· 400 402 /* Released in d_release when dput(sb->s_root) is called */ 401 403 /* through deactivate_super(sb) from get_sb_nodev() */ 402 404 ecryptfs_set_dentry_private(sb->s_root, 403 - kmem_cache_alloc(ecryptfs_dentry_info_cache, 405 + kmem_cache_zalloc(ecryptfs_dentry_info_cache, 404 406 GFP_KERNEL)); 405 407 if (!ecryptfs_dentry_to_private(sb->s_root)) { 406 408 ecryptfs_printk(KERN_ERR, ··· 408 410 rc = -ENOMEM; 409 411 goto out; 410 412 } 411 - memset(ecryptfs_dentry_to_private(sb->s_root), 0, 412 - sizeof(struct ecryptfs_dentry_info)); 413 413 rc = 0; 414 414 out: 415 415 /* Should be able to rely on deactivate_super called from
+1 -3
fs/exec.c
··· 405 405 bprm->loader += stack_base; 406 406 bprm->exec += stack_base; 407 407 408 - mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 408 + mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 409 409 if (!mpnt) 410 410 return -ENOMEM; 411 - 412 - memset(mpnt, 0, sizeof(*mpnt)); 413 411 414 412 down_write(&mm->mmap_sem); 415 413 {
+1 -2
fs/gfs2/meta_io.c
··· 282 282 return; 283 283 } 284 284 285 - bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL), 286 - memset(bd, 0, sizeof(struct gfs2_bufdata)); 285 + bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL), 287 286 bd->bd_bh = bh; 288 287 bd->bd_gl = gl; 289 288
+1 -2
fs/namespace.c
··· 53 53 54 54 struct vfsmount *alloc_vfsmnt(const char *name) 55 55 { 56 - struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL); 56 + struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); 57 57 if (mnt) { 58 - memset(mnt, 0, sizeof(struct vfsmount)); 59 58 atomic_set(&mnt->mnt_count, 1); 60 59 INIT_LIST_HEAD(&mnt->mnt_hash); 61 60 INIT_LIST_HEAD(&mnt->mnt_child);
+1 -2
fs/smbfs/request.c
··· 61 61 struct smb_request *req; 62 62 unsigned char *buf = NULL; 63 63 64 - req = kmem_cache_alloc(req_cachep, GFP_KERNEL); 64 + req = kmem_cache_zalloc(req_cachep, GFP_KERNEL); 65 65 VERBOSE("allocating request: %p\n", req); 66 66 if (!req) 67 67 goto out; ··· 74 74 } 75 75 } 76 76 77 - memset(req, 0, sizeof(struct smb_request)); 78 77 req->rq_buffer = buf; 79 78 req->rq_bufsize = bufsize; 80 79 req->rq_server = server;
+1 -2
fs/sysfs/dir.c
··· 37 37 { 38 38 struct sysfs_dirent * sd; 39 39 40 - sd = kmem_cache_alloc(sysfs_dir_cachep, GFP_KERNEL); 40 + sd = kmem_cache_zalloc(sysfs_dir_cachep, GFP_KERNEL); 41 41 if (!sd) 42 42 return NULL; 43 43 44 - memset(sd, 0, sizeof(*sd)); 45 44 atomic_set(&sd->s_count, 1); 46 45 atomic_set(&sd->s_event, 1); 47 46 INIT_LIST_HEAD(&sd->s_children);
+1 -2
include/scsi/libsas.h
··· 558 558 static inline struct sas_task *sas_alloc_task(gfp_t flags) 559 559 { 560 560 extern struct kmem_cache *sas_task_cache; 561 - struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags); 561 + struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); 562 562 563 563 if (task) { 564 - memset(task, 0, sizeof(*task)); 565 564 INIT_LIST_HEAD(&task->list); 566 565 spin_lock_init(&task->task_state_lock); 567 566 task->task_state_flags = SAS_TASK_STATE_PENDING;
+1 -2
kernel/posix-timers.c
··· 399 399 static struct k_itimer * alloc_posix_timer(void) 400 400 { 401 401 struct k_itimer *tmr; 402 - tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL); 402 + tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL); 403 403 if (!tmr) 404 404 return tmr; 405 - memset(tmr, 0, sizeof (struct k_itimer)); 406 405 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { 407 406 kmem_cache_free(posix_timers_cache, tmr); 408 407 tmr = NULL;
+1 -2
net/core/dst.c
··· 132 132 if (ops->gc()) 133 133 return NULL; 134 134 } 135 - dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC); 135 + dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC); 136 136 if (!dst) 137 137 return NULL; 138 - memset(dst, 0, ops->entry_size); 139 138 atomic_set(&dst->__refcnt, 0); 140 139 dst->ops = ops; 141 140 dst->lastuse = jiffies;
+1 -3
net/core/neighbour.c
··· 251 251 goto out_entries; 252 252 } 253 253 254 - n = kmem_cache_alloc(tbl->kmem_cachep, GFP_ATOMIC); 254 + n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC); 255 255 if (!n) 256 256 goto out_entries; 257 - 258 - memset(n, 0, tbl->entry_size); 259 257 260 258 skb_queue_head_init(&n->arp_queue); 261 259 rwlock_init(&n->lock);
+1 -3
net/decnet/dn_table.c
··· 593 593 594 594 replace: 595 595 err = -ENOBUFS; 596 - new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL); 596 + new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL); 597 597 if (new_f == NULL) 598 598 goto out; 599 - 600 - memset(new_f, 0, sizeof(struct dn_fib_node)); 601 599 602 600 new_f->fn_key = key; 603 601 new_f->fn_type = type;
+2 -4
net/ipv4/ipmr.c
··· 479 479 */ 480 480 static struct mfc_cache *ipmr_cache_alloc(void) 481 481 { 482 - struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_KERNEL); 482 + struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL); 483 483 if(c==NULL) 484 484 return NULL; 485 - memset(c, 0, sizeof(*c)); 486 485 c->mfc_un.res.minvif = MAXVIFS; 487 486 return c; 488 487 } 489 488 490 489 static struct mfc_cache *ipmr_cache_alloc_unres(void) 491 490 { 492 - struct mfc_cache *c=kmem_cache_alloc(mrt_cachep, GFP_ATOMIC); 491 + struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC); 493 492 if(c==NULL) 494 493 return NULL; 495 - memset(c, 0, sizeof(*c)); 496 494 skb_queue_head_init(&c->mfc_un.unres.unresolved); 497 495 c->mfc_un.unres.expires = jiffies + 10*HZ; 498 496 return c;
+1 -2
net/ipv4/ipvs/ip_vs_conn.c
··· 603 603 struct ip_vs_conn *cp; 604 604 struct ip_vs_protocol *pp = ip_vs_proto_get(proto); 605 605 606 - cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC); 606 + cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC); 607 607 if (cp == NULL) { 608 608 IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n"); 609 609 return NULL; 610 610 } 611 611 612 - memset(cp, 0, sizeof(*cp)); 613 612 INIT_LIST_HEAD(&cp->c_list); 614 613 init_timer(&cp->timer); 615 614 cp->timer.data = (unsigned long)cp;
+1 -2
net/ipv4/netfilter/ip_conntrack_core.c
··· 638 638 } 639 639 } 640 640 641 - conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC); 641 + conntrack = kmem_cache_zalloc(ip_conntrack_cachep, GFP_ATOMIC); 642 642 if (!conntrack) { 643 643 DEBUGP("Can't allocate conntrack.\n"); 644 644 atomic_dec(&ip_conntrack_count); 645 645 return ERR_PTR(-ENOMEM); 646 646 } 647 647 648 - memset(conntrack, 0, sizeof(*conntrack)); 649 648 atomic_set(&conntrack->ct_general.use, 1); 650 649 conntrack->ct_general.destroy = destroy_conntrack; 651 650 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+1 -2
net/ipv6/ip6_fib.c
··· 150 150 { 151 151 struct fib6_node *fn; 152 152 153 - if ((fn = kmem_cache_alloc(fib6_node_kmem, GFP_ATOMIC)) != NULL) 154 - memset(fn, 0, sizeof(struct fib6_node)); 153 + fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC); 155 154 156 155 return fn; 157 156 }
+1 -2
net/sctp/sm_make_chunk.c
··· 979 979 { 980 980 struct sctp_chunk *retval; 981 981 982 - retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC); 982 + retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); 983 983 984 984 if (!retval) 985 985 goto nodata; 986 - memset(retval, 0, sizeof(struct sctp_chunk)); 987 986 988 987 if (!sk) { 989 988 SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
+1 -2
security/selinux/avc.c
··· 332 332 { 333 333 struct avc_node *node; 334 334 335 - node = kmem_cache_alloc(avc_node_cachep, GFP_ATOMIC); 335 + node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC); 336 336 if (!node) 337 337 goto out; 338 338 339 - memset(node, 0, sizeof(*node)); 340 339 INIT_RCU_HEAD(&node->rhead); 341 340 INIT_LIST_HEAD(&node->list); 342 341 atomic_set(&node->ae.used, 1);
+1 -2
security/selinux/hooks.c
··· 181 181 struct task_security_struct *tsec = current->security; 182 182 struct inode_security_struct *isec; 183 183 184 - isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL); 184 + isec = kmem_cache_zalloc(sel_inode_cache, GFP_KERNEL); 185 185 if (!isec) 186 186 return -ENOMEM; 187 187 188 - memset(isec, 0, sizeof(*isec)); 189 188 mutex_init(&isec->lock); 190 189 INIT_LIST_HEAD(&isec->list); 191 190 isec->inode = inode;
+1 -2
security/selinux/ss/avtab.c
··· 36 36 struct avtab_key *key, struct avtab_datum *datum) 37 37 { 38 38 struct avtab_node * newnode; 39 - newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL); 39 + newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL); 40 40 if (newnode == NULL) 41 41 return NULL; 42 - memset(newnode, 0, sizeof(struct avtab_node)); 43 42 newnode->key = *key; 44 43 newnode->datum = *datum; 45 44 if (prev) {