Merge branch 'kmem_death' of master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6

* 'kmem_death' of master.kernel.org:/pub/scm/linux/kernel/git/lethal/sh-2.6:
mm: Remove slab destructors from kmem_cache_create().
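The merged commit removes the never-used destructor argument from
kmem_cache_create(): all three allocators (slab, slob, slub) now take a
single constructor callback, and every in-tree caller drops its trailing
NULL. As a before/after sketch (the cache name, struct, and ctor below are
illustrative placeholders, not taken from the diff):

	/* before: name, size, align, flags, ctor, dtor */
	cachep = kmem_cache_create("example_cache", sizeof(struct example),
				   0, SLAB_HWCACHE_ALIGN, example_ctor, NULL);

	/* after: the dtor parameter is gone; nothing else changes */
	cachep = kmem_cache_create("example_cache", sizeof(struct example),
				   0, SLAB_HWCACHE_ALIGN, example_ctor);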

+247 -268
+1 -1
arch/arm/plat-s3c24xx/dma.c
···
1333 1333		dma_kmem = kmem_cache_create("dma_desc",
1334 1334				     sizeof(struct s3c2410_dma_buf), 0,
1335 1335				     SLAB_HWCACHE_ALIGN,
1336 -				     s3c2410_dma_cache_ctor, NULL);
1336 +				     s3c2410_dma_cache_ctor);
1337 1337	
1338 1338		if (dma_kmem == NULL) {
1339 1339			printk(KERN_ERR "dma failed to make kmem cache\n");
+2 -2
arch/arm26/mm/memc.c
···
176 176	{
177 177		pte_cache = kmem_cache_create("pte-cache",
178 178				sizeof(pte_t) * PTRS_PER_PTE,
179 -			0, SLAB_PANIC, pte_cache_ctor, NULL);
179 +			0, SLAB_PANIC, pte_cache_ctor);
180 180	
181 181		pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
182 182				sizeof(pgd_t) * PTRS_PER_PGD,
183 -			0, SLAB_PANIC, pgd_cache_ctor, NULL);
183 +			0, SLAB_PANIC, pgd_cache_ctor);
184 184	}
+1 -2
arch/i386/mm/init.c
···
752 752				PTRS_PER_MD*sizeof(pmd_t),
753 753				PTRS_PER_PMD*sizeof(pmd_t),
754 754				SLAB_PANIC,
755 -				pmd_ctor,
756 -				NULL);
755 +				pmd_ctor);
757 756		if (!SHARED_KERNEL_PMD) {
758 757			/* If we're in PAE mode and have a non-shared
759 758			   kernel pmd, then the pgd size must be a
+1 -1
arch/ia64/ia32/ia32_support.c
···
253 253	
254 254		partial_page_cachep = kmem_cache_create("partial_page_cache",
255 255					sizeof(struct partial_page),
256 -				0, SLAB_PANIC, NULL, NULL);
256 +				0, SLAB_PANIC, NULL);
257 257	}
258 258	#endif
259 259		return 0;
+1 -1
arch/powerpc/kernel/rtas_flash.c
···
804 804	
805 805		flash_block_cache = kmem_cache_create("rtas_flash_cache",
806 806				RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
807 -			rtas_block_ctor, NULL);
807 +			rtas_block_ctor);
808 808		if (!flash_block_cache) {
809 809			printk(KERN_ERR "%s: failed to create block cache\n",
810 810				__FUNCTION__);
+1 -1
arch/powerpc/mm/hugetlbpage.c
···
542 542				HUGEPTE_TABLE_SIZE,
543 543				HUGEPTE_TABLE_SIZE,
544 544				0,
545 -			zero_ctor, NULL);
545 +			zero_ctor);
546 546		if (! huge_pgtable_cache)
547 547			panic("hugetlbpage_init(): could not create hugepte cache\n");
548 548	
+1 -2
arch/powerpc/mm/init_64.c
···
178 178			pgtable_cache[i] = kmem_cache_create(name,
179 179						size, size,
180 180						SLAB_PANIC,
181 -					zero_ctor,
182 -					NULL);
181 +					zero_ctor);
183 182		}
184 183	}
+1 -1
arch/powerpc/platforms/cell/spufs/inode.c
···
654 654		ret = -ENOMEM;
655 655		spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
656 656				sizeof(struct spufs_inode_info), 0,
657 -			SLAB_HWCACHE_ALIGN, spufs_init_once, NULL);
657 +			SLAB_HWCACHE_ALIGN, spufs_init_once);
658 658	
659 659		if (!spufs_inode_cache)
660 660			goto out;
+1 -2
arch/sh/kernel/cpu/sh4/sq.c
···
371 371		printk(KERN_NOTICE "sq: Registering store queue API.\n");
372 372	
373 373		sq_cache = kmem_cache_create("store_queue_cache",
374 -			sizeof(struct sq_mapping), 0, 0,
375 -			NULL, NULL);
374 +			sizeof(struct sq_mapping), 0, 0, NULL);
376 375		if (unlikely(!sq_cache))
377 376			return ret;
378 377	
+1 -1
arch/sh/mm/pmb.c
···
310 310		BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
311 311	
312 312		pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
313 -			SLAB_PANIC, pmb_cache_ctor, NULL);
313 +			SLAB_PANIC, pmb_cache_ctor);
314 314	
315 315		jump_to_P2();
316 316	
+1 -2
arch/sparc64/mm/tsb.c
···
262 262	
263 263		tsb_caches[i] = kmem_cache_create(name,
264 264					size, size,
265 -				0,
266 -				NULL, NULL);
265 +				0, NULL);
267 266		if (!tsb_caches[i]) {
268 267			prom_printf("Could not create %s cache\n", name);
269 268			prom_halt();
+1 -1
block/bsg.c
···
1043 1043		dev_t devid;
1044 1044	
1045 1045		bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
1046 -			sizeof(struct bsg_command), 0, 0, NULL, NULL);
1046 +			sizeof(struct bsg_command), 0, 0, NULL);
1047 1047		if (!bsg_cmd_cachep) {
1048 1048			printk(KERN_ERR "bsg: failed creating slab cache\n");
1049 1049			return -ENOMEM;
+3 -3
block/ll_rw_blk.c
···
3698 3698		panic("Failed to create kblockd\n");
3699 3699	
3700 3700		request_cachep = kmem_cache_create("blkdev_requests",
3701 -			sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
3701 +			sizeof(struct request), 0, SLAB_PANIC, NULL);
3702 3702	
3703 3703		requestq_cachep = kmem_cache_create("blkdev_queue",
3704 -			sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
3704 +			sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
3705 3705	
3706 3706		iocontext_cachep = kmem_cache_create("blkdev_ioc",
3707 -			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3707 +			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
3708 3708	
3709 3709		for_each_possible_cpu(i)
3710 3710			INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+1 -1
drivers/acpi/osl.c
···
1098 1098	acpi_status
1099 1099	acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1100 1100	{
1101 -		*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
1101 +		*cache = kmem_cache_create(name, size, 0, 0, NULL);
1102 1102		if (*cache == NULL)
1103 1103			return AE_ERROR;
1104 1104		else
+2 -2
drivers/block/aoe/aoeblk.c
···
257 257	int __init
258 258	aoeblk_init(void)
259 259	{
260 -		buf_pool_cache = kmem_cache_create("aoe_bufs", 
260 +		buf_pool_cache = kmem_cache_create("aoe_bufs",
261 261				sizeof(struct buf),
262 -			0, 0, NULL, NULL);
262 +			0, 0, NULL);
263 263		if (buf_pool_cache == NULL)
264 264			return -ENOMEM;
265 265	
+1 -1
drivers/ieee1394/eth1394.c
···
1729 1729	
1730 1730		packet_task_cache = kmem_cache_create("packet_task",
1731 1731				sizeof(struct packet_task),
1732 -			0, 0, NULL, NULL);
1732 +			0, 0, NULL);
1733 1733		if (!packet_task_cache)
1734 1734			return -ENOMEM;
1735 1735	
-1
drivers/infiniband/core/mad.c
···
2998 2998			sizeof(struct ib_mad_private),
2999 2999			0,
3000 3000			SLAB_HWCACHE_ALIGN,
3001 -			NULL,
3002 3001			NULL);
3003 3002		if (!ib_mad_cache) {
3004 3003			printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+1 -1
drivers/infiniband/hw/amso1100/c2_vq.c
···
85 85			(char) ('0' + c2dev->devnum));
86 86		c2dev->host_msg_cache =
87 87			kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
88 -				SLAB_HWCACHE_ALIGN, NULL, NULL);
88 +				SLAB_HWCACHE_ALIGN, NULL);
89 89		if (c2dev->host_msg_cache == NULL) {
90 90			return -ENOMEM;
91 91		}
+1 -1
drivers/infiniband/hw/ehca/ehca_av.c
···
259 259		av_cache = kmem_cache_create("ehca_cache_av",
260 260				sizeof(struct ehca_av), 0,
261 261				SLAB_HWCACHE_ALIGN,
262 -			NULL, NULL);
262 +			NULL);
263 263		if (!av_cache)
264 264			return -ENOMEM;
265 265		return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_cq.c
···
387 387		cq_cache = kmem_cache_create("ehca_cache_cq",
388 388				sizeof(struct ehca_cq), 0,
389 389				SLAB_HWCACHE_ALIGN,
390 -			NULL, NULL);
390 +			NULL);
391 391		if (!cq_cache)
392 392			return -ENOMEM;
393 393		return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_main.c
···
163 163		ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
164 164				EHCA_PAGESIZE, H_CB_ALIGNMENT,
165 165				SLAB_HWCACHE_ALIGN,
166 -			NULL, NULL);
166 +			NULL);
167 167		if (!ctblk_cache) {
168 168			ehca_gen_err("Cannot create ctblk SLAB cache.");
169 169			ehca_cleanup_mrmw_cache();
+2 -2
drivers/infiniband/hw/ehca/ehca_mrmw.c
···
1950 1950		mr_cache = kmem_cache_create("ehca_cache_mr",
1951 1951				sizeof(struct ehca_mr), 0,
1952 1952				SLAB_HWCACHE_ALIGN,
1953 -			NULL, NULL);
1953 +			NULL);
1954 1954		if (!mr_cache)
1955 1955			return -ENOMEM;
1956 1956		mw_cache = kmem_cache_create("ehca_cache_mw",
1957 1957				sizeof(struct ehca_mw), 0,
1958 1958				SLAB_HWCACHE_ALIGN,
1959 -			NULL, NULL);
1959 +			NULL);
1960 1960		if (!mw_cache) {
1961 1961			kmem_cache_destroy(mr_cache);
1962 1962			mr_cache = NULL;
+1 -1
drivers/infiniband/hw/ehca/ehca_pd.c
···
100 100		pd_cache = kmem_cache_create("ehca_cache_pd",
101 101				sizeof(struct ehca_pd), 0,
102 102				SLAB_HWCACHE_ALIGN,
103 -			NULL, NULL);
103 +			NULL);
104 104		if (!pd_cache)
105 105			return -ENOMEM;
106 106		return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_qp.c
···
1760 1760		qp_cache = kmem_cache_create("ehca_cache_qp",
1761 1761				sizeof(struct ehca_qp), 0,
1762 1762				SLAB_HWCACHE_ALIGN,
1763 -			NULL, NULL);
1763 +			NULL);
1764 1764		if (!qp_cache)
1765 1765			return -ENOMEM;
1766 1766		return 0;
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
···
637 637		ig.desc_cache = kmem_cache_create("iser_descriptors",
638 638				sizeof (struct iser_desc),
639 639				0, SLAB_HWCACHE_ALIGN,
640 -			NULL, NULL);
640 +			NULL);
641 641		if (ig.desc_cache == NULL)
642 642			return -ENOMEM;
643 643	
+4 -4
drivers/kvm/mmu.c
···
1332 1332	{
1333 1333		pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1334 1334					sizeof(struct kvm_pte_chain),
1335 -				0, 0, NULL, NULL);
1335 +				0, 0, NULL);
1336 1336		if (!pte_chain_cache)
1337 1337			goto nomem;
1338 1338		rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1339 1339					sizeof(struct kvm_rmap_desc),
1340 -				0, 0, NULL, NULL);
1340 +				0, 0, NULL);
1341 1341		if (!rmap_desc_cache)
1342 1342			goto nomem;
1343 1343	
1344 1344		mmu_page_cache = kmem_cache_create("kvm_mmu_page",
1345 1345					PAGE_SIZE,
1346 -				PAGE_SIZE, 0, NULL, NULL);
1346 +				PAGE_SIZE, 0, NULL);
1347 1347		if (!mmu_page_cache)
1348 1348			goto nomem;
1349 1349	
1350 1350		mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1351 1351					sizeof(struct kvm_mmu_page),
1352 -				0, 0, NULL, NULL);
1352 +				0, 0, NULL);
1353 1353		if (!mmu_page_header_cache)
1354 1354			goto nomem;
1355 1355	
+2 -2
drivers/md/raid5.c
···
951 951		conf->active_name = 0;
952 952		sc = kmem_cache_create(conf->cache_name[conf->active_name],
953 953			sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
954 -		0, 0, NULL, NULL);
954 +		0, 0, NULL);
955 955		if (!sc)
956 956			return 1;
957 957		conf->slab_cache = sc;
···
1003 1003		/* Step 1 */
1004 1004		sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1005 1005			sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1006 -		0, 0, NULL, NULL);
1006 +		0, 0, NULL);
1007 1007		if (!sc)
1008 1008			return -ENOMEM;
1009 1009	
+1 -2
drivers/message/i2o/i2o_block.c
···
1171 1171		/* Allocate request mempool and slab */
1172 1172		size = sizeof(struct i2o_block_request);
1173 1173		i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
1174 -				SLAB_HWCACHE_ALIGN, NULL,
1175 -				NULL);
1174 +				SLAB_HWCACHE_ALIGN, NULL);
1176 1175		if (!i2o_blk_req_pool.slab) {
1177 1176			osm_err("can't init request slab\n");
1178 1177			rc = -ENOMEM;
+1 -1
drivers/mtd/ubi/eba.c
···
1149 1149		if (ubi_devices_cnt == 0) {
1150 1150			ltree_slab = kmem_cache_create("ubi_ltree_slab",
1151 1151					sizeof(struct ltree_entry), 0,
1152 -				0, &ltree_entry_ctor, NULL);
1152 +				0, &ltree_entry_ctor);
1153 1153			if (!ltree_slab)
1154 1154				return -ENOMEM;
1155 1155		}
+1 -1
drivers/mtd/ubi/wl.c
···
1452 1452		if (ubi_devices_cnt == 0) {
1453 1453			wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
1454 1454					sizeof(struct ubi_wl_entry),
1455 -				0, 0, NULL, NULL);
1455 +				0, 0, NULL);
1456 1456			if (!wl_entries_slab)
1457 1457				return -ENOMEM;
1458 1458		}
+1 -1
drivers/s390/block/dasd_devmap.c
···
291 291		dasd_page_cache =
292 292			kmem_cache_create("dasd_page_cache", PAGE_SIZE,
293 293					  PAGE_SIZE, SLAB_CACHE_DMA,
294 -				  NULL, NULL );
294 +				  NULL);
295 295		if (!dasd_page_cache)
296 296			MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
297 297				"fixed buffer mode disabled.");
+3 -3
drivers/s390/scsi/zfcp_aux.c
···
259 259		size = sizeof(struct zfcp_fsf_req_qtcb);
260 260		align = calc_alignment(size);
261 261		zfcp_data.fsf_req_qtcb_cache =
262 -			kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL);
262 +			kmem_cache_create("zfcp_fsf", size, align, 0, NULL);
263 263		if (!zfcp_data.fsf_req_qtcb_cache)
264 264			goto out;
265 265	
266 266		size = sizeof(struct fsf_status_read_buffer);
267 267		align = calc_alignment(size);
268 268		zfcp_data.sr_buffer_cache =
269 -			kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL);
269 +			kmem_cache_create("zfcp_sr", size, align, 0, NULL);
270 270		if (!zfcp_data.sr_buffer_cache)
271 271			goto out_sr_cache;
272 272	
273 273		size = sizeof(struct zfcp_gid_pn_data);
274 274		align = calc_alignment(size);
275 275		zfcp_data.gid_pn_cache =
276 -			kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL);
276 +			kmem_cache_create("zfcp_gid", size, align, 0, NULL);
277 277		if (!zfcp_data.gid_pn_cache)
278 278			goto out_gid_cache;
279 279	
+2 -2
drivers/scsi/aic94xx/aic94xx_init.c
···
462 462				sizeof(struct asd_dma_tok),
463 463				0,
464 464				SLAB_HWCACHE_ALIGN,
465 -			NULL, NULL);
465 +			NULL);
466 466		if (!asd_dma_token_cache) {
467 467			asd_printk("couldn't create dma token cache\n");
468 468			return -ENOMEM;
···
474 474				sizeof(struct asd_ascb),
475 475				0,
476 476				SLAB_HWCACHE_ALIGN,
477 -			NULL, NULL);
477 +			NULL);
478 478		if (!asd_ascb_cache) {
479 479			asd_printk("couldn't create ascb cache\n");
480 480			goto Err;
+1 -1
drivers/scsi/libsas/sas_init.c
···
292 292	static int __init sas_class_init(void)
293 293	{
294 294		sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task),
295 -				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
295 +				0, SLAB_HWCACHE_ALIGN, NULL);
296 296		if (!sas_task_cache)
297 297			return -ENOMEM;
298 298	
+1 -1
drivers/scsi/qla2xxx/qla_os.c
···
2723 2723	
2724 2724		/* Allocate cache for SRBs. */
2725 2725		srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
2726 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
2726 +			SLAB_HWCACHE_ALIGN, NULL);
2727 2727		if (srb_cachep == NULL) {
2728 2728			printk(KERN_ERR
2729 2729				"qla2xxx: Unable to allocate SRB cache...Failing load!\n");
+1 -1
drivers/scsi/qla4xxx/ql4_os.c
···
1677 1677	
1678 1678		/* Allocate cache for SRBs. */
1679 1679		srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0,
1680 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
1680 +			SLAB_HWCACHE_ALIGN, NULL);
1681 1681		if (srb_cachep == NULL) {
1682 1682			printk(KERN_ERR
1683 1683				"%s: Unable to allocate SRB cache..."
+1 -1
drivers/scsi/scsi.c
···
288 288		if (!pool->users) {
289 289			pool->slab = kmem_cache_create(pool->name,
290 290					sizeof(struct scsi_cmnd), 0,
291 -				pool->slab_flags, NULL, NULL);
291 +				pool->slab_flags, NULL);
292 292			if (!pool->slab)
293 293				goto fail;
294 294		}
+2 -2
drivers/scsi/scsi_lib.c
···
1661 1661	
1662 1662		scsi_io_context_cache = kmem_cache_create("scsi_io_context",
1663 1663					sizeof(struct scsi_io_context),
1664 -				0, 0, NULL, NULL);
1664 +				0, 0, NULL);
1665 1665		if (!scsi_io_context_cache) {
1666 1666			printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
1667 1667			return -ENOMEM;
···
1672 1672			int size = sgp->size * sizeof(struct scatterlist);
1673 1673	
1674 1674			sgp->slab = kmem_cache_create(sgp->name, size, 0,
1675 -				SLAB_HWCACHE_ALIGN, NULL, NULL);
1675 +				SLAB_HWCACHE_ALIGN, NULL);
1676 1676			if (!sgp->slab) {
1677 1677				printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1678 1678					sgp->name);
+1 -1
drivers/scsi/scsi_tgt_lib.c
···
585 585	
586 586		scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
587 587					sizeof(struct scsi_tgt_cmd),
588 -				0, 0, NULL, NULL);
588 +				0, 0, NULL);
589 589		if (!scsi_tgt_cmd_cache)
590 590			return -ENOMEM;
591 591	
+1 -1
drivers/usb/host/uhci-hcd.c
···
933 933	}
934 934	
935 935		uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
936 -			sizeof(struct urb_priv), 0, 0, NULL, NULL);
936 +			sizeof(struct urb_priv), 0, 0, NULL);
937 937		if (!uhci_up_cachep)
938 938			goto up_failed;
939 939	
+1 -1
drivers/usb/mon/mon_text.c
···
340 340		snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp);
341 341		rp->e_slab = kmem_cache_create(rp->slab_name,
342 342			sizeof(struct mon_event_text), sizeof(long), 0,
343 -		mon_text_ctor, NULL);
343 +		mon_text_ctor);
344 344		if (rp->e_slab == NULL) {
345 345			rc = -ENOMEM;
346 346			goto err_slab;
+2 -2
fs/adfs/super.c
···
234 234	
235 235		inode_init_once(&ei->vfs_inode);
236 236	}
237 -	
237 +
238 238	static int init_inodecache(void)
239 239	{
240 240		adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
241 241				sizeof(struct adfs_inode_info),
242 242				0, (SLAB_RECLAIM_ACCOUNT|
243 243					SLAB_MEM_SPREAD),
244 -			init_once, NULL);
244 +			init_once);
245 245		if (adfs_inode_cachep == NULL)
246 246			return -ENOMEM;
247 247		return 0;
+1 -1
fs/affs/super.c
···
99 99			sizeof(struct affs_inode_info),
100 100			0, (SLAB_RECLAIM_ACCOUNT|
101 101				SLAB_MEM_SPREAD),
102 -		init_once, NULL);
102 +		init_once);
103 103		if (affs_inode_cachep == NULL)
104 104			return -ENOMEM;
105 105		return 0;
+1 -2
fs/afs/super.c
···
89 89			sizeof(struct afs_vnode),
90 90			0,
91 91			SLAB_HWCACHE_ALIGN,
92 -		afs_i_init_once,
93 -		NULL);
92 +		afs_i_init_once);
94 93		if (!afs_inode_cachep) {
95 94			printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
96 95			return ret;
+2 -2
fs/befs/linuxvfs.c
···
414 414	}
415 415	
416 416	/* Initialize the inode cache. Called at fs setup.
417 -	 * 
417 +	 *
418 418	 * Taken from NFS implementation by Al Viro.
419 419	 */
420 420	static int
···
424 424			sizeof (struct befs_inode_info),
425 425			0, (SLAB_RECLAIM_ACCOUNT|
426 426				SLAB_MEM_SPREAD),
427 -		init_once, NULL);
427 +		init_once);
428 428		if (befs_inode_cachep == NULL) {
429 429			printk(KERN_ERR "befs_init_inodecache: "
430 430				"Couldn't initalize inode slabcache\n");
+2 -2
fs/bfs/inode.c
···
250 250	
251 251		inode_init_once(&bi->vfs_inode);
252 252	}
253 -	
253 +
254 254	static int init_inodecache(void)
255 255	{
256 256		bfs_inode_cachep = kmem_cache_create("bfs_inode_cache",
257 257				sizeof(struct bfs_inode_info),
258 258				0, (SLAB_RECLAIM_ACCOUNT|
259 259					SLAB_MEM_SPREAD),
260 -			init_once, NULL);
260 +			init_once);
261 261		if (bfs_inode_cachep == NULL)
262 262			return -ENOMEM;
263 263		return 0;
+1 -1
fs/bio.c
···
1187 1187	
1188 1188			size = bvs->nr_vecs * sizeof(struct bio_vec);
1189 1189			bvs->slab = kmem_cache_create(bvs->name, size, 0,
1190 -				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1190 +				SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1191 1191		}
1192 1192	}
1193 1193	
+1 -1
fs/block_dev.c
···
517 517		bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
518 518			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
519 519				SLAB_MEM_SPREAD|SLAB_PANIC),
520 -		init_once, NULL);
520 +		init_once);
521 521		err = register_filesystem(&bd_type);
522 522		if (err)
523 523			panic("Cannot register bdev pseudo-fs");
+5 -5
fs/cifs/cifsfs.c
···
719 719			sizeof (struct cifsInodeInfo),
720 720			0, (SLAB_RECLAIM_ACCOUNT|
721 721				SLAB_MEM_SPREAD),
722 -		cifs_init_once, NULL);
722 +		cifs_init_once);
723 723		if (cifs_inode_cachep == NULL)
724 724			return -ENOMEM;
725 725	
···
748 748		cifs_req_cachep = kmem_cache_create("cifs_request",
749 749					CIFSMaxBufSize +
750 750					MAX_CIFS_HDR_SIZE, 0,
751 -				SLAB_HWCACHE_ALIGN, NULL, NULL);
751 +				SLAB_HWCACHE_ALIGN, NULL);
752 752		if (cifs_req_cachep == NULL)
753 753			return -ENOMEM;
754 754	
···
776 776		   alloc of large cifs buffers even when page debugging is on */
777 777		cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
778 778			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
779 -		NULL, NULL);
779 +		NULL);
780 780		if (cifs_sm_req_cachep == NULL) {
781 781			mempool_destroy(cifs_req_poolp);
782 782			kmem_cache_destroy(cifs_req_cachep);
···
817 817	{
818 818		cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
819 819				sizeof (struct mid_q_entry), 0,
820 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
820 +			SLAB_HWCACHE_ALIGN, NULL);
821 821		if (cifs_mid_cachep == NULL)
822 822			return -ENOMEM;
823 823	
···
830 830	
831 831		cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
832 832				sizeof (struct oplock_q_entry), 0,
833 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
833 +			SLAB_HWCACHE_ALIGN, NULL);
834 834		if (cifs_oplock_cachep == NULL) {
835 835			mempool_destroy(cifs_mid_poolp);
836 836			kmem_cache_destroy(cifs_mid_cachep);
+2 -2
fs/coda/inode.c
···
64 64	
65 65		inode_init_once(&ei->vfs_inode);
66 66	}
67 -	
67 +
68 68	int coda_init_inodecache(void)
69 69	{
70 70		coda_inode_cachep = kmem_cache_create("coda_inode_cache",
71 71				sizeof(struct coda_inode_info),
72 72				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
73 -			init_once, NULL);
73 +			init_once);
74 74		if (coda_inode_cachep == NULL)
75 75			return -ENOMEM;
76 76		return 0;
+1 -1
fs/configfs/mount.c
···
136 136	
137 137		configfs_dir_cachep = kmem_cache_create("configfs_dir_cache",
138 138					sizeof(struct configfs_dirent),
139 -				0, 0, NULL, NULL);
139 +				0, 0, NULL);
140 140		if (!configfs_dir_cachep)
141 141			goto out;
142 142	
+2 -2
fs/dcache.c
···
2165 2165		mempages -= reserve;
2166 2166	
2167 2167		names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
2168 -			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2168 +			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2169 2169	
2170 2170		filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
2171 -			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2171 +			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2172 2172	
2173 2173		dcache_init(mempages);
2174 2174		inode_init(mempages);
+1 -1
fs/dcookies.c
···
205 205	
206 206		dcookie_cache = kmem_cache_create("dcookie_cache",
207 207				sizeof(struct dcookie_struct),
208 -			0, 0, NULL, NULL);
208 +			0, 0, NULL);
209 209	
210 210		if (!dcookie_cache)
211 211			goto out;
+1 -1
fs/dlm/lowcomms.c
···
1449 1449		error = -ENOMEM;
1450 1450		con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1451 1451					      __alignof__(struct connection), 0,
1452 -				      NULL, NULL);
1452 +				      NULL);
1453 1453		if (!con_cache)
1454 1454			goto out;
1455 1455	
+1 -1
fs/dlm/memory.c
···
23 23		int ret = 0;
24 24	
25 25		lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
26 -			__alignof__(struct dlm_lkb), 0, NULL, NULL);
26 +			__alignof__(struct dlm_lkb), 0, NULL);
27 27		if (!lkb_cache)
28 28			ret = -ENOMEM;
29 29		return ret;
+1 -1
fs/dnotify.c
···
176 176	static int __init dnotify_init(void)
177 177	{
178 178		dn_cache = kmem_cache_create("dnotify_cache",
179 -		sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL);
179 +		sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL);
180 180		return 0;
181 181	}
182 182	
+2 -2
fs/dquot.c
···
1848 1848	
1849 1849		register_sysctl_table(sys_table);
1850 1850	
1851 -		dquot_cachep = kmem_cache_create("dquot", 
1851 +		dquot_cachep = kmem_cache_create("dquot",
1852 1852			sizeof(struct dquot), sizeof(unsigned long) * 4,
1853 1853			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
1854 1854				SLAB_MEM_SPREAD|SLAB_PANIC),
1855 -		NULL, NULL);
1855 +		NULL);
1856 1856	
1857 1857		order = 0;
1858 1858		dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
+1 -1
fs/ecryptfs/main.c
···
677 677	
678 678		info = &ecryptfs_cache_infos[i];
679 679		*(info->cache) = kmem_cache_create(info->name, info->size,
680 -			0, SLAB_HWCACHE_ALIGN, info->ctor, NULL);
680 +			0, SLAB_HWCACHE_ALIGN, info->ctor);
681 681		if (!*(info->cache)) {
682 682			ecryptfs_free_kmem_caches();
683 683			ecryptfs_printk(KERN_WARNING, "%s: "
+2 -2
fs/efs/super.c
···
75 75	
76 76		inode_init_once(&ei->vfs_inode);
77 77	}
78 -	
78 +
79 79	static int init_inodecache(void)
80 80	{
81 81		efs_inode_cachep = kmem_cache_create("efs_inode_cache",
82 82				sizeof(struct efs_inode_info),
83 83				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
84 -			init_once, NULL);
84 +			init_once);
85 85		if (efs_inode_cachep == NULL)
86 86			return -ENOMEM;
87 87		return 0;
+2 -2
fs/eventpoll.c
···
1324 1324		/* Allocates slab cache used to allocate "struct epitem" items */
1325 1325		epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
1326 1326			0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
1327 -		NULL, NULL);
1327 +		NULL);
1328 1328	
1329 1329		/* Allocates slab cache used to allocate "struct eppoll_entry" */
1330 1330		pwq_cache = kmem_cache_create("eventpoll_pwq",
1331 1331			sizeof(struct eppoll_entry), 0,
1332 -		EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);
1332 +		EPI_SLAB_DEBUG|SLAB_PANIC, NULL);
1333 1333	
1334 1334		return 0;
1335 1335	}
+2 -2
fs/ext2/super.c
···
167 167	#endif
168 168		inode_init_once(&ei->vfs_inode);
169 169	}
170 -	
170 +
171 171	static int init_inodecache(void)
172 172	{
173 173		ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
174 174				sizeof(struct ext2_inode_info),
175 175				0, (SLAB_RECLAIM_ACCOUNT|
176 176					SLAB_MEM_SPREAD),
177 -			init_once, NULL);
177 +			init_once);
178 178		if (ext2_inode_cachep == NULL)
179 179			return -ENOMEM;
180 180		return 0;
+1 -1
fs/ext3/super.c
···
490 490				sizeof(struct ext3_inode_info),
491 491				0, (SLAB_RECLAIM_ACCOUNT|
492 492					SLAB_MEM_SPREAD),
493 -			init_once, NULL);
493 +			init_once);
494 494		if (ext3_inode_cachep == NULL)
495 495			return -ENOMEM;
496 496		return 0;
+1 -1
fs/ext4/super.c
···
541 541				sizeof(struct ext4_inode_info),
542 542				0, (SLAB_RECLAIM_ACCOUNT|
543 543					SLAB_MEM_SPREAD),
544 -			init_once, NULL);
544 +			init_once);
545 545		if (ext4_inode_cachep == NULL)
546 546			return -ENOMEM;
547 547		return 0;
+1 -1
fs/fat/cache.c
···
48 48		fat_cache_cachep = kmem_cache_create("fat_cache",
49 49				sizeof(struct fat_cache),
50 50				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51 -			init_once, NULL);
51 +			init_once);
52 52		if (fat_cache_cachep == NULL)
53 53			return -ENOMEM;
54 54		return 0;
+1 -1
fs/fat/inode.c
···
514 514				sizeof(struct msdos_inode_info),
515 515				0, (SLAB_RECLAIM_ACCOUNT|
516 516					SLAB_MEM_SPREAD),
517 -			init_once, NULL);
517 +			init_once);
518 518		if (fat_inode_cachep == NULL)
519 519			return -ENOMEM;
520 520		return 0;
+1 -1
fs/fcntl.c
···
638 638	static int __init fasync_init(void)
639 639	{
640 640		fasync_cache = kmem_cache_create("fasync_cache",
641 -		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL);
641 +		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
642 642		return 0;
643 643	}
644 644	
+2 -2
fs/freevxfs/vxfs_super.c
···
263 263		int rv;
264 264	
265 265		vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
266 -			sizeof(struct vxfs_inode_info), 0, 
267 -			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
266 +			sizeof(struct vxfs_inode_info), 0,
267 +			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
268 268		if (!vxfs_inode_cachep)
269 269			return -ENOMEM;
270 270		rv = register_filesystem(&vxfs_fs_type);
+1 -1
fs/fuse/dev.c
···
1044 1044		int err = -ENOMEM;
1045 1045		fuse_req_cachep = kmem_cache_create("fuse_request",
1046 1046					sizeof(struct fuse_req),
1047 -				0, 0, NULL, NULL);
1047 +				0, 0, NULL);
1048 1048		if (!fuse_req_cachep)
1049 1049			goto out;
1050 1050	
+1 -1
fs/fuse/inode.c
···
706 706		fuse_inode_cachep = kmem_cache_create("fuse_inode",
707 707				sizeof(struct fuse_inode),
708 708				0, SLAB_HWCACHE_ALIGN,
709 -			fuse_inode_init_once, NULL);
709 +			fuse_inode_init_once);
710 710		err = -ENOMEM;
711 711		if (!fuse_inode_cachep)
712 712			goto out_unreg2;
+3 -3
fs/gfs2/main.c
···
72 72		gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
73 73				sizeof(struct gfs2_glock),
74 74				0, 0,
75 -			gfs2_init_glock_once, NULL);
75 +			gfs2_init_glock_once);
76 76		if (!gfs2_glock_cachep)
77 77			goto fail;
78 78	
···
80 80				sizeof(struct gfs2_inode),
81 81				0, SLAB_RECLAIM_ACCOUNT|
82 82					SLAB_MEM_SPREAD,
83 -			gfs2_init_inode_once, NULL);
83 +			gfs2_init_inode_once);
84 84		if (!gfs2_inode_cachep)
85 85			goto fail;
86 86	
87 87		gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
88 88				sizeof(struct gfs2_bufdata),
89 -			0, 0, NULL, NULL);
89 +			0, 0, NULL);
90 90		if (!gfs2_bufdata_cachep)
91 91			goto fail;
92 92	
+1 -1
fs/hfs/super.c
···
443 443	
444 444		hfs_inode_cachep = kmem_cache_create("hfs_inode_cache",
445 445			sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN,
446 -		hfs_init_once, NULL);
446 +		hfs_init_once);
447 447		if (!hfs_inode_cachep)
448 448			return -ENOMEM;
449 449		err = register_filesystem(&hfs_fs_type);
+1 -1
fs/hfsplus/super.c
···
479 479	
480 480		hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
481 481			HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN,
482 -		hfsplus_init_once, NULL);
482 +		hfsplus_init_once);
483 483		if (!hfsplus_inode_cachep)
484 484			return -ENOMEM;
485 485		err = register_filesystem(&hfsplus_fs_type);
+2 -2
fs/hpfs/super.c
···
181 181		mutex_init(&ei->i_parent_mutex);
182 182		inode_init_once(&ei->vfs_inode);
183 183	}
184 -	
184 +
185 185	static int init_inodecache(void)
186 186	{
187 187		hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache",
188 188				sizeof(struct hpfs_inode_info),
189 189				0, (SLAB_RECLAIM_ACCOUNT|
190 190					SLAB_MEM_SPREAD),
191 -			init_once, NULL);
191 +			init_once);
192 192		if (hpfs_inode_cachep == NULL)
193 193			return -ENOMEM;
194 194		return 0;
+1 -1
fs/hugetlbfs/inode.c
···
848 848	
849 849		hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
850 850					sizeof(struct hugetlbfs_inode_info),
851 -				0, 0, init_once, NULL);
851 +				0, 0, init_once);
852 852		if (hugetlbfs_inode_cachep == NULL)
853 853			return -ENOMEM;
854 854	
+1 -2
fs/inode.c
···
1388 1388			0,
1389 1389			(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
1390 1390				SLAB_MEM_SPREAD),
1391 -		init_once,
1392 -		NULL);
1391 +		init_once);
1393 1392		register_shrinker(&icache_shrinker);
1394 1393	
1395 1394		/* Hash may have been set up in inode_init_early */
+2 -2
fs/inotify_user.c
···
716 716	
717 717		watch_cachep = kmem_cache_create("inotify_watch_cache",
718 718					sizeof(struct inotify_user_watch),
719 -				0, SLAB_PANIC, NULL, NULL);
719 +				0, SLAB_PANIC, NULL);
720 720		event_cachep = kmem_cache_create("inotify_event_cache",
721 721					sizeof(struct inotify_kernel_event),
722 -				0, SLAB_PANIC, NULL, NULL);
722 +				0, SLAB_PANIC, NULL);
723 723	
724 724		return 0;
725 725	}
+1 -1
fs/isofs/inode.c
···
86 86			sizeof(struct iso_inode_info),
87 87			0, (SLAB_RECLAIM_ACCOUNT|
88 88				SLAB_MEM_SPREAD),
89 -		init_once, NULL);
89 +		init_once);
90 90		if (isofs_inode_cachep == NULL)
91 91			return -ENOMEM;
92 92		return 0;
+3 -5
fs/jbd/journal.c
···
1668 1668	 * boundary.
1669 1669	 */
1670 1670		jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
1671 -			slab_size, slab_size, 0, NULL, NULL);
1671 +			slab_size, slab_size, 0, NULL);
1672 1672		if (!jbd_slab[i]) {
1673 1673			printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
1674 1674			return -ENOMEM;
···
1711 1711				sizeof(struct journal_head),
1712 1712				0,		/* offset */
1713 1713				0,		/* flags */
1714 -			NULL,		/* ctor */
1715 -			NULL);		/* dtor */
1714 +			NULL);		/* ctor */
1716 1715		retval = 0;
1717 1716		if (journal_head_cache == 0) {
1718 1717			retval = -ENOMEM;
···
2007 2008				sizeof(handle_t),
2008 2009				0,		/* offset */
2009 2010				0,		/* flags */
2010 -			NULL,		/* ctor */
2011 -			NULL);		/* dtor */
2011 +			NULL);		/* ctor */
2012 2012		if (jbd_handle_cache == NULL) {
2013 2013			printk(KERN_EMERG "JBD: failed to create handle cache\n");
2014 2014			return -ENOMEM;
+2 -2
fs/jbd/revoke.c
···
170 170	{
171 171		revoke_record_cache = kmem_cache_create("revoke_record",
172 172					sizeof(struct jbd_revoke_record_s),
173 -				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
173 +				0, SLAB_HWCACHE_ALIGN, NULL);
174 174		if (revoke_record_cache == 0)
175 175			return -ENOMEM;
176 176	
177 177		revoke_table_cache = kmem_cache_create("revoke_table",
178 178					sizeof(struct jbd_revoke_table_s),
179 -				0, 0, NULL, NULL);
179 +				0, 0, NULL);
180 180		if (revoke_table_cache == 0) {
181 181			kmem_cache_destroy(revoke_record_cache);
182 182			revoke_record_cache = NULL;
+3 -5
fs/jbd2/journal.c
···
1680 1680	 * boundary.
1681 1681	 */
1682 1682		jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
1683 -			slab_size, slab_size, 0, NULL, NULL);
1683 +			slab_size, slab_size, 0, NULL);
1684 1684		if (!jbd_slab[i]) {
1685 1685			printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
1686 1686			return -ENOMEM;
···
1723 1723				sizeof(struct journal_head),
1724 1724				0,		/* offset */
1725 1725				0,		/* flags */
1726 -			NULL,		/* ctor */
1727 -			NULL);		/* dtor */
1726 +			NULL);		/* ctor */
1728 1727		retval = 0;
1729 1728		if (jbd2_journal_head_cache == 0) {
1730 1729			retval = -ENOMEM;
···
2005 2006				sizeof(handle_t),
2006 2007				0,		/* offset */
2007 2008				0,		/* flags */
2008 -			NULL,		/* ctor */
2009 -			NULL);		/* dtor */
2009 +			NULL);		/* ctor */
2010 2010		if (jbd2_handle_cache == NULL) {
2011 2011			printk(KERN_EMERG "JBD: failed to create handle cache\n");
2012 2012			return -ENOMEM;
+2 -2
fs/jbd2/revoke.c
···
171 171	{
172 172		jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
173 173					sizeof(struct jbd2_revoke_record_s),
174 -				0, SLAB_HWCACHE_ALIGN, NULL, NULL);
174 +				0, SLAB_HWCACHE_ALIGN, NULL);
175 175		if (jbd2_revoke_record_cache == 0)
176 176			return -ENOMEM;
177 177	
178 178		jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
179 179					sizeof(struct jbd2_revoke_table_s),
180 -				0, 0, NULL, NULL);
180 +				0, 0, NULL);
181 181		if (jbd2_revoke_table_cache == 0) {
182 182			kmem_cache_destroy(jbd2_revoke_record_cache);
183 183			jbd2_revoke_record_cache = NULL;
+9 -9
fs/jffs2/malloc.c
···
33 33	{
34 34		full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
35 35					sizeof(struct jffs2_full_dnode),
36 -				0, 0, NULL, NULL);
36 +				0, 0, NULL);
37 37		if (!full_dnode_slab)
38 38			goto err;
39 39	
40 40		raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
41 41					sizeof(struct jffs2_raw_dirent),
42 -				0, 0, NULL, NULL);
42 +				0, 0, NULL);
43 43		if (!raw_dirent_slab)
44 44			goto err;
45 45	
46 46		raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
47 47					sizeof(struct jffs2_raw_inode),
48 -				0, 0, NULL, NULL);
48 +				0, 0, NULL);
49 49		if (!raw_inode_slab)
50 50			goto err;
51 51	
52 52		tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
53 53					sizeof(struct jffs2_tmp_dnode_info),
54 -				0, 0, NULL, NULL);
54 +				0, 0, NULL);
55 55		if (!tmp_dnode_info_slab)
56 56			goto err;
57 57	
58 58		raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
59 59				sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
60 -			0, 0, NULL, NULL);
60 +			0, 0, NULL);
61 61		if (!raw_node_ref_slab)
62 62			goto err;
63 63	
64 64		node_frag_slab = kmem_cache_create("jffs2_node_frag",
65 65					sizeof(struct jffs2_node_frag),
66 -				0, 0, NULL, NULL);
66 +				0, 0, NULL);
67 67		if (!node_frag_slab)
68 68			goto err;
69 69	
70 70		inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
71 71					sizeof(struct jffs2_inode_cache),
72 -				0, 0, NULL, NULL);
72 +				0, 0, NULL);
73 73		if (!inode_cache_slab)
74 74			goto err;
75 75	
76 76	#ifdef CONFIG_JFFS2_FS_XATTR
77 77		xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
78 78					sizeof(struct jffs2_xattr_datum),
79 -				0, 0, NULL, NULL);
79 +				0, 0, NULL);
80 80		if (!xattr_datum_cache)
81 81			goto err;
82 82	
83 83		xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
84 84					sizeof(struct jffs2_xattr_ref),
85 -				0, 0, NULL, NULL);
85 +				0, 0, NULL);
86 86		if (!xattr_ref_cache)
87 87			goto err;
88 88	#endif
+1 -1
fs/jffs2/super.c
···
192 192				sizeof(struct jffs2_inode_info),
193 193				0, (SLAB_RECLAIM_ACCOUNT|
194 194					SLAB_MEM_SPREAD),
195 -			jffs2_i_init_once, NULL);
195 +			jffs2_i_init_once);
196 196		if (!jffs2_inode_cachep) {
197 197			printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
198 198			return -ENOMEM;
+1 -1
fs/jfs/jfs_metapage.c
···
213 213		 * Allocate the metapage structures
214 214		 */
215 215		metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
216 -			0, 0, init_once, NULL);
216 +			0, 0, init_once);
217 217		if (metapage_cache == NULL)
218 218			return -ENOMEM;
219 219	
+1 -1
fs/jfs/super.c
···
776 776		jfs_inode_cachep =
777 777			kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
778 778				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
779 -			init_once, NULL);
779 +			init_once);
780 780		if (jfs_inode_cachep == NULL)
781 781			return -ENOMEM;
782 782	
+1 -1
fs/locks.c
···
2276 2276	{
2277 2277		filelock_cache = kmem_cache_create("file_lock_cache",
2278 2278			sizeof(struct file_lock), 0, SLAB_PANIC,
2279 -		init_once, NULL);
2279 +		init_once);
2280 2280		return 0;
2281 2281	}
2282 2282	
+1 -1
fs/mbcache.c
···
292 292			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
293 293		}
294 294		cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
295 -		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
295 +		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
296 296		if (!cache->c_entry_cache)
297 297			goto fail;
298 298	
+2 -2
fs/minix/inode.c
···
75 75	
76 76		inode_init_once(&ei->vfs_inode);
77 77	}
78 -	
78 +
79 79	static int init_inodecache(void)
80 80	{
81 81		minix_inode_cachep = kmem_cache_create("minix_inode_cache",
82 82				sizeof(struct minix_inode_info),
83 83				0, (SLAB_RECLAIM_ACCOUNT|
84 84					SLAB_MEM_SPREAD),
85 -			init_once, NULL);
85 +			init_once);
86 86		if (minix_inode_cachep == NULL)
87 87			return -ENOMEM;
88 88		return 0;
+1 -1
fs/namespace.c
···
1801 1801		init_rwsem(&namespace_sem);
1802 1802	
1803 1803		mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
1804 -			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
1804 +			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1805 1805	
1806 1806		mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
1807 1807	
+2 -2
fs/ncpfs/inode.c
···
63 63		mutex_init(&ei->open_mutex);
64 64		inode_init_once(&ei->vfs_inode);
65 65	}
66 -	
66 +
67 67	static int init_inodecache(void)
68 68	{
69 69		ncp_inode_cachep = kmem_cache_create("ncp_inode_cache",
70 70				sizeof(struct ncp_inode_info),
71 71				0, (SLAB_RECLAIM_ACCOUNT|
72 72					SLAB_MEM_SPREAD),
73 -			init_once, NULL);
73 +			init_once);
74 74		if (ncp_inode_cachep == NULL)
75 75			return -ENOMEM;
76 76		return 0;
+1 -1
fs/nfs/direct.c
···
875 875				sizeof(struct nfs_direct_req),
876 876				0, (SLAB_RECLAIM_ACCOUNT|
877 877					SLAB_MEM_SPREAD),
878 -			NULL, NULL);
878 +			NULL);
879 879		if (nfs_direct_cachep == NULL)
880 880			return -ENOMEM;
881 881	
+2 -2
fs/nfs/inode.c
···
1165 1165		nfsi->npages = 0;
1166 1166		nfs4_init_once(nfsi);
1167 1167	}
1168 -	
1168 +
1169 1169	static int __init nfs_init_inodecache(void)
1170 1170	{
1171 1171		nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
1172 1172				sizeof(struct nfs_inode),
1173 1173				0, (SLAB_RECLAIM_ACCOUNT|
1174 1174					SLAB_MEM_SPREAD),
1175 -			init_once, NULL);
1175 +			init_once);
1176 1176		if (nfs_inode_cachep == NULL)
1177 1177			return -ENOMEM;
1178 1178	
+1 -1
fs/nfs/pagelist.c
···
442 442		nfs_page_cachep = kmem_cache_create("nfs_page",
443 443				sizeof(struct nfs_page),
444 444				0, SLAB_HWCACHE_ALIGN,
445 -			NULL, NULL);
445 +			NULL);
446 446		if (nfs_page_cachep == NULL)
447 447			return -ENOMEM;
448 448	
+1 -1
fs/nfs/read.c
···
598 598		nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
599 599				sizeof(struct nfs_read_data),
600 600				0, SLAB_HWCACHE_ALIGN,
601 -			NULL, NULL);
601 +			NULL);
602 602		if (nfs_rdata_cachep == NULL)
603 603			return -ENOMEM;
604 604	
+1 -1
fs/nfs/write.c
···
1467 1467		nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1468 1468				sizeof(struct nfs_write_data),
1469 1469				0, SLAB_HWCACHE_ALIGN,
1470 -			NULL, NULL);
1470 +			NULL);
1471 1471		if (nfs_wdata_cachep == NULL)
1472 1472			return -ENOMEM;
1473 1473	
+4 -4
fs/nfsd/nfs4state.c
···
1032 1032	nfsd4_init_slabs(void)
1033 1033	{
1034 1034		stateowner_slab = kmem_cache_create("nfsd4_stateowners",
1035 -			sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL);
1035 +			sizeof(struct nfs4_stateowner), 0, 0, NULL);
1036 1036		if (stateowner_slab == NULL)
1037 1037			goto out_nomem;
1038 1038		file_slab = kmem_cache_create("nfsd4_files",
1039 -			sizeof(struct nfs4_file), 0, 0, NULL, NULL);
1039 +			sizeof(struct nfs4_file), 0, 0, NULL);
1040 1040		if (file_slab == NULL)
1041 1041			goto out_nomem;
1042 1042		stateid_slab = kmem_cache_create("nfsd4_stateids",
1043 -			sizeof(struct nfs4_stateid), 0, 0, NULL, NULL);
1043 +			sizeof(struct nfs4_stateid), 0, 0, NULL);
1044 1044		if (stateid_slab == NULL)
1045 1045			goto out_nomem;
1046 1046		deleg_slab = kmem_cache_create("nfsd4_delegations",
1047 -			sizeof(struct nfs4_delegation), 0, 0, NULL, NULL);
1047 +			sizeof(struct nfs4_delegation), 0, 0, NULL);
1048 1048		if (deleg_slab == NULL)
1049 1049			goto out_nomem;
1050 1050		return 0;
+5 -5
fs/ntfs/super.c
···
3143 3143	
3144 3144		ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name,
3145 3145				sizeof(ntfs_index_context), 0 /* offset */,
3146 -			SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
3146 +			SLAB_HWCACHE_ALIGN, NULL /* ctor */);
3147 3147		if (!ntfs_index_ctx_cache) {
3148 3148			printk(KERN_CRIT "NTFS: Failed to create %s!\n",
3149 3149				ntfs_index_ctx_cache_name);
···
3151 3151		}
3152 3152		ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name,
3153 3153				sizeof(ntfs_attr_search_ctx), 0 /* offset */,
3154 -			SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */);
3154 +			SLAB_HWCACHE_ALIGN, NULL /* ctor */);
3155 3155		if (!ntfs_attr_ctx_cache) {
3156 3156			printk(KERN_CRIT "NTFS: Failed to create %s!\n",
3157 3157				ntfs_attr_ctx_cache_name);
···
3160 3160	
3161 3161		ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name,
3162 3162				(NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0,
3163 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
3163 +			SLAB_HWCACHE_ALIGN, NULL);
3164 3164		if (!ntfs_name_cache) {
3165 3165			printk(KERN_CRIT "NTFS: Failed to create %s!\n",
3166 3166				ntfs_name_cache_name);
···
3169 3169	
3170 3170		ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name,
3171 3171				sizeof(ntfs_inode), 0,
3172 -			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL);
3172 +			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
3173 3173		if (!ntfs_inode_cache) {
3174 3174			printk(KERN_CRIT "NTFS: Failed to create %s!\n",
3175 3175				ntfs_inode_cache_name);
···
3179 3179		ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name,
3180 3180				sizeof(big_ntfs_inode), 0,
3181 3181				SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
3182 -			ntfs_big_inode_init_once, NULL);
3182 +			ntfs_big_inode_init_once);
3183 3183		if (!ntfs_big_inode_cache) {
3184 3184			printk(KERN_CRIT "NTFS: Failed to create %s!\n",
3185 3185				ntfs_big_inode_cache_name);
+1 -1
fs/ocfs2/dlm/dlmfs.c
···
592 592				sizeof(struct dlmfs_inode_private),
593 593				0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
594 594					SLAB_MEM_SPREAD),
595 -			dlmfs_init_once, NULL);
595 +			dlmfs_init_once);
596 596		if (!dlmfs_inode_cache)
597 597			return -ENOMEM;
598 598		cleanup_inode = 1;
+1 -1
fs/ocfs2/dlm/dlmmaster.c
···
510 510		dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
511 511				sizeof(struct dlm_master_list_entry),
512 512				0, SLAB_HWCACHE_ALIGN,
513 -			NULL, NULL);
513 +			NULL);
514 514		if (dlm_mle_cache == NULL)
515 515			return -ENOMEM;
516 516		return 0;
+1 -1
fs/ocfs2/super.c
···
984 984				0,
985 985				(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
986 986					SLAB_MEM_SPREAD),
987 -			ocfs2_inode_init_once, NULL);
987 +			ocfs2_inode_init_once);
988 988		if (!ocfs2_inode_cachep)
989 989			return -ENOMEM;
990 990	
+1 -1
fs/ocfs2/uptodate.c
···
548 548	{
549 549		ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate",
550 550				sizeof(struct ocfs2_meta_cache_item),
551 -			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
551 +			0, SLAB_HWCACHE_ALIGN, NULL);
552 552		if (!ocfs2_uptodate_cachep)
553 553			return -ENOMEM;
554 554	
+1 -1
fs/openpromfs/inode.c
···
431 431			0,
432 432			(SLAB_RECLAIM_ACCOUNT |
433 433				SLAB_MEM_SPREAD),
434 -		op_inode_init_once, NULL);
434 +		op_inode_init_once);
435 435		if (!op_inode_cachep)
436 436			return -ENOMEM;
437 437	
+2 -2
fs/proc/inode.c
···
112 112	
113 113		inode_init_once(&ei->vfs_inode);
114 114	}
115 -	
115 +
116 116	int __init proc_init_inodecache(void)
117 117	{
118 118		proc_inode_cachep = kmem_cache_create("proc_inode_cache",
119 119				sizeof(struct proc_inode),
120 120				0, (SLAB_RECLAIM_ACCOUNT|
121 121					SLAB_MEM_SPREAD),
122 -			init_once, NULL);
122 +			init_once);
123 123		if (proc_inode_cachep == NULL)
124 124			return -ENOMEM;
125 125		return 0;
+1 -1
fs/qnx4/inode.c
···
545 545				sizeof(struct qnx4_inode_info),
546 546				0, (SLAB_RECLAIM_ACCOUNT|
547 547					SLAB_MEM_SPREAD),
548 -			init_once, NULL);
548 +			init_once);
549 549		if (qnx4_inode_cachep == NULL)
550 550			return -ENOMEM;
551 551		return 0;
+1 -1
fs/reiserfs/super.c
···
527 527					reiserfs_inode_info),
528 528				0, (SLAB_RECLAIM_ACCOUNT|
529 529					SLAB_MEM_SPREAD),
530 -			init_once, NULL);
530 +			init_once);
531 531		if (reiserfs_inode_cachep == NULL)
532 532			return -ENOMEM;
533 533		return 0;
+2 -2
fs/romfs/inode.c
···
572 572	
573 573		inode_init_once(&ei->vfs_inode);
574 574	}
575 -	
575 +
576 576	static int init_inodecache(void)
577 577	{
578 578		romfs_inode_cachep = kmem_cache_create("romfs_inode_cache",
579 579				sizeof(struct romfs_inode_info),
580 580				0, (SLAB_RECLAIM_ACCOUNT|
581 581					SLAB_MEM_SPREAD),
582 -			init_once, NULL);
582 +			init_once);
583 583		if (romfs_inode_cachep == NULL)
584 584			return -ENOMEM;
585 585		return 0;
+2 -2
fs/smbfs/inode.c
···
73 73	
74 74		inode_init_once(&ei->vfs_inode);
75 75	}
76 -	
76 +
77 77	static int init_inodecache(void)
78 78	{
79 79		smb_inode_cachep = kmem_cache_create("smb_inode_cache",
80 80				sizeof(struct smb_inode_info),
81 81				0, (SLAB_RECLAIM_ACCOUNT|
82 82					SLAB_MEM_SPREAD),
83 -			init_once, NULL);
83 +			init_once);
84 84		if (smb_inode_cachep == NULL)
85 85			return -ENOMEM;
86 86		return 0;
+1 -1
fs/smbfs/request.c
···
40 40		req_cachep = kmem_cache_create("smb_request",
41 41				sizeof(struct smb_request), 0,
42 42				SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
43 -			NULL, NULL);
43 +			NULL);
44 44		if (req_cachep == NULL)
45 45			return -ENOMEM;
46 46	
+1 -1
fs/sysfs/mount.c
···
86 86	
87 87		sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
88 88				sizeof(struct sysfs_dirent),
89 -			0, 0, NULL, NULL);
89 +			0, 0, NULL);
90 90		if (!sysfs_dir_cachep)
91 91			goto out;
92 92	
+1 -1
fs/sysv/inode.c
···
342 342		sysv_inode_cachep = kmem_cache_create("sysv_inode_cache",
343 343			sizeof(struct sysv_inode_info), 0,
344 344			SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
345 -		init_once, NULL);
345 +		init_once);
346 346		if (!sysv_inode_cachep)
347 347			return -ENOMEM;
348 348		return 0;
+1 -1
fs/udf/super.c
···
149 149				sizeof(struct udf_inode_info),
150 150				0, (SLAB_RECLAIM_ACCOUNT |
151 151					SLAB_MEM_SPREAD),
152 -			init_once, NULL);
152 +			init_once);
153 153		if (udf_inode_cachep == NULL)
154 154			return -ENOMEM;
155 155		return 0;
+2 -2
fs/ufs/super.c
···
1240 1240	
1241 1241		inode_init_once(&ei->vfs_inode);
1242 1242	}
1243 -	
1243 +
1244 1244	static int init_inodecache(void)
1245 1245	{
1246 1246		ufs_inode_cachep = kmem_cache_create("ufs_inode_cache",
1247 1247				sizeof(struct ufs_inode_info),
1248 1248				0, (SLAB_RECLAIM_ACCOUNT|
1249 1249					SLAB_MEM_SPREAD),
1250 -			init_once, NULL);
1250 +			init_once);
1251 1251		if (ufs_inode_cachep == NULL)
1252 1252			return -ENOMEM;
1253 1253		return 0;
+2 -2
fs/xfs/linux-2.6/kmem.h
···
74 74	static inline kmem_zone_t *
75 75	kmem_zone_init(int size, char *zone_name)
76 76	{
77 -		return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
77 +		return kmem_cache_create(zone_name, size, 0, 0, NULL);
78 78	}
79 79	
80 80	static inline kmem_zone_t *
81 81	kmem_zone_init_flags(int size, char *zone_name, unsigned long flags,
82 82			     void (*construct)(void *, kmem_zone_t *, unsigned long))
83 83	{
84 -		return kmem_cache_create(zone_name, size, 0, flags, construct, NULL);
84 +		return kmem_cache_create(zone_name, size, 0, flags, construct);
85 85	}
86 86	
87 87	static inline void
+1 -2
include/linux/i2o.h
···
946 946		strcpy(pool->name, name);
947 947	
948 948		pool->slab =
949 -			kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
950 -					  NULL);
949 +			kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
951 950		if (!pool->slab)
952 951			goto free_name;
953 952	
+1 -2
include/linux/slab.h
···
51 51	
52 52	struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
53 53				unsigned long,
54 -			void (*)(void *, struct kmem_cache *, unsigned long),
55 54			void (*)(void *, struct kmem_cache *, unsigned long));
56 55	void kmem_cache_destroy(struct kmem_cache *);
57 56	int kmem_cache_shrink(struct kmem_cache *);
···
69 70	 */
70 71	#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
71 72			sizeof(struct __struct), __alignof__(struct __struct),\
72 -		(__flags), NULL, NULL)
73 +		(__flags), NULL)
73 74	
74 75	/*
75 76	 * The largest kmalloc size supported by the slab allocators is
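With the prototype above reduced to one ctor callback, the common case
needs no callbacks at all, and KMEM_CACHE() now expands to a five-argument
call ending in a single NULL. A minimal usage sketch (struct foo and
foo_cachep are hypothetical names, not part of this patch):

	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/* name, size and alignment are all derived from struct foo */
		foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
		if (!foo_cachep)
			return -ENOMEM;
		return 0;
	}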
+1 -1
ipc/mqueue.c
···
1253 1253	
1254 1254		mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1255 1255				sizeof(struct mqueue_inode_info), 0,
1256 -			SLAB_HWCACHE_ALIGN, init_once, NULL);
1256 +			SLAB_HWCACHE_ALIGN, init_once);
1257 1257		if (mqueue_inode_cachep == NULL)
1258 1258			return -ENOMEM;
1259 1259	
+9 -9
kernel/fork.c
···
137 137		/* create a slab on which task_structs can be allocated */
138 138		task_struct_cachep =
139 139			kmem_cache_create("task_struct", sizeof(struct task_struct),
140 -			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
140 +			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
141 141	#endif
142 142	
143 143		/*
···
1446 1446		sighand_cachep = kmem_cache_create("sighand_cache",
1447 1447			sizeof(struct sighand_struct), 0,
1448 1448			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
1449 -		sighand_ctor, NULL);
1449 +		sighand_ctor);
1450 1450		signal_cachep = kmem_cache_create("signal_cache",
1451 1451			sizeof(struct signal_struct), 0,
1452 -		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1453 -	files_cachep = kmem_cache_create("files_cache", 
1452 +		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1453 +	files_cachep = kmem_cache_create("files_cache",
1454 1454			sizeof(struct files_struct), 0,
1455 -		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1456 -	fs_cachep = kmem_cache_create("fs_cache", 
1455 +		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1456 +	fs_cachep = kmem_cache_create("fs_cache",
1457 1457			sizeof(struct fs_struct), 0,
1458 -		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1458 +		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1459 1459		vm_area_cachep = kmem_cache_create("vm_area_struct",
1460 1460			sizeof(struct vm_area_struct), 0,
1461 -		SLAB_PANIC, NULL, NULL);
1461 +		SLAB_PANIC, NULL);
1462 1462		mm_cachep = kmem_cache_create("mm_struct",
1463 1463			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1464 -		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1464 +		SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1465 1465	}
1466 1466	
1467 1467	/*
+1 -1
kernel/nsproxy.c
···
193 193	static int __init nsproxy_cache_init(void)
194 194	{
195 195		nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy),
196 -			0, SLAB_PANIC, NULL, NULL);
196 +			0, SLAB_PANIC, NULL);
197 197		return 0;
198 198	}
199 199	
+1 -1
kernel/posix-timers.c
···
241 241		register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
242 242	
243 243		posix_timers_cache = kmem_cache_create("posix_timers_cache",
244 -		sizeof (struct k_itimer), 0, 0, NULL, NULL);
244 +		sizeof (struct k_itimer), 0, 0, NULL);
245 245		idr_init(&posix_timers_id);
246 246		return 0;
247 247	}
+1 -1
kernel/user.c
···
208 208		int n;
209 209	
210 210		uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
211 -			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
211 +			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
212 212	
213 213		for(n = 0; n < UIDHASH_SZ; ++n)
214 214			INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
+1 -1
lib/idr.c
···
590 590	{
591 591		if (!idr_layer_cache)
592 592			idr_layer_cache = kmem_cache_create("idr_layer_cache",
593 -			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
593 +			sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
594 594		return 0;
595 595	}
596 596	
+1 -1
lib/radix-tree.c
···
1021 1021	{
1022 1022		radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
1023 1023				sizeof(struct radix_tree_node), 0,
1024 -			SLAB_PANIC, radix_tree_node_ctor, NULL);
1024 +			SLAB_PANIC, radix_tree_node_ctor);
1025 1025		radix_tree_init_maxindex();
1026 1026		hotcpu_notifier(radix_tree_callback, 0);
1027 1027	}
+2 -2
mm/mempolicy.c
···
1605 1605	
1606 1606		policy_cache = kmem_cache_create("numa_policy",
1607 1607					sizeof(struct mempolicy),
1608 -				0, SLAB_PANIC, NULL, NULL);
1608 +				0, SLAB_PANIC, NULL);
1609 1609	
1610 1610		sn_cache = kmem_cache_create("shared_policy_node",
1611 1611					sizeof(struct sp_node),
1612 -				0, SLAB_PANIC, NULL, NULL);
1612 +				0, SLAB_PANIC, NULL);
1613 1613	
1614 1614		/*
1615 1615		 * Set interleaving policy for system init. Interleaving is only
+1 -1
mm/rmap.c
···
149 149	void __init anon_vma_init(void)
150 150	{
151 151		anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
152 -			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
152 +			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
153 153	}
154 154	
155 155	/*
+1 -1
mm/shmem.c
···
2322 2322	{
2323 2323		shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2324 2324				sizeof(struct shmem_inode_info),
2325 -			0, 0, init_once, NULL);
2325 +			0, 0, init_once);
2326 2326		if (shmem_inode_cachep == NULL)
2327 2327			return -ENOMEM;
2328 2328		return 0;
+7 -10
mm/slab.c
···
1484 1484				sizes[INDEX_AC].cs_size,
1485 1485				ARCH_KMALLOC_MINALIGN,
1486 1486				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1487 -			NULL, NULL);
1487 +			NULL);
1488 1488	
1489 1489		if (INDEX_AC != INDEX_L3) {
1490 1490			sizes[INDEX_L3].cs_cachep =
···
1492 1492				sizes[INDEX_L3].cs_size,
1493 1493				ARCH_KMALLOC_MINALIGN,
1494 1494				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1495 -			NULL, NULL);
1495 +			NULL);
1496 1496		}
1497 1497	
1498 1498		slab_early_init = 0;
···
1510 1510				sizes->cs_size,
1511 1511				ARCH_KMALLOC_MINALIGN,
1512 1512				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1513 -			NULL, NULL);
1513 +			NULL);
1514 1514		}
1515 1515	#ifdef CONFIG_ZONE_DMA
1516 1516		sizes->cs_dmacachep = kmem_cache_create(
···
1519 1519				ARCH_KMALLOC_MINALIGN,
1520 1520				ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1521 1521					SLAB_PANIC,
1522 -			NULL, NULL);
1522 +			NULL);
1523 1523	#endif
1524 1524		sizes++;
1525 1525		names++;
···
2101 2101	 * @align: The required alignment for the objects.
2102 2102	 * @flags: SLAB flags
2103 2103	 * @ctor: A constructor for the objects.
2104 -	 * @dtor: A destructor for the objects (not implemented anymore).
2105 2104	 *
2106 2105	 * Returns a ptr to the cache on success, NULL on failure.
2107 2106	 * Cannot be called within a int, but can be interrupted.
2108 -	 * The @ctor is run when new pages are allocated by the cache
2109 -	 * and the @dtor is run before the pages are handed back.
2107 +	 * The @ctor is run when new pages are allocated by the cache.
2110 2108	 *
2111 2109	 * @name must be valid until the cache is destroyed. This implies that
2112 2110	 * the module calling this has to destroy the cache before getting unloaded.
···
2124 2126	struct kmem_cache *
2125 2127	kmem_cache_create (const char *name, size_t size, size_t align,
2126 2128		unsigned long flags,
2127 -		void (*ctor)(void*, struct kmem_cache *, unsigned long),
2128 -		void (*dtor)(void*, struct kmem_cache *, unsigned long))
2129 +		void (*ctor)(void*, struct kmem_cache *, unsigned long))
2129 2130	{
2130 2131		size_t left_over, slab_size, ralign;
2131 2132		struct kmem_cache *cachep = NULL, *pc;
···
2133 2136		 * Sanity checks... these are all serious usage bugs.
2134 2137		 */
2135 2138		if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2136 -			size > KMALLOC_MAX_SIZE || dtor) {
2139 +			size > KMALLOC_MAX_SIZE) {
2137 2140			printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
2138 2141				name);
2139 2142			BUG();
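For reference, the surviving constructor keeps the three-argument form
shown in the updated comment block above; it runs per object as the cache
allocates new pages. A no-op-style sketch (example_ctor and struct example
are illustrative only, not part of this patch):

	static void example_ctor(void *obj, struct kmem_cache *cachep,
				 unsigned long flags)
	{
		/* called once per object when new slab pages are populated */
		memset(obj, 0, sizeof(struct example));
	}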
+1 -2
mm/slob.c
···
492 492	
493 493	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
494 494		size_t align, unsigned long flags,
495 -	void (*ctor)(void*, struct kmem_cache *, unsigned long),
496 -	void (*dtor)(void*, struct kmem_cache *, unsigned long))
495 +	void (*ctor)(void*, struct kmem_cache *, unsigned long))
497 496	{
498 497		struct kmem_cache *c;
499 498	
+1 -3
mm/slub.c
···
2668 2668	
2669 2669	struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2670 2670		size_t align, unsigned long flags,
2671 -	void (*ctor)(void *, struct kmem_cache *, unsigned long),
2672 -	void (*dtor)(void *, struct kmem_cache *, unsigned long))
2671 +	void (*ctor)(void *, struct kmem_cache *, unsigned long))
2673 2672	{
2674 2673		struct kmem_cache *s;
2675 2674	
2676 -		BUG_ON(dtor);
2677 2675		down_write(&slub_lock);
2678 2676		s = find_mergeable(size, align, flags, ctor);
2679 2677		if (s) {
+1 -1
net/bridge/br_fdb.c
···
36 36		br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
37 37				sizeof(struct net_bridge_fdb_entry),
38 38				0,
39 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
39 +			SLAB_HWCACHE_ALIGN, NULL);
40 40		if (!br_fdb_cache)
41 41			return -ENOMEM;
42 42	
+1 -1
net/core/flow.c
···
350 350		flow_cachep = kmem_cache_create("flow_cache",
351 351				sizeof(struct flow_cache_entry),
352 352				0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
353 -			NULL, NULL);
353 +			NULL);
354 354		flow_hash_shift = 10;
355 355		flow_lwm = 2 * flow_hash_size;
356 356		flow_hwm = 4 * flow_hash_size;
+1 -1
net/core/neighbour.c
···
1347 1347		tbl->kmem_cachep =
1348 1348			kmem_cache_create(tbl->id, tbl->entry_size, 0,
1349 1349					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1350 -				  NULL, NULL);
1350 +				  NULL);
1351 1351		tbl->stats = alloc_percpu(struct neigh_statistics);
1352 1352		if (!tbl->stats)
1353 1353			panic("cannot create neighbour cache statistics");
+2 -2
net/core/skbuff.c
···
2021 2021				sizeof(struct sk_buff),
2022 2022				0,
2023 2023				SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2024 -			NULL, NULL);
2024 +			NULL);
2025 2025		skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2026 2026				(2*sizeof(struct sk_buff)) +
2027 2027				sizeof(atomic_t),
2028 2028				0,
2029 2029				SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2030 -			NULL, NULL);
2030 +			NULL);
2031 2031	}
2032 2032	
2033 2033	/**
+3 -3
net/core/sock.c
···
1767 1767	
1768 1768		if (alloc_slab) {
1769 1769			prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
1770 -				SLAB_HWCACHE_ALIGN, NULL, NULL);
1770 +				SLAB_HWCACHE_ALIGN, NULL);
1771 1771	
1772 1772			if (prot->slab == NULL) {
1773 1773				printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
···
1785 1785			sprintf(request_sock_slab_name, mask, prot->name);
1786 1786			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
1787 1787				prot->rsk_prot->obj_size, 0,
1788 -			SLAB_HWCACHE_ALIGN, NULL, NULL);
1788 +			SLAB_HWCACHE_ALIGN, NULL);
1789 1789	
1790 1790			if (prot->rsk_prot->slab == NULL) {
1791 1791				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
···
1807 1807				kmem_cache_create(timewait_sock_slab_name,
1808 1808					prot->twsk_prot->twsk_obj_size,
1809 1809					0, SLAB_HWCACHE_ALIGN,
1810 -				NULL, NULL);
1810 +				NULL);
1811 1811			if (prot->twsk_prot->twsk_slab == NULL)
1812 1812				goto out_free_timewait_sock_slab_name;
1813 1813		}
+2 -2
net/dccp/ackvec.c
··· 481 481 { 482 482 dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", 483 483 sizeof(struct dccp_ackvec), 0, 484 - SLAB_HWCACHE_ALIGN, NULL, NULL); 484 + SLAB_HWCACHE_ALIGN, NULL); 485 485 if (dccp_ackvec_slab == NULL) 486 486 goto out_err; 487 487 488 488 dccp_ackvec_record_slab = 489 489 kmem_cache_create("dccp_ackvec_record", 490 490 sizeof(struct dccp_ackvec_record), 491 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 491 + 0, SLAB_HWCACHE_ALIGN, NULL); 492 492 if (dccp_ackvec_record_slab == NULL) 493 493 goto out_destroy_slab; 494 494
+1 -1
net/dccp/ccid.c
··· 69 69 if (slab_name == NULL) 70 70 return NULL; 71 71 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0, 72 - SLAB_HWCACHE_ALIGN, NULL, NULL); 72 + SLAB_HWCACHE_ALIGN, NULL); 73 73 if (slab == NULL) 74 74 kfree(slab_name); 75 75 return slab;
+1 -1
net/dccp/ccids/lib/loss_interval.c
··· 282 282 { 283 283 dccp_li_cachep = kmem_cache_create("dccp_li_hist", 284 284 sizeof(struct dccp_li_hist_entry), 285 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 285 + 0, SLAB_HWCACHE_ALIGN, NULL); 286 286 return dccp_li_cachep == NULL ? -ENOBUFS : 0; 287 287 } 288 288
+2 -2
net/dccp/ccids/lib/packet_history.c
··· 59 59 hist->dccptxh_slab = kmem_cache_create(slab_name, 60 60 sizeof(struct dccp_tx_hist_entry), 61 61 0, SLAB_HWCACHE_ALIGN, 62 - NULL, NULL); 62 + NULL); 63 63 if (hist->dccptxh_slab == NULL) 64 64 goto out_free_slab_name; 65 65 out: ··· 148 148 hist->dccprxh_slab = kmem_cache_create(slab_name, 149 149 sizeof(struct dccp_rx_hist_entry), 150 150 0, SLAB_HWCACHE_ALIGN, 151 - NULL, NULL); 151 + NULL); 152 152 if (hist->dccprxh_slab == NULL) 153 153 goto out_free_slab_name; 154 154 out:
+1 -1
net/dccp/proto.c
··· 1003 1003 dccp_hashinfo.bind_bucket_cachep = 1004 1004 kmem_cache_create("dccp_bind_bucket", 1005 1005 sizeof(struct inet_bind_bucket), 0, 1006 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1006 + SLAB_HWCACHE_ALIGN, NULL); 1007 1007 if (!dccp_hashinfo.bind_bucket_cachep) 1008 1008 goto out; 1009 1009
+1 -1
net/decnet/dn_route.c
··· 1770 1770 1771 1771 dn_dst_ops.kmem_cachep = 1772 1772 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0, 1773 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1773 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1774 1774 init_timer(&dn_route_timer); 1775 1775 dn_route_timer.function = dn_dst_check_expire; 1776 1776 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
+1 -1
net/decnet/dn_table.c
··· 881 881 dn_hash_kmem = kmem_cache_create("dn_fib_info_cache", 882 882 sizeof(struct dn_fib_info), 883 883 0, SLAB_HWCACHE_ALIGN, 884 - NULL, NULL); 884 + NULL); 885 885 } 886 886 887 887 void __exit dn_fib_table_cleanup(void)
+2 -2
net/ipv4/fib_hash.c
··· 771 771 fn_hash_kmem = kmem_cache_create("ip_fib_hash", 772 772 sizeof(struct fib_node), 773 773 0, SLAB_HWCACHE_ALIGN, 774 - NULL, NULL); 774 + NULL); 775 775 776 776 if (fn_alias_kmem == NULL) 777 777 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 778 778 sizeof(struct fib_alias), 779 779 0, SLAB_HWCACHE_ALIGN, 780 - NULL, NULL); 780 + NULL); 781 781 782 782 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash), 783 783 GFP_KERNEL);
+1 -1
net/ipv4/fib_trie.c
··· 1970 1970 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 1971 1971 sizeof(struct fib_alias), 1972 1972 0, SLAB_HWCACHE_ALIGN, 1973 - NULL, NULL); 1973 + NULL); 1974 1974 1975 1975 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), 1976 1976 GFP_KERNEL);
+1 -1
net/ipv4/inetpeer.c
··· 123 123 peer_cachep = kmem_cache_create("inet_peer_cache", 124 124 sizeof(struct inet_peer), 125 125 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 126 - NULL, NULL); 126 + NULL); 127 127 128 128 /* All the timers, started at system startup tend 129 129 to synchronize. Perturb it a bit.
+1 -1
net/ipv4/ipmr.c
··· 1917 1917 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1918 1918 sizeof(struct mfc_cache), 1919 1919 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1920 - NULL, NULL); 1920 + NULL); 1921 1921 init_timer(&ipmr_expire_timer); 1922 1922 ipmr_expire_timer.function=ipmr_expire_process; 1923 1923 register_netdevice_notifier(&ip_mr_notifier);
+1 -1
net/ipv4/ipvs/ip_vs_conn.c
··· 901 901 /* Allocate ip_vs_conn slab cache */ 902 902 ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn", 903 903 sizeof(struct ip_vs_conn), 0, 904 - SLAB_HWCACHE_ALIGN, NULL, NULL); 904 + SLAB_HWCACHE_ALIGN, NULL); 905 905 if (!ip_vs_conn_cachep) { 906 906 vfree(ip_vs_conn_tab); 907 907 return -ENOMEM;
+1 -1
net/ipv4/route.c
··· 2967 2967 2968 2968 ipv4_dst_ops.kmem_cachep = 2969 2969 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, 2970 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2970 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2971 2971 2972 2972 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 2973 2973
+1 -1
net/ipv4/tcp.c
··· 2430 2430 tcp_hashinfo.bind_bucket_cachep = 2431 2431 kmem_cache_create("tcp_bind_bucket", 2432 2432 sizeof(struct inet_bind_bucket), 0, 2433 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2433 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2434 2434 2435 2435 /* Size and allocate the main established and bind bucket 2436 2436 * hash tables.
+1 -1
net/ipv6/ip6_fib.c
··· 1474 1474 fib6_node_kmem = kmem_cache_create("fib6_nodes", 1475 1475 sizeof(struct fib6_node), 1476 1476 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1477 - NULL, NULL); 1477 + NULL); 1478 1478 1479 1479 fib6_tables_init(); 1480 1480
+1 -1
net/ipv6/route.c
··· 2555 2555 #endif 2556 2556 ip6_dst_ops.kmem_cachep = 2557 2557 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2558 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2558 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2559 2559 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; 2560 2560 2561 2561 fib6_init();
+1 -1
net/ipv6/xfrm6_tunnel.c
··· 84 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 85 85 sizeof(struct xfrm6_tunnel_spi), 86 86 0, SLAB_HWCACHE_ALIGN, 87 - NULL, NULL); 87 + NULL); 88 88 if (!xfrm6_tunnel_spi_kmem) 89 89 return -ENOMEM; 90 90
+1 -1
net/netfilter/nf_conntrack_core.c
··· 1108 1108 1109 1109 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1110 1110 sizeof(struct nf_conn), 1111 - 0, 0, NULL, NULL); 1111 + 0, 0, NULL); 1112 1112 if (!nf_conntrack_cachep) { 1113 1113 printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1114 1114 goto err_free_hash;
+1 -1
net/netfilter/nf_conntrack_expect.c
··· 540 540 541 541 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect", 542 542 sizeof(struct nf_conntrack_expect), 543 - 0, 0, NULL, NULL); 543 + 0, 0, NULL); 544 544 if (!nf_ct_expect_cachep) 545 545 goto err2; 546 546
+1 -1
net/netfilter/xt_hashlimit.c
··· 738 738 err = -ENOMEM; 739 739 hashlimit_cachep = kmem_cache_create("xt_hashlimit", 740 740 sizeof(struct dsthash_ent), 0, 0, 741 - NULL, NULL); 741 + NULL); 742 742 if (!hashlimit_cachep) { 743 743 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 744 744 goto err2;
+1 -1
net/rxrpc/af_rxrpc.c
··· 792 792 ret = -ENOMEM; 793 793 rxrpc_call_jar = kmem_cache_create( 794 794 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, 795 - SLAB_HWCACHE_ALIGN, NULL, NULL); 795 + SLAB_HWCACHE_ALIGN, NULL); 796 796 if (!rxrpc_call_jar) { 797 797 printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); 798 798 goto error_call_jar;
+2 -2
net/sctp/protocol.c
··· 980 980 sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", 981 981 sizeof(struct sctp_bind_bucket), 982 982 0, SLAB_HWCACHE_ALIGN, 983 - NULL, NULL); 983 + NULL); 984 984 if (!sctp_bucket_cachep) 985 985 goto out; 986 986 987 987 sctp_chunk_cachep = kmem_cache_create("sctp_chunk", 988 988 sizeof(struct sctp_chunk), 989 989 0, SLAB_HWCACHE_ALIGN, 990 - NULL, NULL); 990 + NULL); 991 991 if (!sctp_chunk_cachep) 992 992 goto err_chunk_cachep; 993 993
+1 -2
net/socket.c
··· 272 272 (SLAB_HWCACHE_ALIGN | 273 273 SLAB_RECLAIM_ACCOUNT | 274 274 SLAB_MEM_SPREAD), 275 - init_once, 276 - NULL); 275 + init_once); 277 276 if (sock_inode_cachep == NULL) 278 277 return -ENOMEM; 279 278 return 0;
+1 -1
net/sunrpc/rpc_pipe.c
··· 867 867 sizeof(struct rpc_inode), 868 868 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 869 869 SLAB_MEM_SPREAD), 870 - init_once, NULL); 870 + init_once); 871 871 if (!rpc_inode_cachep) 872 872 return -ENOMEM; 873 873 err = register_filesystem(&rpc_pipe_fs_type);
+2 -2
net/sunrpc/sched.c
··· 1031 1031 rpc_task_slabp = kmem_cache_create("rpc_tasks", 1032 1032 sizeof(struct rpc_task), 1033 1033 0, SLAB_HWCACHE_ALIGN, 1034 - NULL, NULL); 1034 + NULL); 1035 1035 if (!rpc_task_slabp) 1036 1036 goto err_nomem; 1037 1037 rpc_buffer_slabp = kmem_cache_create("rpc_buffers", 1038 1038 RPC_BUFFER_MAXSIZE, 1039 1039 0, SLAB_HWCACHE_ALIGN, 1040 - NULL, NULL); 1040 + NULL); 1041 1041 if (!rpc_buffer_slabp) 1042 1042 goto err_nomem; 1043 1043 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
+1 -1
net/tipc/handler.c
··· 97 97 { 98 98 tipc_queue_item_cache = 99 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 100 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 100 + 0, SLAB_HWCACHE_ALIGN, NULL); 101 101 if (!tipc_queue_item_cache) 102 102 return -ENOMEM; 103 103
+1 -1
net/xfrm/xfrm_input.c
··· 83 83 secpath_cachep = kmem_cache_create("secpath_cache", 84 84 sizeof(struct sec_path), 85 85 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 86 - NULL, NULL); 86 + NULL); 87 87 }
+1 -1
net/xfrm/xfrm_policy.c
··· 2378 2378 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2379 2379 sizeof(struct xfrm_dst), 2380 2380 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2381 - NULL, NULL); 2381 + NULL); 2382 2382 2383 2383 hmask = 8 - 1; 2384 2384 sz = (hmask+1) * sizeof(struct hlist_head);
+1 -1
security/keys/key.c
··· 1001 1001 { 1002 1002 /* allocate a slab in which we can store keys */ 1003 1003 key_jar = kmem_cache_create("key_jar", sizeof(struct key), 1004 - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1004 + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1005 1005 1006 1006 /* add the special key types */ 1007 1007 list_add_tail(&key_type_keyring.link, &key_types_list);
+1 -1
security/selinux/avc.c
··· 239 239 atomic_set(&avc_cache.lru_hint, 0); 240 240 241 241 avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 242 - 0, SLAB_PANIC, NULL, NULL); 242 + 0, SLAB_PANIC, NULL); 243 243 244 244 audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); 245 245 }
+1 -1
security/selinux/hooks.c
··· 4913 4913 4914 4914 sel_inode_cache = kmem_cache_create("selinux_inode_security", 4915 4915 sizeof(struct inode_security_struct), 4916 - 0, SLAB_PANIC, NULL, NULL); 4916 + 0, SLAB_PANIC, NULL); 4917 4917 avc_init(); 4918 4918 4919 4919 original_ops = secondary_ops = security_ops;
+1 -1
security/selinux/ss/avtab.c
··· 445 445 { 446 446 avtab_node_cachep = kmem_cache_create("avtab_node", 447 447 sizeof(struct avtab_node), 448 - 0, SLAB_PANIC, NULL, NULL); 448 + 0, SLAB_PANIC, NULL); 449 449 } 450 450 451 451 void avtab_cache_destroy(void)
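Taken together, every caller in the tree now uses the five-argument form shown throughout the hunks above. For reference, a self-contained sketch of the post-patch API over a full cache lifetime, assuming a hypothetical `widget` module (nothing below is from the patch itself):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct widget {
    	int id;
    };

    static struct kmem_cache *widget_cache;

    static int __init widget_init(void)
    {
    	/* Five arguments now: name, size, align, flags, ctor. */
    	widget_cache = kmem_cache_create("widget_cache",
    					 sizeof(struct widget), 0,
    					 SLAB_HWCACHE_ALIGN, NULL);
    	if (!widget_cache)
    		return -ENOMEM;
    	return 0;
    }

    static void __exit widget_exit(void)
    {
    	/* Any object cleanup now happens at free time in the caller;
    	 * there is no per-cache destructor to rely on. */
    	kmem_cache_destroy(widget_cache);
    }

    module_init(widget_init);
    module_exit(widget_exit);
    MODULE_LICENSE("GPL");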