mm: Remove slab destructors from kmem_cache_create().

Slab destructors have not been supported since Christoph's
c59def9f222d44bb7e2f0a559f2906191a0862d7 change: passing a non-NULL
destructor triggers a BUG() in both slab and slub, and slob never
supported destructors in the first place.
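Concretely, since that change both allocators reject a non-NULL
destructor up front; the guard amounts to something like the following
(a simplified sketch of the mm/slab.c / mm/slub.c behaviour, not the
verbatim source):

	struct kmem_cache *
	kmem_cache_create(const char *name, size_t size, size_t align,
			unsigned long flags,
			void (*ctor)(void *, struct kmem_cache *, unsigned long),
			void (*dtor)(void *, struct kmem_cache *, unsigned long))
	{
		BUG_ON(dtor);	/* destructors are dead; only ctor survives */
		/* ... normal cache creation continues ... */
	}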

This rips out support for the dtor pointer from kmem_cache_create()
completely and fixes up every single callsite in the kernel (roughly
224 of them, not counting the slab allocator definitions themselves or
the documentation references).
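For reference, the resulting prototype change is the obvious one;
roughly (simplified from include/linux/slab.h, leaving out surrounding
annotations):

	/* before: trailing dtor argument, always NULL or a BUG() */
	struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *, struct kmem_cache *, unsigned long),
			void (*)(void *, struct kmem_cache *, unsigned long));

	/* after: the dtor argument is gone */
	struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *, struct kmem_cache *, unsigned long));

Callers simply drop the trailing NULL (or the now-dead destructor), as
the hunks below show.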

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+247 -268
+1 -1
arch/arm/plat-s3c24xx/dma.c
··· 1333 dma_kmem = kmem_cache_create("dma_desc", 1334 sizeof(struct s3c2410_dma_buf), 0, 1335 SLAB_HWCACHE_ALIGN, 1336 - s3c2410_dma_cache_ctor, NULL); 1337 1338 if (dma_kmem == NULL) { 1339 printk(KERN_ERR "dma failed to make kmem cache\n");
··· 1333 dma_kmem = kmem_cache_create("dma_desc", 1334 sizeof(struct s3c2410_dma_buf), 0, 1335 SLAB_HWCACHE_ALIGN, 1336 + s3c2410_dma_cache_ctor); 1337 1338 if (dma_kmem == NULL) { 1339 printk(KERN_ERR "dma failed to make kmem cache\n");
+2 -2
arch/arm26/mm/memc.c
··· 176 { 177 pte_cache = kmem_cache_create("pte-cache", 178 sizeof(pte_t) * PTRS_PER_PTE, 179 - 0, SLAB_PANIC, pte_cache_ctor, NULL); 180 181 pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE + 182 sizeof(pgd_t) * PTRS_PER_PGD, 183 - 0, SLAB_PANIC, pgd_cache_ctor, NULL); 184 }
··· 176 { 177 pte_cache = kmem_cache_create("pte-cache", 178 sizeof(pte_t) * PTRS_PER_PTE, 179 + 0, SLAB_PANIC, pte_cache_ctor); 180 181 pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE + 182 sizeof(pgd_t) * PTRS_PER_PGD, 183 + 0, SLAB_PANIC, pgd_cache_ctor); 184 }
+1 -2
arch/i386/mm/init.c
··· 752 PTRS_PER_PMD*sizeof(pmd_t), 753 PTRS_PER_PMD*sizeof(pmd_t), 754 SLAB_PANIC, 755 - pmd_ctor, 756 - NULL); 757 if (!SHARED_KERNEL_PMD) { 758 /* If we're in PAE mode and have a non-shared 759 kernel pmd, then the pgd size must be a
··· 752 PTRS_PER_PMD*sizeof(pmd_t), 753 PTRS_PER_PMD*sizeof(pmd_t), 754 SLAB_PANIC, 755 + pmd_ctor); 756 if (!SHARED_KERNEL_PMD) { 757 /* If we're in PAE mode and have a non-shared 758 kernel pmd, then the pgd size must be a
+1 -1
arch/ia64/ia32/ia32_support.c
··· 253 254 partial_page_cachep = kmem_cache_create("partial_page_cache", 255 sizeof(struct partial_page), 256 - 0, SLAB_PANIC, NULL, NULL); 257 } 258 #endif 259 return 0;
··· 253 254 partial_page_cachep = kmem_cache_create("partial_page_cache", 255 sizeof(struct partial_page), 256 + 0, SLAB_PANIC, NULL); 257 } 258 #endif 259 return 0;
+1 -1
arch/powerpc/kernel/rtas_flash.c
··· 804 805 flash_block_cache = kmem_cache_create("rtas_flash_cache", 806 RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, 807 - rtas_block_ctor, NULL); 808 if (!flash_block_cache) { 809 printk(KERN_ERR "%s: failed to create block cache\n", 810 __FUNCTION__);
··· 804 805 flash_block_cache = kmem_cache_create("rtas_flash_cache", 806 RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, 807 + rtas_block_ctor); 808 if (!flash_block_cache) { 809 printk(KERN_ERR "%s: failed to create block cache\n", 810 __FUNCTION__);
+1 -1
arch/powerpc/mm/hugetlbpage.c
··· 542 HUGEPTE_TABLE_SIZE, 543 HUGEPTE_TABLE_SIZE, 544 0, 545 - zero_ctor, NULL); 546 if (! huge_pgtable_cache) 547 panic("hugetlbpage_init(): could not create hugepte cache\n"); 548
··· 542 HUGEPTE_TABLE_SIZE, 543 HUGEPTE_TABLE_SIZE, 544 0, 545 + zero_ctor); 546 if (! huge_pgtable_cache) 547 panic("hugetlbpage_init(): could not create hugepte cache\n"); 548
+1 -2
arch/powerpc/mm/init_64.c
··· 178 pgtable_cache[i] = kmem_cache_create(name, 179 size, size, 180 SLAB_PANIC, 181 - zero_ctor, 182 - NULL); 183 } 184 }
··· 178 pgtable_cache[i] = kmem_cache_create(name, 179 size, size, 180 SLAB_PANIC, 181 + zero_ctor); 182 } 183 }
+1 -1
arch/powerpc/platforms/cell/spufs/inode.c
··· 654 ret = -ENOMEM; 655 spufs_inode_cache = kmem_cache_create("spufs_inode_cache", 656 sizeof(struct spufs_inode_info), 0, 657 - SLAB_HWCACHE_ALIGN, spufs_init_once, NULL); 658 659 if (!spufs_inode_cache) 660 goto out;
··· 654 ret = -ENOMEM; 655 spufs_inode_cache = kmem_cache_create("spufs_inode_cache", 656 sizeof(struct spufs_inode_info), 0, 657 + SLAB_HWCACHE_ALIGN, spufs_init_once); 658 659 if (!spufs_inode_cache) 660 goto out;
+1 -2
arch/sh/kernel/cpu/sh4/sq.c
··· 371 printk(KERN_NOTICE "sq: Registering store queue API.\n"); 372 373 sq_cache = kmem_cache_create("store_queue_cache", 374 - sizeof(struct sq_mapping), 0, 0, 375 - NULL, NULL); 376 if (unlikely(!sq_cache)) 377 return ret; 378
··· 371 printk(KERN_NOTICE "sq: Registering store queue API.\n"); 372 373 sq_cache = kmem_cache_create("store_queue_cache", 374 + sizeof(struct sq_mapping), 0, 0, NULL); 375 if (unlikely(!sq_cache)) 376 return ret; 377
+1 -1
arch/sh/mm/pmb.c
··· 310 BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); 311 312 pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, 313 - SLAB_PANIC, pmb_cache_ctor, NULL); 314 315 jump_to_P2(); 316
··· 310 BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); 311 312 pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, 313 + SLAB_PANIC, pmb_cache_ctor); 314 315 jump_to_P2(); 316
+1 -2
arch/sparc64/mm/tsb.c
··· 262 263 tsb_caches[i] = kmem_cache_create(name, 264 size, size, 265 - 0, 266 - NULL, NULL); 267 if (!tsb_caches[i]) { 268 prom_printf("Could not create %s cache\n", name); 269 prom_halt();
··· 262 263 tsb_caches[i] = kmem_cache_create(name, 264 size, size, 265 + 0, NULL); 266 if (!tsb_caches[i]) { 267 prom_printf("Could not create %s cache\n", name); 268 prom_halt();
+1 -1
block/bsg.c
··· 1043 dev_t devid; 1044 1045 bsg_cmd_cachep = kmem_cache_create("bsg_cmd", 1046 - sizeof(struct bsg_command), 0, 0, NULL, NULL); 1047 if (!bsg_cmd_cachep) { 1048 printk(KERN_ERR "bsg: failed creating slab cache\n"); 1049 return -ENOMEM;
··· 1043 dev_t devid; 1044 1045 bsg_cmd_cachep = kmem_cache_create("bsg_cmd", 1046 + sizeof(struct bsg_command), 0, 0, NULL); 1047 if (!bsg_cmd_cachep) { 1048 printk(KERN_ERR "bsg: failed creating slab cache\n"); 1049 return -ENOMEM;
+3 -3
block/ll_rw_blk.c
··· 3698 panic("Failed to create kblockd\n"); 3699 3700 request_cachep = kmem_cache_create("blkdev_requests", 3701 - sizeof(struct request), 0, SLAB_PANIC, NULL, NULL); 3702 3703 requestq_cachep = kmem_cache_create("blkdev_queue", 3704 - sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL); 3705 3706 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3707 - sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); 3708 3709 for_each_possible_cpu(i) 3710 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
··· 3698 panic("Failed to create kblockd\n"); 3699 3700 request_cachep = kmem_cache_create("blkdev_requests", 3701 + sizeof(struct request), 0, SLAB_PANIC, NULL); 3702 3703 requestq_cachep = kmem_cache_create("blkdev_queue", 3704 + sizeof(request_queue_t), 0, SLAB_PANIC, NULL); 3705 3706 iocontext_cachep = kmem_cache_create("blkdev_ioc", 3707 + sizeof(struct io_context), 0, SLAB_PANIC, NULL); 3708 3709 for_each_possible_cpu(i) 3710 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+1 -1
drivers/acpi/osl.c
··· 1098 acpi_status 1099 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1100 { 1101 - *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL); 1102 if (*cache == NULL) 1103 return AE_ERROR; 1104 else
··· 1098 acpi_status 1099 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1100 { 1101 + *cache = kmem_cache_create(name, size, 0, 0, NULL); 1102 if (*cache == NULL) 1103 return AE_ERROR; 1104 else
+2 -2
drivers/block/aoe/aoeblk.c
··· 257 int __init 258 aoeblk_init(void) 259 { 260 - buf_pool_cache = kmem_cache_create("aoe_bufs", 261 sizeof(struct buf), 262 - 0, 0, NULL, NULL); 263 if (buf_pool_cache == NULL) 264 return -ENOMEM; 265
··· 257 int __init 258 aoeblk_init(void) 259 { 260 + buf_pool_cache = kmem_cache_create("aoe_bufs", 261 sizeof(struct buf), 262 + 0, 0, NULL); 263 if (buf_pool_cache == NULL) 264 return -ENOMEM; 265
+1 -1
drivers/ieee1394/eth1394.c
··· 1729 1730 packet_task_cache = kmem_cache_create("packet_task", 1731 sizeof(struct packet_task), 1732 - 0, 0, NULL, NULL); 1733 if (!packet_task_cache) 1734 return -ENOMEM; 1735
··· 1729 1730 packet_task_cache = kmem_cache_create("packet_task", 1731 sizeof(struct packet_task), 1732 + 0, 0, NULL); 1733 if (!packet_task_cache) 1734 return -ENOMEM; 1735
-1
drivers/infiniband/core/mad.c
··· 2998 sizeof(struct ib_mad_private), 2999 0, 3000 SLAB_HWCACHE_ALIGN, 3001 - NULL, 3002 NULL); 3003 if (!ib_mad_cache) { 3004 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
··· 2998 sizeof(struct ib_mad_private), 2999 0, 3000 SLAB_HWCACHE_ALIGN, 3001 NULL); 3002 if (!ib_mad_cache) { 3003 printk(KERN_ERR PFX "Couldn't create ib_mad cache\n");
+1 -1
drivers/infiniband/hw/amso1100/c2_vq.c
··· 85 (char) ('0' + c2dev->devnum)); 86 c2dev->host_msg_cache = 87 kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0, 88 - SLAB_HWCACHE_ALIGN, NULL, NULL); 89 if (c2dev->host_msg_cache == NULL) { 90 return -ENOMEM; 91 }
··· 85 (char) ('0' + c2dev->devnum)); 86 c2dev->host_msg_cache = 87 kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0, 88 + SLAB_HWCACHE_ALIGN, NULL); 89 if (c2dev->host_msg_cache == NULL) { 90 return -ENOMEM; 91 }
+1 -1
drivers/infiniband/hw/ehca/ehca_av.c
··· 259 av_cache = kmem_cache_create("ehca_cache_av", 260 sizeof(struct ehca_av), 0, 261 SLAB_HWCACHE_ALIGN, 262 - NULL, NULL); 263 if (!av_cache) 264 return -ENOMEM; 265 return 0;
··· 259 av_cache = kmem_cache_create("ehca_cache_av", 260 sizeof(struct ehca_av), 0, 261 SLAB_HWCACHE_ALIGN, 262 + NULL); 263 if (!av_cache) 264 return -ENOMEM; 265 return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_cq.c
··· 387 cq_cache = kmem_cache_create("ehca_cache_cq", 388 sizeof(struct ehca_cq), 0, 389 SLAB_HWCACHE_ALIGN, 390 - NULL, NULL); 391 if (!cq_cache) 392 return -ENOMEM; 393 return 0;
··· 387 cq_cache = kmem_cache_create("ehca_cache_cq", 388 sizeof(struct ehca_cq), 0, 389 SLAB_HWCACHE_ALIGN, 390 + NULL); 391 if (!cq_cache) 392 return -ENOMEM; 393 return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_main.c
··· 163 ctblk_cache = kmem_cache_create("ehca_cache_ctblk", 164 EHCA_PAGESIZE, H_CB_ALIGNMENT, 165 SLAB_HWCACHE_ALIGN, 166 - NULL, NULL); 167 if (!ctblk_cache) { 168 ehca_gen_err("Cannot create ctblk SLAB cache."); 169 ehca_cleanup_mrmw_cache();
··· 163 ctblk_cache = kmem_cache_create("ehca_cache_ctblk", 164 EHCA_PAGESIZE, H_CB_ALIGNMENT, 165 SLAB_HWCACHE_ALIGN, 166 + NULL); 167 if (!ctblk_cache) { 168 ehca_gen_err("Cannot create ctblk SLAB cache."); 169 ehca_cleanup_mrmw_cache();
+2 -2
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 1950 mr_cache = kmem_cache_create("ehca_cache_mr", 1951 sizeof(struct ehca_mr), 0, 1952 SLAB_HWCACHE_ALIGN, 1953 - NULL, NULL); 1954 if (!mr_cache) 1955 return -ENOMEM; 1956 mw_cache = kmem_cache_create("ehca_cache_mw", 1957 sizeof(struct ehca_mw), 0, 1958 SLAB_HWCACHE_ALIGN, 1959 - NULL, NULL); 1960 if (!mw_cache) { 1961 kmem_cache_destroy(mr_cache); 1962 mr_cache = NULL;
··· 1950 mr_cache = kmem_cache_create("ehca_cache_mr", 1951 sizeof(struct ehca_mr), 0, 1952 SLAB_HWCACHE_ALIGN, 1953 + NULL); 1954 if (!mr_cache) 1955 return -ENOMEM; 1956 mw_cache = kmem_cache_create("ehca_cache_mw", 1957 sizeof(struct ehca_mw), 0, 1958 SLAB_HWCACHE_ALIGN, 1959 + NULL); 1960 if (!mw_cache) { 1961 kmem_cache_destroy(mr_cache); 1962 mr_cache = NULL;
+1 -1
drivers/infiniband/hw/ehca/ehca_pd.c
··· 100 pd_cache = kmem_cache_create("ehca_cache_pd", 101 sizeof(struct ehca_pd), 0, 102 SLAB_HWCACHE_ALIGN, 103 - NULL, NULL); 104 if (!pd_cache) 105 return -ENOMEM; 106 return 0;
··· 100 pd_cache = kmem_cache_create("ehca_cache_pd", 101 sizeof(struct ehca_pd), 0, 102 SLAB_HWCACHE_ALIGN, 103 + NULL); 104 if (!pd_cache) 105 return -ENOMEM; 106 return 0;
+1 -1
drivers/infiniband/hw/ehca/ehca_qp.c
··· 1760 qp_cache = kmem_cache_create("ehca_cache_qp", 1761 sizeof(struct ehca_qp), 0, 1762 SLAB_HWCACHE_ALIGN, 1763 - NULL, NULL); 1764 if (!qp_cache) 1765 return -ENOMEM; 1766 return 0;
··· 1760 qp_cache = kmem_cache_create("ehca_cache_qp", 1761 sizeof(struct ehca_qp), 0, 1762 SLAB_HWCACHE_ALIGN, 1763 + NULL); 1764 if (!qp_cache) 1765 return -ENOMEM; 1766 return 0;
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 637 ig.desc_cache = kmem_cache_create("iser_descriptors", 638 sizeof (struct iser_desc), 639 0, SLAB_HWCACHE_ALIGN, 640 - NULL, NULL); 641 if (ig.desc_cache == NULL) 642 return -ENOMEM; 643
··· 637 ig.desc_cache = kmem_cache_create("iser_descriptors", 638 sizeof (struct iser_desc), 639 0, SLAB_HWCACHE_ALIGN, 640 + NULL); 641 if (ig.desc_cache == NULL) 642 return -ENOMEM; 643
+4 -4
drivers/kvm/mmu.c
··· 1332 { 1333 pte_chain_cache = kmem_cache_create("kvm_pte_chain", 1334 sizeof(struct kvm_pte_chain), 1335 - 0, 0, NULL, NULL); 1336 if (!pte_chain_cache) 1337 goto nomem; 1338 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc", 1339 sizeof(struct kvm_rmap_desc), 1340 - 0, 0, NULL, NULL); 1341 if (!rmap_desc_cache) 1342 goto nomem; 1343 1344 mmu_page_cache = kmem_cache_create("kvm_mmu_page", 1345 PAGE_SIZE, 1346 - PAGE_SIZE, 0, NULL, NULL); 1347 if (!mmu_page_cache) 1348 goto nomem; 1349 1350 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 1351 sizeof(struct kvm_mmu_page), 1352 - 0, 0, NULL, NULL); 1353 if (!mmu_page_header_cache) 1354 goto nomem; 1355
··· 1332 { 1333 pte_chain_cache = kmem_cache_create("kvm_pte_chain", 1334 sizeof(struct kvm_pte_chain), 1335 + 0, 0, NULL); 1336 if (!pte_chain_cache) 1337 goto nomem; 1338 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc", 1339 sizeof(struct kvm_rmap_desc), 1340 + 0, 0, NULL); 1341 if (!rmap_desc_cache) 1342 goto nomem; 1343 1344 mmu_page_cache = kmem_cache_create("kvm_mmu_page", 1345 PAGE_SIZE, 1346 + PAGE_SIZE, 0, NULL); 1347 if (!mmu_page_cache) 1348 goto nomem; 1349 1350 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", 1351 sizeof(struct kvm_mmu_page), 1352 + 0, 0, NULL); 1353 if (!mmu_page_header_cache) 1354 goto nomem; 1355
+2 -2
drivers/md/raid5.c
··· 951 conf->active_name = 0; 952 sc = kmem_cache_create(conf->cache_name[conf->active_name], 953 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 954 - 0, 0, NULL, NULL); 955 if (!sc) 956 return 1; 957 conf->slab_cache = sc; ··· 1003 /* Step 1 */ 1004 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1005 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1006 - 0, 0, NULL, NULL); 1007 if (!sc) 1008 return -ENOMEM; 1009
··· 951 conf->active_name = 0; 952 sc = kmem_cache_create(conf->cache_name[conf->active_name], 953 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 954 + 0, 0, NULL); 955 if (!sc) 956 return 1; 957 conf->slab_cache = sc; ··· 1003 /* Step 1 */ 1004 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1005 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1006 + 0, 0, NULL); 1007 if (!sc) 1008 return -ENOMEM; 1009
+1 -2
drivers/message/i2o/i2o_block.c
··· 1171 /* Allocate request mempool and slab */ 1172 size = sizeof(struct i2o_block_request); 1173 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, 1174 - SLAB_HWCACHE_ALIGN, NULL, 1175 - NULL); 1176 if (!i2o_blk_req_pool.slab) { 1177 osm_err("can't init request slab\n"); 1178 rc = -ENOMEM;
··· 1171 /* Allocate request mempool and slab */ 1172 size = sizeof(struct i2o_block_request); 1173 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, 1174 + SLAB_HWCACHE_ALIGN, NULL); 1175 if (!i2o_blk_req_pool.slab) { 1176 osm_err("can't init request slab\n"); 1177 rc = -ENOMEM;
+1 -1
drivers/mtd/ubi/eba.c
··· 1149 if (ubi_devices_cnt == 0) { 1150 ltree_slab = kmem_cache_create("ubi_ltree_slab", 1151 sizeof(struct ltree_entry), 0, 1152 - 0, &ltree_entry_ctor, NULL); 1153 if (!ltree_slab) 1154 return -ENOMEM; 1155 }
··· 1149 if (ubi_devices_cnt == 0) { 1150 ltree_slab = kmem_cache_create("ubi_ltree_slab", 1151 sizeof(struct ltree_entry), 0, 1152 + 0, &ltree_entry_ctor); 1153 if (!ltree_slab) 1154 return -ENOMEM; 1155 }
+1 -1
drivers/mtd/ubi/wl.c
··· 1452 if (ubi_devices_cnt == 0) { 1453 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", 1454 sizeof(struct ubi_wl_entry), 1455 - 0, 0, NULL, NULL); 1456 if (!wl_entries_slab) 1457 return -ENOMEM; 1458 }
··· 1452 if (ubi_devices_cnt == 0) { 1453 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab", 1454 sizeof(struct ubi_wl_entry), 1455 + 0, 0, NULL); 1456 if (!wl_entries_slab) 1457 return -ENOMEM; 1458 }
+1 -1
drivers/s390/block/dasd_devmap.c
··· 291 dasd_page_cache = 292 kmem_cache_create("dasd_page_cache", PAGE_SIZE, 293 PAGE_SIZE, SLAB_CACHE_DMA, 294 - NULL, NULL ); 295 if (!dasd_page_cache) 296 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " 297 "fixed buffer mode disabled.");
··· 291 dasd_page_cache = 292 kmem_cache_create("dasd_page_cache", PAGE_SIZE, 293 PAGE_SIZE, SLAB_CACHE_DMA, 294 + NULL); 295 if (!dasd_page_cache) 296 MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " 297 "fixed buffer mode disabled.");
+3 -3
drivers/s390/scsi/zfcp_aux.c
··· 259 size = sizeof(struct zfcp_fsf_req_qtcb); 260 align = calc_alignment(size); 261 zfcp_data.fsf_req_qtcb_cache = 262 - kmem_cache_create("zfcp_fsf", size, align, 0, NULL, NULL); 263 if (!zfcp_data.fsf_req_qtcb_cache) 264 goto out; 265 266 size = sizeof(struct fsf_status_read_buffer); 267 align = calc_alignment(size); 268 zfcp_data.sr_buffer_cache = 269 - kmem_cache_create("zfcp_sr", size, align, 0, NULL, NULL); 270 if (!zfcp_data.sr_buffer_cache) 271 goto out_sr_cache; 272 273 size = sizeof(struct zfcp_gid_pn_data); 274 align = calc_alignment(size); 275 zfcp_data.gid_pn_cache = 276 - kmem_cache_create("zfcp_gid", size, align, 0, NULL, NULL); 277 if (!zfcp_data.gid_pn_cache) 278 goto out_gid_cache; 279
··· 259 size = sizeof(struct zfcp_fsf_req_qtcb); 260 align = calc_alignment(size); 261 zfcp_data.fsf_req_qtcb_cache = 262 + kmem_cache_create("zfcp_fsf", size, align, 0, NULL); 263 if (!zfcp_data.fsf_req_qtcb_cache) 264 goto out; 265 266 size = sizeof(struct fsf_status_read_buffer); 267 align = calc_alignment(size); 268 zfcp_data.sr_buffer_cache = 269 + kmem_cache_create("zfcp_sr", size, align, 0, NULL); 270 if (!zfcp_data.sr_buffer_cache) 271 goto out_sr_cache; 272 273 size = sizeof(struct zfcp_gid_pn_data); 274 align = calc_alignment(size); 275 zfcp_data.gid_pn_cache = 276 + kmem_cache_create("zfcp_gid", size, align, 0, NULL); 277 if (!zfcp_data.gid_pn_cache) 278 goto out_gid_cache; 279
+2 -2
drivers/scsi/aic94xx/aic94xx_init.c
··· 462 sizeof(struct asd_dma_tok), 463 0, 464 SLAB_HWCACHE_ALIGN, 465 - NULL, NULL); 466 if (!asd_dma_token_cache) { 467 asd_printk("couldn't create dma token cache\n"); 468 return -ENOMEM; ··· 474 sizeof(struct asd_ascb), 475 0, 476 SLAB_HWCACHE_ALIGN, 477 - NULL, NULL); 478 if (!asd_ascb_cache) { 479 asd_printk("couldn't create ascb cache\n"); 480 goto Err;
··· 462 sizeof(struct asd_dma_tok), 463 0, 464 SLAB_HWCACHE_ALIGN, 465 + NULL); 466 if (!asd_dma_token_cache) { 467 asd_printk("couldn't create dma token cache\n"); 468 return -ENOMEM; ··· 474 sizeof(struct asd_ascb), 475 0, 476 SLAB_HWCACHE_ALIGN, 477 + NULL); 478 if (!asd_ascb_cache) { 479 asd_printk("couldn't create ascb cache\n"); 480 goto Err;
+1 -1
drivers/scsi/libsas/sas_init.c
··· 292 static int __init sas_class_init(void) 293 { 294 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), 295 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 296 if (!sas_task_cache) 297 return -ENOMEM; 298
··· 292 static int __init sas_class_init(void) 293 { 294 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), 295 + 0, SLAB_HWCACHE_ALIGN, NULL); 296 if (!sas_task_cache) 297 return -ENOMEM; 298
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 2723 2724 /* Allocate cache for SRBs. */ 2725 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 2726 - SLAB_HWCACHE_ALIGN, NULL, NULL); 2727 if (srb_cachep == NULL) { 2728 printk(KERN_ERR 2729 "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
··· 2723 2724 /* Allocate cache for SRBs. */ 2725 srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, 2726 + SLAB_HWCACHE_ALIGN, NULL); 2727 if (srb_cachep == NULL) { 2728 printk(KERN_ERR 2729 "qla2xxx: Unable to allocate SRB cache...Failing load!\n");
+1 -1
drivers/scsi/qla4xxx/ql4_os.c
··· 1677 1678 /* Allocate cache for SRBs. */ 1679 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 1680 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1681 if (srb_cachep == NULL) { 1682 printk(KERN_ERR 1683 "%s: Unable to allocate SRB cache..."
··· 1677 1678 /* Allocate cache for SRBs. */ 1679 srb_cachep = kmem_cache_create("qla4xxx_srbs", sizeof(struct srb), 0, 1680 + SLAB_HWCACHE_ALIGN, NULL); 1681 if (srb_cachep == NULL) { 1682 printk(KERN_ERR 1683 "%s: Unable to allocate SRB cache..."
+1 -1
drivers/scsi/scsi.c
··· 288 if (!pool->users) { 289 pool->slab = kmem_cache_create(pool->name, 290 sizeof(struct scsi_cmnd), 0, 291 - pool->slab_flags, NULL, NULL); 292 if (!pool->slab) 293 goto fail; 294 }
··· 288 if (!pool->users) { 289 pool->slab = kmem_cache_create(pool->name, 290 sizeof(struct scsi_cmnd), 0, 291 + pool->slab_flags, NULL); 292 if (!pool->slab) 293 goto fail; 294 }
+2 -2
drivers/scsi/scsi_lib.c
··· 1661 1662 scsi_io_context_cache = kmem_cache_create("scsi_io_context", 1663 sizeof(struct scsi_io_context), 1664 - 0, 0, NULL, NULL); 1665 if (!scsi_io_context_cache) { 1666 printk(KERN_ERR "SCSI: can't init scsi io context cache\n"); 1667 return -ENOMEM; ··· 1672 int size = sgp->size * sizeof(struct scatterlist); 1673 1674 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1675 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1676 if (!sgp->slab) { 1677 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1678 sgp->name);
··· 1661 1662 scsi_io_context_cache = kmem_cache_create("scsi_io_context", 1663 sizeof(struct scsi_io_context), 1664 + 0, 0, NULL); 1665 if (!scsi_io_context_cache) { 1666 printk(KERN_ERR "SCSI: can't init scsi io context cache\n"); 1667 return -ENOMEM; ··· 1672 int size = sgp->size * sizeof(struct scatterlist); 1673 1674 sgp->slab = kmem_cache_create(sgp->name, size, 0, 1675 + SLAB_HWCACHE_ALIGN, NULL); 1676 if (!sgp->slab) { 1677 printk(KERN_ERR "SCSI: can't init sg slab %s\n", 1678 sgp->name);
+1 -1
drivers/scsi/scsi_tgt_lib.c
··· 585 586 scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd", 587 sizeof(struct scsi_tgt_cmd), 588 - 0, 0, NULL, NULL); 589 if (!scsi_tgt_cmd_cache) 590 return -ENOMEM; 591
··· 585 586 scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd", 587 sizeof(struct scsi_tgt_cmd), 588 + 0, 0, NULL); 589 if (!scsi_tgt_cmd_cache) 590 return -ENOMEM; 591
+1 -1
drivers/usb/host/uhci-hcd.c
··· 933 } 934 935 uhci_up_cachep = kmem_cache_create("uhci_urb_priv", 936 - sizeof(struct urb_priv), 0, 0, NULL, NULL); 937 if (!uhci_up_cachep) 938 goto up_failed; 939
··· 933 } 934 935 uhci_up_cachep = kmem_cache_create("uhci_urb_priv", 936 + sizeof(struct urb_priv), 0, 0, NULL); 937 if (!uhci_up_cachep) 938 goto up_failed; 939
+1 -1
drivers/usb/mon/mon_text.c
··· 340 snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); 341 rp->e_slab = kmem_cache_create(rp->slab_name, 342 sizeof(struct mon_event_text), sizeof(long), 0, 343 - mon_text_ctor, NULL); 344 if (rp->e_slab == NULL) { 345 rc = -ENOMEM; 346 goto err_slab;
··· 340 snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); 341 rp->e_slab = kmem_cache_create(rp->slab_name, 342 sizeof(struct mon_event_text), sizeof(long), 0, 343 + mon_text_ctor); 344 if (rp->e_slab == NULL) { 345 rc = -ENOMEM; 346 goto err_slab;
+2 -2
fs/adfs/super.c
··· 234 235 inode_init_once(&ei->vfs_inode); 236 } 237 - 238 static int init_inodecache(void) 239 { 240 adfs_inode_cachep = kmem_cache_create("adfs_inode_cache", 241 sizeof(struct adfs_inode_info), 242 0, (SLAB_RECLAIM_ACCOUNT| 243 SLAB_MEM_SPREAD), 244 - init_once, NULL); 245 if (adfs_inode_cachep == NULL) 246 return -ENOMEM; 247 return 0;
··· 234 235 inode_init_once(&ei->vfs_inode); 236 } 237 + 238 static int init_inodecache(void) 239 { 240 adfs_inode_cachep = kmem_cache_create("adfs_inode_cache", 241 sizeof(struct adfs_inode_info), 242 0, (SLAB_RECLAIM_ACCOUNT| 243 SLAB_MEM_SPREAD), 244 + init_once); 245 if (adfs_inode_cachep == NULL) 246 return -ENOMEM; 247 return 0;
+1 -1
fs/affs/super.c
··· 99 sizeof(struct affs_inode_info), 100 0, (SLAB_RECLAIM_ACCOUNT| 101 SLAB_MEM_SPREAD), 102 - init_once, NULL); 103 if (affs_inode_cachep == NULL) 104 return -ENOMEM; 105 return 0;
··· 99 sizeof(struct affs_inode_info), 100 0, (SLAB_RECLAIM_ACCOUNT| 101 SLAB_MEM_SPREAD), 102 + init_once); 103 if (affs_inode_cachep == NULL) 104 return -ENOMEM; 105 return 0;
+1 -2
fs/afs/super.c
··· 89 sizeof(struct afs_vnode), 90 0, 91 SLAB_HWCACHE_ALIGN, 92 - afs_i_init_once, 93 - NULL); 94 if (!afs_inode_cachep) { 95 printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n"); 96 return ret;
··· 89 sizeof(struct afs_vnode), 90 0, 91 SLAB_HWCACHE_ALIGN, 92 + afs_i_init_once); 93 if (!afs_inode_cachep) { 94 printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n"); 95 return ret;
+2 -2
fs/befs/linuxvfs.c
··· 414 } 415 416 /* Initialize the inode cache. Called at fs setup. 417 - * 418 * Taken from NFS implementation by Al Viro. 419 */ 420 static int ··· 424 sizeof (struct befs_inode_info), 425 0, (SLAB_RECLAIM_ACCOUNT| 426 SLAB_MEM_SPREAD), 427 - init_once, NULL); 428 if (befs_inode_cachep == NULL) { 429 printk(KERN_ERR "befs_init_inodecache: " 430 "Couldn't initalize inode slabcache\n");
··· 414 } 415 416 /* Initialize the inode cache. Called at fs setup. 417 + * 418 * Taken from NFS implementation by Al Viro. 419 */ 420 static int ··· 424 sizeof (struct befs_inode_info), 425 0, (SLAB_RECLAIM_ACCOUNT| 426 SLAB_MEM_SPREAD), 427 + init_once); 428 if (befs_inode_cachep == NULL) { 429 printk(KERN_ERR "befs_init_inodecache: " 430 "Couldn't initalize inode slabcache\n");
+2 -2
fs/bfs/inode.c
··· 250 251 inode_init_once(&bi->vfs_inode); 252 } 253 - 254 static int init_inodecache(void) 255 { 256 bfs_inode_cachep = kmem_cache_create("bfs_inode_cache", 257 sizeof(struct bfs_inode_info), 258 0, (SLAB_RECLAIM_ACCOUNT| 259 SLAB_MEM_SPREAD), 260 - init_once, NULL); 261 if (bfs_inode_cachep == NULL) 262 return -ENOMEM; 263 return 0;
··· 250 251 inode_init_once(&bi->vfs_inode); 252 } 253 + 254 static int init_inodecache(void) 255 { 256 bfs_inode_cachep = kmem_cache_create("bfs_inode_cache", 257 sizeof(struct bfs_inode_info), 258 0, (SLAB_RECLAIM_ACCOUNT| 259 SLAB_MEM_SPREAD), 260 + init_once); 261 if (bfs_inode_cachep == NULL) 262 return -ENOMEM; 263 return 0;
+1 -1
fs/bio.c
··· 1187 1188 size = bvs->nr_vecs * sizeof(struct bio_vec); 1189 bvs->slab = kmem_cache_create(bvs->name, size, 0, 1190 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1191 } 1192 } 1193
··· 1187 1188 size = bvs->nr_vecs * sizeof(struct bio_vec); 1189 bvs->slab = kmem_cache_create(bvs->name, size, 0, 1190 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1191 } 1192 } 1193
+1 -1
fs/block_dev.c
··· 517 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 518 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 519 SLAB_MEM_SPREAD|SLAB_PANIC), 520 - init_once, NULL); 521 err = register_filesystem(&bd_type); 522 if (err) 523 panic("Cannot register bdev pseudo-fs");
··· 517 bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 518 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 519 SLAB_MEM_SPREAD|SLAB_PANIC), 520 + init_once); 521 err = register_filesystem(&bd_type); 522 if (err) 523 panic("Cannot register bdev pseudo-fs");
+5 -5
fs/cifs/cifsfs.c
··· 719 sizeof (struct cifsInodeInfo), 720 0, (SLAB_RECLAIM_ACCOUNT| 721 SLAB_MEM_SPREAD), 722 - cifs_init_once, NULL); 723 if (cifs_inode_cachep == NULL) 724 return -ENOMEM; 725 ··· 748 cifs_req_cachep = kmem_cache_create("cifs_request", 749 CIFSMaxBufSize + 750 MAX_CIFS_HDR_SIZE, 0, 751 - SLAB_HWCACHE_ALIGN, NULL, NULL); 752 if (cifs_req_cachep == NULL) 753 return -ENOMEM; 754 ··· 776 alloc of large cifs buffers even when page debugging is on */ 777 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", 778 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, 779 - NULL, NULL); 780 if (cifs_sm_req_cachep == NULL) { 781 mempool_destroy(cifs_req_poolp); 782 kmem_cache_destroy(cifs_req_cachep); ··· 817 { 818 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", 819 sizeof (struct mid_q_entry), 0, 820 - SLAB_HWCACHE_ALIGN, NULL, NULL); 821 if (cifs_mid_cachep == NULL) 822 return -ENOMEM; 823 ··· 830 831 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs", 832 sizeof (struct oplock_q_entry), 0, 833 - SLAB_HWCACHE_ALIGN, NULL, NULL); 834 if (cifs_oplock_cachep == NULL) { 835 mempool_destroy(cifs_mid_poolp); 836 kmem_cache_destroy(cifs_mid_cachep);
··· 719 sizeof (struct cifsInodeInfo), 720 0, (SLAB_RECLAIM_ACCOUNT| 721 SLAB_MEM_SPREAD), 722 + cifs_init_once); 723 if (cifs_inode_cachep == NULL) 724 return -ENOMEM; 725 ··· 748 cifs_req_cachep = kmem_cache_create("cifs_request", 749 CIFSMaxBufSize + 750 MAX_CIFS_HDR_SIZE, 0, 751 + SLAB_HWCACHE_ALIGN, NULL); 752 if (cifs_req_cachep == NULL) 753 return -ENOMEM; 754 ··· 776 alloc of large cifs buffers even when page debugging is on */ 777 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", 778 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, 779 + NULL); 780 if (cifs_sm_req_cachep == NULL) { 781 mempool_destroy(cifs_req_poolp); 782 kmem_cache_destroy(cifs_req_cachep); ··· 817 { 818 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", 819 sizeof (struct mid_q_entry), 0, 820 + SLAB_HWCACHE_ALIGN, NULL); 821 if (cifs_mid_cachep == NULL) 822 return -ENOMEM; 823 ··· 830 831 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs", 832 sizeof (struct oplock_q_entry), 0, 833 + SLAB_HWCACHE_ALIGN, NULL); 834 if (cifs_oplock_cachep == NULL) { 835 mempool_destroy(cifs_mid_poolp); 836 kmem_cache_destroy(cifs_mid_cachep);
+2 -2
fs/coda/inode.c
··· 64 65 inode_init_once(&ei->vfs_inode); 66 } 67 - 68 int coda_init_inodecache(void) 69 { 70 coda_inode_cachep = kmem_cache_create("coda_inode_cache", 71 sizeof(struct coda_inode_info), 72 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 73 - init_once, NULL); 74 if (coda_inode_cachep == NULL) 75 return -ENOMEM; 76 return 0;
··· 64 65 inode_init_once(&ei->vfs_inode); 66 } 67 + 68 int coda_init_inodecache(void) 69 { 70 coda_inode_cachep = kmem_cache_create("coda_inode_cache", 71 sizeof(struct coda_inode_info), 72 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 73 + init_once); 74 if (coda_inode_cachep == NULL) 75 return -ENOMEM; 76 return 0;
+1 -1
fs/configfs/mount.c
··· 136 137 configfs_dir_cachep = kmem_cache_create("configfs_dir_cache", 138 sizeof(struct configfs_dirent), 139 - 0, 0, NULL, NULL); 140 if (!configfs_dir_cachep) 141 goto out; 142
··· 136 137 configfs_dir_cachep = kmem_cache_create("configfs_dir_cache", 138 sizeof(struct configfs_dirent), 139 + 0, 0, NULL); 140 if (!configfs_dir_cachep) 141 goto out; 142
+2 -2
fs/dcache.c
··· 2165 mempages -= reserve; 2166 2167 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 2168 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2169 2170 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, 2171 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2172 2173 dcache_init(mempages); 2174 inode_init(mempages);
··· 2165 mempages -= reserve; 2166 2167 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 2168 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2169 2170 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, 2171 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2172 2173 dcache_init(mempages); 2174 inode_init(mempages);
+1 -1
fs/dcookies.c
··· 205 206 dcookie_cache = kmem_cache_create("dcookie_cache", 207 sizeof(struct dcookie_struct), 208 - 0, 0, NULL, NULL); 209 210 if (!dcookie_cache) 211 goto out;
··· 205 206 dcookie_cache = kmem_cache_create("dcookie_cache", 207 sizeof(struct dcookie_struct), 208 + 0, 0, NULL); 209 210 if (!dcookie_cache) 211 goto out;
+1 -1
fs/dlm/lowcomms.c
··· 1449 error = -ENOMEM; 1450 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection), 1451 __alignof__(struct connection), 0, 1452 - NULL, NULL); 1453 if (!con_cache) 1454 goto out; 1455
··· 1449 error = -ENOMEM; 1450 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection), 1451 __alignof__(struct connection), 0, 1452 + NULL); 1453 if (!con_cache) 1454 goto out; 1455
+1 -1
fs/dlm/memory.c
··· 23 int ret = 0; 24 25 lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), 26 - __alignof__(struct dlm_lkb), 0, NULL, NULL); 27 if (!lkb_cache) 28 ret = -ENOMEM; 29 return ret;
··· 23 int ret = 0; 24 25 lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), 26 + __alignof__(struct dlm_lkb), 0, NULL); 27 if (!lkb_cache) 28 ret = -ENOMEM; 29 return ret;
+1 -1
fs/dnotify.c
··· 176 static int __init dnotify_init(void) 177 { 178 dn_cache = kmem_cache_create("dnotify_cache", 179 - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL, NULL); 180 return 0; 181 } 182
··· 176 static int __init dnotify_init(void) 177 { 178 dn_cache = kmem_cache_create("dnotify_cache", 179 + sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); 180 return 0; 181 } 182
+2 -2
fs/dquot.c
··· 1848 1849 register_sysctl_table(sys_table); 1850 1851 - dquot_cachep = kmem_cache_create("dquot", 1852 sizeof(struct dquot), sizeof(unsigned long) * 4, 1853 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 1854 SLAB_MEM_SPREAD|SLAB_PANIC), 1855 - NULL, NULL); 1856 1857 order = 0; 1858 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
··· 1848 1849 register_sysctl_table(sys_table); 1850 1851 + dquot_cachep = kmem_cache_create("dquot", 1852 sizeof(struct dquot), sizeof(unsigned long) * 4, 1853 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 1854 SLAB_MEM_SPREAD|SLAB_PANIC), 1855 + NULL); 1856 1857 order = 0; 1858 dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
+1 -1
fs/ecryptfs/main.c
··· 677 678 info = &ecryptfs_cache_infos[i]; 679 *(info->cache) = kmem_cache_create(info->name, info->size, 680 - 0, SLAB_HWCACHE_ALIGN, info->ctor, NULL); 681 if (!*(info->cache)) { 682 ecryptfs_free_kmem_caches(); 683 ecryptfs_printk(KERN_WARNING, "%s: "
··· 677 678 info = &ecryptfs_cache_infos[i]; 679 *(info->cache) = kmem_cache_create(info->name, info->size, 680 + 0, SLAB_HWCACHE_ALIGN, info->ctor); 681 if (!*(info->cache)) { 682 ecryptfs_free_kmem_caches(); 683 ecryptfs_printk(KERN_WARNING, "%s: "
+2 -2
fs/efs/super.c
··· 75 76 inode_init_once(&ei->vfs_inode); 77 } 78 - 79 static int init_inodecache(void) 80 { 81 efs_inode_cachep = kmem_cache_create("efs_inode_cache", 82 sizeof(struct efs_inode_info), 83 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 84 - init_once, NULL); 85 if (efs_inode_cachep == NULL) 86 return -ENOMEM; 87 return 0;
··· 75 76 inode_init_once(&ei->vfs_inode); 77 } 78 + 79 static int init_inodecache(void) 80 { 81 efs_inode_cachep = kmem_cache_create("efs_inode_cache", 82 sizeof(struct efs_inode_info), 83 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 84 + init_once); 85 if (efs_inode_cachep == NULL) 86 return -ENOMEM; 87 return 0;
+2 -2
fs/eventpoll.c
··· 1324 /* Allocates slab cache used to allocate "struct epitem" items */ 1325 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 1326 0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC, 1327 - NULL, NULL); 1328 1329 /* Allocates slab cache used to allocate "struct eppoll_entry" */ 1330 pwq_cache = kmem_cache_create("eventpoll_pwq", 1331 sizeof(struct eppoll_entry), 0, 1332 - EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL); 1333 1334 return 0; 1335 }
··· 1324 /* Allocates slab cache used to allocate "struct epitem" items */ 1325 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), 1326 0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC, 1327 + NULL); 1328 1329 /* Allocates slab cache used to allocate "struct eppoll_entry" */ 1330 pwq_cache = kmem_cache_create("eventpoll_pwq", 1331 sizeof(struct eppoll_entry), 0, 1332 + EPI_SLAB_DEBUG|SLAB_PANIC, NULL); 1333 1334 return 0; 1335 }
+2 -2
fs/ext2/super.c
··· 167 #endif 168 inode_init_once(&ei->vfs_inode); 169 } 170 - 171 static int init_inodecache(void) 172 { 173 ext2_inode_cachep = kmem_cache_create("ext2_inode_cache", 174 sizeof(struct ext2_inode_info), 175 0, (SLAB_RECLAIM_ACCOUNT| 176 SLAB_MEM_SPREAD), 177 - init_once, NULL); 178 if (ext2_inode_cachep == NULL) 179 return -ENOMEM; 180 return 0;
··· 167 #endif 168 inode_init_once(&ei->vfs_inode); 169 } 170 + 171 static int init_inodecache(void) 172 { 173 ext2_inode_cachep = kmem_cache_create("ext2_inode_cache", 174 sizeof(struct ext2_inode_info), 175 0, (SLAB_RECLAIM_ACCOUNT| 176 SLAB_MEM_SPREAD), 177 + init_once); 178 if (ext2_inode_cachep == NULL) 179 return -ENOMEM; 180 return 0;
+1 -1
fs/ext3/super.c
··· 490 sizeof(struct ext3_inode_info), 491 0, (SLAB_RECLAIM_ACCOUNT| 492 SLAB_MEM_SPREAD), 493 - init_once, NULL); 494 if (ext3_inode_cachep == NULL) 495 return -ENOMEM; 496 return 0;
··· 490 sizeof(struct ext3_inode_info), 491 0, (SLAB_RECLAIM_ACCOUNT| 492 SLAB_MEM_SPREAD), 493 + init_once); 494 if (ext3_inode_cachep == NULL) 495 return -ENOMEM; 496 return 0;
+1 -1
fs/ext4/super.c
··· 541 sizeof(struct ext4_inode_info), 542 0, (SLAB_RECLAIM_ACCOUNT| 543 SLAB_MEM_SPREAD), 544 - init_once, NULL); 545 if (ext4_inode_cachep == NULL) 546 return -ENOMEM; 547 return 0;
··· 541 sizeof(struct ext4_inode_info), 542 0, (SLAB_RECLAIM_ACCOUNT| 543 SLAB_MEM_SPREAD), 544 + init_once); 545 if (ext4_inode_cachep == NULL) 546 return -ENOMEM; 547 return 0;
+1 -1
fs/fat/cache.c
··· 48 fat_cache_cachep = kmem_cache_create("fat_cache", 49 sizeof(struct fat_cache), 50 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 51 - init_once, NULL); 52 if (fat_cache_cachep == NULL) 53 return -ENOMEM; 54 return 0;
··· 48 fat_cache_cachep = kmem_cache_create("fat_cache", 49 sizeof(struct fat_cache), 50 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 51 + init_once); 52 if (fat_cache_cachep == NULL) 53 return -ENOMEM; 54 return 0;
+1 -1
fs/fat/inode.c
··· 514 sizeof(struct msdos_inode_info), 515 0, (SLAB_RECLAIM_ACCOUNT| 516 SLAB_MEM_SPREAD), 517 - init_once, NULL); 518 if (fat_inode_cachep == NULL) 519 return -ENOMEM; 520 return 0;
··· 514 sizeof(struct msdos_inode_info), 515 0, (SLAB_RECLAIM_ACCOUNT| 516 SLAB_MEM_SPREAD), 517 + init_once); 518 if (fat_inode_cachep == NULL) 519 return -ENOMEM; 520 return 0;
+1 -1
fs/fcntl.c
··· 638 static int __init fasync_init(void) 639 { 640 fasync_cache = kmem_cache_create("fasync_cache", 641 - sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL, NULL); 642 return 0; 643 } 644
··· 638 static int __init fasync_init(void) 639 { 640 fasync_cache = kmem_cache_create("fasync_cache", 641 + sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL); 642 return 0; 643 } 644
+2 -2
fs/freevxfs/vxfs_super.c
··· 263 int rv; 264 265 vxfs_inode_cachep = kmem_cache_create("vxfs_inode", 266 - sizeof(struct vxfs_inode_info), 0, 267 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL); 268 if (!vxfs_inode_cachep) 269 return -ENOMEM; 270 rv = register_filesystem(&vxfs_fs_type);
··· 263 int rv; 264 265 vxfs_inode_cachep = kmem_cache_create("vxfs_inode", 266 + sizeof(struct vxfs_inode_info), 0, 267 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); 268 if (!vxfs_inode_cachep) 269 return -ENOMEM; 270 rv = register_filesystem(&vxfs_fs_type);
+1 -1
fs/fuse/dev.c
··· 1044 int err = -ENOMEM; 1045 fuse_req_cachep = kmem_cache_create("fuse_request", 1046 sizeof(struct fuse_req), 1047 - 0, 0, NULL, NULL); 1048 if (!fuse_req_cachep) 1049 goto out; 1050
··· 1044 int err = -ENOMEM; 1045 fuse_req_cachep = kmem_cache_create("fuse_request", 1046 sizeof(struct fuse_req), 1047 + 0, 0, NULL); 1048 if (!fuse_req_cachep) 1049 goto out; 1050
+1 -1
fs/fuse/inode.c
··· 706 fuse_inode_cachep = kmem_cache_create("fuse_inode", 707 sizeof(struct fuse_inode), 708 0, SLAB_HWCACHE_ALIGN, 709 - fuse_inode_init_once, NULL); 710 err = -ENOMEM; 711 if (!fuse_inode_cachep) 712 goto out_unreg2;
··· 706 fuse_inode_cachep = kmem_cache_create("fuse_inode", 707 sizeof(struct fuse_inode), 708 0, SLAB_HWCACHE_ALIGN, 709 + fuse_inode_init_once); 710 err = -ENOMEM; 711 if (!fuse_inode_cachep) 712 goto out_unreg2;
+3 -3
fs/gfs2/main.c
··· 72 gfs2_glock_cachep = kmem_cache_create("gfs2_glock", 73 sizeof(struct gfs2_glock), 74 0, 0, 75 - gfs2_init_glock_once, NULL); 76 if (!gfs2_glock_cachep) 77 goto fail; 78 ··· 80 sizeof(struct gfs2_inode), 81 0, SLAB_RECLAIM_ACCOUNT| 82 SLAB_MEM_SPREAD, 83 - gfs2_init_inode_once, NULL); 84 if (!gfs2_inode_cachep) 85 goto fail; 86 87 gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata", 88 sizeof(struct gfs2_bufdata), 89 - 0, 0, NULL, NULL); 90 if (!gfs2_bufdata_cachep) 91 goto fail; 92
··· 72 gfs2_glock_cachep = kmem_cache_create("gfs2_glock", 73 sizeof(struct gfs2_glock), 74 0, 0, 75 + gfs2_init_glock_once); 76 if (!gfs2_glock_cachep) 77 goto fail; 78 ··· 80 sizeof(struct gfs2_inode), 81 0, SLAB_RECLAIM_ACCOUNT| 82 SLAB_MEM_SPREAD, 83 + gfs2_init_inode_once); 84 if (!gfs2_inode_cachep) 85 goto fail; 86 87 gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata", 88 sizeof(struct gfs2_bufdata), 89 + 0, 0, NULL); 90 if (!gfs2_bufdata_cachep) 91 goto fail; 92
+1 -1
fs/hfs/super.c
··· 443 444 hfs_inode_cachep = kmem_cache_create("hfs_inode_cache", 445 sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN, 446 - hfs_init_once, NULL); 447 if (!hfs_inode_cachep) 448 return -ENOMEM; 449 err = register_filesystem(&hfs_fs_type);
··· 443 444 hfs_inode_cachep = kmem_cache_create("hfs_inode_cache", 445 sizeof(struct hfs_inode_info), 0, SLAB_HWCACHE_ALIGN, 446 + hfs_init_once); 447 if (!hfs_inode_cachep) 448 return -ENOMEM; 449 err = register_filesystem(&hfs_fs_type);
+1 -1
fs/hfsplus/super.c
··· 479 480 hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache", 481 HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN, 482 - hfsplus_init_once, NULL); 483 if (!hfsplus_inode_cachep) 484 return -ENOMEM; 485 err = register_filesystem(&hfsplus_fs_type);
··· 479 480 hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache", 481 HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN, 482 + hfsplus_init_once); 483 if (!hfsplus_inode_cachep) 484 return -ENOMEM; 485 err = register_filesystem(&hfsplus_fs_type);
+2 -2
fs/hpfs/super.c
··· 181 mutex_init(&ei->i_parent_mutex); 182 inode_init_once(&ei->vfs_inode); 183 } 184 - 185 static int init_inodecache(void) 186 { 187 hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache", 188 sizeof(struct hpfs_inode_info), 189 0, (SLAB_RECLAIM_ACCOUNT| 190 SLAB_MEM_SPREAD), 191 - init_once, NULL); 192 if (hpfs_inode_cachep == NULL) 193 return -ENOMEM; 194 return 0;
··· 181 mutex_init(&ei->i_parent_mutex); 182 inode_init_once(&ei->vfs_inode); 183 } 184 + 185 static int init_inodecache(void) 186 { 187 hpfs_inode_cachep = kmem_cache_create("hpfs_inode_cache", 188 sizeof(struct hpfs_inode_info), 189 0, (SLAB_RECLAIM_ACCOUNT| 190 SLAB_MEM_SPREAD), 191 + init_once); 192 if (hpfs_inode_cachep == NULL) 193 return -ENOMEM; 194 return 0;
+1 -1
fs/hugetlbfs/inode.c
··· 848 849 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 850 sizeof(struct hugetlbfs_inode_info), 851 - 0, 0, init_once, NULL); 852 if (hugetlbfs_inode_cachep == NULL) 853 return -ENOMEM; 854
··· 848 849 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", 850 sizeof(struct hugetlbfs_inode_info), 851 + 0, 0, init_once); 852 if (hugetlbfs_inode_cachep == NULL) 853 return -ENOMEM; 854
+1 -2
fs/inode.c
··· 1388 0, 1389 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 1390 SLAB_MEM_SPREAD), 1391 - init_once, 1392 - NULL); 1393 register_shrinker(&icache_shrinker); 1394 1395 /* Hash may have been set up in inode_init_early */
··· 1388 0, 1389 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| 1390 SLAB_MEM_SPREAD), 1391 + init_once); 1392 register_shrinker(&icache_shrinker); 1393 1394 /* Hash may have been set up in inode_init_early */
+2 -2
fs/inotify_user.c
··· 716 717 watch_cachep = kmem_cache_create("inotify_watch_cache", 718 sizeof(struct inotify_user_watch), 719 - 0, SLAB_PANIC, NULL, NULL); 720 event_cachep = kmem_cache_create("inotify_event_cache", 721 sizeof(struct inotify_kernel_event), 722 - 0, SLAB_PANIC, NULL, NULL); 723 724 return 0; 725 }
··· 716 717 watch_cachep = kmem_cache_create("inotify_watch_cache", 718 sizeof(struct inotify_user_watch), 719 + 0, SLAB_PANIC, NULL); 720 event_cachep = kmem_cache_create("inotify_event_cache", 721 sizeof(struct inotify_kernel_event), 722 + 0, SLAB_PANIC, NULL); 723 724 return 0; 725 }
+1 -1
fs/isofs/inode.c
··· 86 sizeof(struct iso_inode_info), 87 0, (SLAB_RECLAIM_ACCOUNT| 88 SLAB_MEM_SPREAD), 89 - init_once, NULL); 90 if (isofs_inode_cachep == NULL) 91 return -ENOMEM; 92 return 0;
··· 86 sizeof(struct iso_inode_info), 87 0, (SLAB_RECLAIM_ACCOUNT| 88 SLAB_MEM_SPREAD), 89 + init_once); 90 if (isofs_inode_cachep == NULL) 91 return -ENOMEM; 92 return 0;
+3 -5
fs/jbd/journal.c
··· 1668 * boundary. 1669 */ 1670 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], 1671 - slab_size, slab_size, 0, NULL, NULL); 1672 if (!jbd_slab[i]) { 1673 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); 1674 return -ENOMEM; ··· 1711 sizeof(struct journal_head), 1712 0, /* offset */ 1713 0, /* flags */ 1714 - NULL, /* ctor */ 1715 - NULL); /* dtor */ 1716 retval = 0; 1717 if (journal_head_cache == 0) { 1718 retval = -ENOMEM; ··· 2007 sizeof(handle_t), 2008 0, /* offset */ 2009 0, /* flags */ 2010 - NULL, /* ctor */ 2011 - NULL); /* dtor */ 2012 if (jbd_handle_cache == NULL) { 2013 printk(KERN_EMERG "JBD: failed to create handle cache\n"); 2014 return -ENOMEM;
··· 1668 * boundary. 1669 */ 1670 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], 1671 + slab_size, slab_size, 0, NULL); 1672 if (!jbd_slab[i]) { 1673 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); 1674 return -ENOMEM; ··· 1711 sizeof(struct journal_head), 1712 0, /* offset */ 1713 0, /* flags */ 1714 + NULL); /* ctor */ 1715 retval = 0; 1716 if (journal_head_cache == 0) { 1717 retval = -ENOMEM; ··· 2008 sizeof(handle_t), 2009 0, /* offset */ 2010 0, /* flags */ 2011 + NULL); /* ctor */ 2012 if (jbd_handle_cache == NULL) { 2013 printk(KERN_EMERG "JBD: failed to create handle cache\n"); 2014 return -ENOMEM;
+2 -2
fs/jbd/revoke.c
··· 170 { 171 revoke_record_cache = kmem_cache_create("revoke_record", 172 sizeof(struct jbd_revoke_record_s), 173 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 174 if (revoke_record_cache == 0) 175 return -ENOMEM; 176 177 revoke_table_cache = kmem_cache_create("revoke_table", 178 sizeof(struct jbd_revoke_table_s), 179 - 0, 0, NULL, NULL); 180 if (revoke_table_cache == 0) { 181 kmem_cache_destroy(revoke_record_cache); 182 revoke_record_cache = NULL;
··· 170 { 171 revoke_record_cache = kmem_cache_create("revoke_record", 172 sizeof(struct jbd_revoke_record_s), 173 + 0, SLAB_HWCACHE_ALIGN, NULL); 174 if (revoke_record_cache == 0) 175 return -ENOMEM; 176 177 revoke_table_cache = kmem_cache_create("revoke_table", 178 sizeof(struct jbd_revoke_table_s), 179 + 0, 0, NULL); 180 if (revoke_table_cache == 0) { 181 kmem_cache_destroy(revoke_record_cache); 182 revoke_record_cache = NULL;
+3 -5
fs/jbd2/journal.c
··· 1680 * boundary. 1681 */ 1682 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], 1683 - slab_size, slab_size, 0, NULL, NULL); 1684 if (!jbd_slab[i]) { 1685 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); 1686 return -ENOMEM; ··· 1723 sizeof(struct journal_head), 1724 0, /* offset */ 1725 0, /* flags */ 1726 - NULL, /* ctor */ 1727 - NULL); /* dtor */ 1728 retval = 0; 1729 if (jbd2_journal_head_cache == 0) { 1730 retval = -ENOMEM; ··· 2005 sizeof(handle_t), 2006 0, /* offset */ 2007 0, /* flags */ 2008 - NULL, /* ctor */ 2009 - NULL); /* dtor */ 2010 if (jbd2_handle_cache == NULL) { 2011 printk(KERN_EMERG "JBD: failed to create handle cache\n"); 2012 return -ENOMEM;
··· 1680 * boundary. 1681 */ 1682 jbd_slab[i] = kmem_cache_create(jbd_slab_names[i], 1683 + slab_size, slab_size, 0, NULL); 1684 if (!jbd_slab[i]) { 1685 printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n"); 1686 return -ENOMEM; ··· 1723 sizeof(struct journal_head), 1724 0, /* offset */ 1725 0, /* flags */ 1726 + NULL); /* ctor */ 1727 retval = 0; 1728 if (jbd2_journal_head_cache == 0) { 1729 retval = -ENOMEM; ··· 2006 sizeof(handle_t), 2007 0, /* offset */ 2008 0, /* flags */ 2009 + NULL); /* ctor */ 2010 if (jbd2_handle_cache == NULL) { 2011 printk(KERN_EMERG "JBD: failed to create handle cache\n"); 2012 return -ENOMEM;
+2 -2
fs/jbd2/revoke.c
··· 171 { 172 jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record", 173 sizeof(struct jbd2_revoke_record_s), 174 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 175 if (jbd2_revoke_record_cache == 0) 176 return -ENOMEM; 177 178 jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table", 179 sizeof(struct jbd2_revoke_table_s), 180 - 0, 0, NULL, NULL); 181 if (jbd2_revoke_table_cache == 0) { 182 kmem_cache_destroy(jbd2_revoke_record_cache); 183 jbd2_revoke_record_cache = NULL;
··· 171 { 172 jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record", 173 sizeof(struct jbd2_revoke_record_s), 174 + 0, SLAB_HWCACHE_ALIGN, NULL); 175 if (jbd2_revoke_record_cache == 0) 176 return -ENOMEM; 177 178 jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table", 179 sizeof(struct jbd2_revoke_table_s), 180 + 0, 0, NULL); 181 if (jbd2_revoke_table_cache == 0) { 182 kmem_cache_destroy(jbd2_revoke_record_cache); 183 jbd2_revoke_record_cache = NULL;
+9 -9
fs/jffs2/malloc.c
··· 33 { 34 full_dnode_slab = kmem_cache_create("jffs2_full_dnode", 35 sizeof(struct jffs2_full_dnode), 36 - 0, 0, NULL, NULL); 37 if (!full_dnode_slab) 38 goto err; 39 40 raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", 41 sizeof(struct jffs2_raw_dirent), 42 - 0, 0, NULL, NULL); 43 if (!raw_dirent_slab) 44 goto err; 45 46 raw_inode_slab = kmem_cache_create("jffs2_raw_inode", 47 sizeof(struct jffs2_raw_inode), 48 - 0, 0, NULL, NULL); 49 if (!raw_inode_slab) 50 goto err; 51 52 tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", 53 sizeof(struct jffs2_tmp_dnode_info), 54 - 0, 0, NULL, NULL); 55 if (!tmp_dnode_info_slab) 56 goto err; 57 58 raw_node_ref_slab = kmem_cache_create("jffs2_refblock", 59 sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1), 60 - 0, 0, NULL, NULL); 61 if (!raw_node_ref_slab) 62 goto err; 63 64 node_frag_slab = kmem_cache_create("jffs2_node_frag", 65 sizeof(struct jffs2_node_frag), 66 - 0, 0, NULL, NULL); 67 if (!node_frag_slab) 68 goto err; 69 70 inode_cache_slab = kmem_cache_create("jffs2_inode_cache", 71 sizeof(struct jffs2_inode_cache), 72 - 0, 0, NULL, NULL); 73 if (!inode_cache_slab) 74 goto err; 75 76 #ifdef CONFIG_JFFS2_FS_XATTR 77 xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum", 78 sizeof(struct jffs2_xattr_datum), 79 - 0, 0, NULL, NULL); 80 if (!xattr_datum_cache) 81 goto err; 82 83 xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref", 84 sizeof(struct jffs2_xattr_ref), 85 - 0, 0, NULL, NULL); 86 if (!xattr_ref_cache) 87 goto err; 88 #endif
··· 33 { 34 full_dnode_slab = kmem_cache_create("jffs2_full_dnode", 35 sizeof(struct jffs2_full_dnode), 36 + 0, 0, NULL); 37 if (!full_dnode_slab) 38 goto err; 39 40 raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", 41 sizeof(struct jffs2_raw_dirent), 42 + 0, 0, NULL); 43 if (!raw_dirent_slab) 44 goto err; 45 46 raw_inode_slab = kmem_cache_create("jffs2_raw_inode", 47 sizeof(struct jffs2_raw_inode), 48 + 0, 0, NULL); 49 if (!raw_inode_slab) 50 goto err; 51 52 tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", 53 sizeof(struct jffs2_tmp_dnode_info), 54 + 0, 0, NULL); 55 if (!tmp_dnode_info_slab) 56 goto err; 57 58 raw_node_ref_slab = kmem_cache_create("jffs2_refblock", 59 sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1), 60 + 0, 0, NULL); 61 if (!raw_node_ref_slab) 62 goto err; 63 64 node_frag_slab = kmem_cache_create("jffs2_node_frag", 65 sizeof(struct jffs2_node_frag), 66 + 0, 0, NULL); 67 if (!node_frag_slab) 68 goto err; 69 70 inode_cache_slab = kmem_cache_create("jffs2_inode_cache", 71 sizeof(struct jffs2_inode_cache), 72 + 0, 0, NULL); 73 if (!inode_cache_slab) 74 goto err; 75 76 #ifdef CONFIG_JFFS2_FS_XATTR 77 xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum", 78 sizeof(struct jffs2_xattr_datum), 79 + 0, 0, NULL); 80 if (!xattr_datum_cache) 81 goto err; 82 83 xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref", 84 sizeof(struct jffs2_xattr_ref), 85 + 0, 0, NULL); 86 if (!xattr_ref_cache) 87 goto err; 88 #endif
+1 -1
fs/jffs2/super.c
··· 192 sizeof(struct jffs2_inode_info), 193 0, (SLAB_RECLAIM_ACCOUNT| 194 SLAB_MEM_SPREAD), 195 - jffs2_i_init_once, NULL); 196 if (!jffs2_inode_cachep) { 197 printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n"); 198 return -ENOMEM;
··· 192 sizeof(struct jffs2_inode_info), 193 0, (SLAB_RECLAIM_ACCOUNT| 194 SLAB_MEM_SPREAD), 195 + jffs2_i_init_once); 196 if (!jffs2_inode_cachep) { 197 printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n"); 198 return -ENOMEM;
+1 -1
fs/jfs/jfs_metapage.c
··· 213 * Allocate the metapage structures 214 */ 215 metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage), 216 - 0, 0, init_once, NULL); 217 if (metapage_cache == NULL) 218 return -ENOMEM; 219
··· 213 * Allocate the metapage structures 214 */ 215 metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage), 216 + 0, 0, init_once); 217 if (metapage_cache == NULL) 218 return -ENOMEM; 219
+1 -1
fs/jfs/super.c
··· 776 jfs_inode_cachep = 777 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, 778 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 779 - init_once, NULL); 780 if (jfs_inode_cachep == NULL) 781 return -ENOMEM; 782
··· 776 jfs_inode_cachep = 777 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, 778 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 779 + init_once); 780 if (jfs_inode_cachep == NULL) 781 return -ENOMEM; 782
+1 -1
fs/locks.c
··· 2276 { 2277 filelock_cache = kmem_cache_create("file_lock_cache", 2278 sizeof(struct file_lock), 0, SLAB_PANIC, 2279 - init_once, NULL); 2280 return 0; 2281 } 2282
··· 2276 { 2277 filelock_cache = kmem_cache_create("file_lock_cache", 2278 sizeof(struct file_lock), 0, SLAB_PANIC, 2279 + init_once); 2280 return 0; 2281 } 2282
+1 -1
fs/mbcache.c
··· 292 INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]); 293 } 294 cache->c_entry_cache = kmem_cache_create(name, entry_size, 0, 295 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL); 296 if (!cache->c_entry_cache) 297 goto fail; 298
··· 292 INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]); 293 } 294 cache->c_entry_cache = kmem_cache_create(name, entry_size, 0, 295 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); 296 if (!cache->c_entry_cache) 297 goto fail; 298
+2 -2
fs/minix/inode.c
··· 75 76 inode_init_once(&ei->vfs_inode); 77 } 78 - 79 static int init_inodecache(void) 80 { 81 minix_inode_cachep = kmem_cache_create("minix_inode_cache", 82 sizeof(struct minix_inode_info), 83 0, (SLAB_RECLAIM_ACCOUNT| 84 SLAB_MEM_SPREAD), 85 - init_once, NULL); 86 if (minix_inode_cachep == NULL) 87 return -ENOMEM; 88 return 0;
··· 75 76 inode_init_once(&ei->vfs_inode); 77 } 78 + 79 static int init_inodecache(void) 80 { 81 minix_inode_cachep = kmem_cache_create("minix_inode_cache", 82 sizeof(struct minix_inode_info), 83 0, (SLAB_RECLAIM_ACCOUNT| 84 SLAB_MEM_SPREAD), 85 + init_once); 86 if (minix_inode_cachep == NULL) 87 return -ENOMEM; 88 return 0;
+1 -1
fs/namespace.c
··· 1801 init_rwsem(&namespace_sem); 1802 1803 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), 1804 - 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL); 1805 1806 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); 1807
··· 1801 init_rwsem(&namespace_sem); 1802 1803 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount), 1804 + 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 1805 1806 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); 1807
+2 -2
fs/ncpfs/inode.c
··· 63 mutex_init(&ei->open_mutex); 64 inode_init_once(&ei->vfs_inode); 65 } 66 - 67 static int init_inodecache(void) 68 { 69 ncp_inode_cachep = kmem_cache_create("ncp_inode_cache", 70 sizeof(struct ncp_inode_info), 71 0, (SLAB_RECLAIM_ACCOUNT| 72 SLAB_MEM_SPREAD), 73 - init_once, NULL); 74 if (ncp_inode_cachep == NULL) 75 return -ENOMEM; 76 return 0;
··· 63 mutex_init(&ei->open_mutex); 64 inode_init_once(&ei->vfs_inode); 65 } 66 + 67 static int init_inodecache(void) 68 { 69 ncp_inode_cachep = kmem_cache_create("ncp_inode_cache", 70 sizeof(struct ncp_inode_info), 71 0, (SLAB_RECLAIM_ACCOUNT| 72 SLAB_MEM_SPREAD), 73 + init_once); 74 if (ncp_inode_cachep == NULL) 75 return -ENOMEM; 76 return 0;
+1 -1
fs/nfs/direct.c
··· 875 sizeof(struct nfs_direct_req), 876 0, (SLAB_RECLAIM_ACCOUNT| 877 SLAB_MEM_SPREAD), 878 - NULL, NULL); 879 if (nfs_direct_cachep == NULL) 880 return -ENOMEM; 881
··· 875 sizeof(struct nfs_direct_req), 876 0, (SLAB_RECLAIM_ACCOUNT| 877 SLAB_MEM_SPREAD), 878 + NULL); 879 if (nfs_direct_cachep == NULL) 880 return -ENOMEM; 881
+2 -2
fs/nfs/inode.c
··· 1165 nfsi->npages = 0; 1166 nfs4_init_once(nfsi); 1167 } 1168 - 1169 static int __init nfs_init_inodecache(void) 1170 { 1171 nfs_inode_cachep = kmem_cache_create("nfs_inode_cache", 1172 sizeof(struct nfs_inode), 1173 0, (SLAB_RECLAIM_ACCOUNT| 1174 SLAB_MEM_SPREAD), 1175 - init_once, NULL); 1176 if (nfs_inode_cachep == NULL) 1177 return -ENOMEM; 1178
··· 1165 nfsi->npages = 0; 1166 nfs4_init_once(nfsi); 1167 } 1168 + 1169 static int __init nfs_init_inodecache(void) 1170 { 1171 nfs_inode_cachep = kmem_cache_create("nfs_inode_cache", 1172 sizeof(struct nfs_inode), 1173 0, (SLAB_RECLAIM_ACCOUNT| 1174 SLAB_MEM_SPREAD), 1175 + init_once); 1176 if (nfs_inode_cachep == NULL) 1177 return -ENOMEM; 1178
+1 -1
fs/nfs/pagelist.c
··· 442 nfs_page_cachep = kmem_cache_create("nfs_page", 443 sizeof(struct nfs_page), 444 0, SLAB_HWCACHE_ALIGN, 445 - NULL, NULL); 446 if (nfs_page_cachep == NULL) 447 return -ENOMEM; 448
··· 442 nfs_page_cachep = kmem_cache_create("nfs_page", 443 sizeof(struct nfs_page), 444 0, SLAB_HWCACHE_ALIGN, 445 + NULL); 446 if (nfs_page_cachep == NULL) 447 return -ENOMEM; 448
+1 -1
fs/nfs/read.c
··· 598 nfs_rdata_cachep = kmem_cache_create("nfs_read_data", 599 sizeof(struct nfs_read_data), 600 0, SLAB_HWCACHE_ALIGN, 601 - NULL, NULL); 602 if (nfs_rdata_cachep == NULL) 603 return -ENOMEM; 604
··· 598 nfs_rdata_cachep = kmem_cache_create("nfs_read_data", 599 sizeof(struct nfs_read_data), 600 0, SLAB_HWCACHE_ALIGN, 601 + NULL); 602 if (nfs_rdata_cachep == NULL) 603 return -ENOMEM; 604
+1 -1
fs/nfs/write.c
··· 1467 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 1468 sizeof(struct nfs_write_data), 1469 0, SLAB_HWCACHE_ALIGN, 1470 - NULL, NULL); 1471 if (nfs_wdata_cachep == NULL) 1472 return -ENOMEM; 1473
··· 1467 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 1468 sizeof(struct nfs_write_data), 1469 0, SLAB_HWCACHE_ALIGN, 1470 + NULL); 1471 if (nfs_wdata_cachep == NULL) 1472 return -ENOMEM; 1473
+4 -4
fs/nfsd/nfs4state.c
··· 1032 nfsd4_init_slabs(void) 1033 { 1034 stateowner_slab = kmem_cache_create("nfsd4_stateowners", 1035 - sizeof(struct nfs4_stateowner), 0, 0, NULL, NULL); 1036 if (stateowner_slab == NULL) 1037 goto out_nomem; 1038 file_slab = kmem_cache_create("nfsd4_files", 1039 - sizeof(struct nfs4_file), 0, 0, NULL, NULL); 1040 if (file_slab == NULL) 1041 goto out_nomem; 1042 stateid_slab = kmem_cache_create("nfsd4_stateids", 1043 - sizeof(struct nfs4_stateid), 0, 0, NULL, NULL); 1044 if (stateid_slab == NULL) 1045 goto out_nomem; 1046 deleg_slab = kmem_cache_create("nfsd4_delegations", 1047 - sizeof(struct nfs4_delegation), 0, 0, NULL, NULL); 1048 if (deleg_slab == NULL) 1049 goto out_nomem; 1050 return 0;
··· 1032 nfsd4_init_slabs(void) 1033 { 1034 stateowner_slab = kmem_cache_create("nfsd4_stateowners", 1035 + sizeof(struct nfs4_stateowner), 0, 0, NULL); 1036 if (stateowner_slab == NULL) 1037 goto out_nomem; 1038 file_slab = kmem_cache_create("nfsd4_files", 1039 + sizeof(struct nfs4_file), 0, 0, NULL); 1040 if (file_slab == NULL) 1041 goto out_nomem; 1042 stateid_slab = kmem_cache_create("nfsd4_stateids", 1043 + sizeof(struct nfs4_stateid), 0, 0, NULL); 1044 if (stateid_slab == NULL) 1045 goto out_nomem; 1046 deleg_slab = kmem_cache_create("nfsd4_delegations", 1047 + sizeof(struct nfs4_delegation), 0, 0, NULL); 1048 if (deleg_slab == NULL) 1049 goto out_nomem; 1050 return 0;
+5 -5
fs/ntfs/super.c
··· 3143 3144 ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name, 3145 sizeof(ntfs_index_context), 0 /* offset */, 3146 - SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */); 3147 if (!ntfs_index_ctx_cache) { 3148 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3149 ntfs_index_ctx_cache_name); ··· 3151 } 3152 ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name, 3153 sizeof(ntfs_attr_search_ctx), 0 /* offset */, 3154 - SLAB_HWCACHE_ALIGN, NULL /* ctor */, NULL /* dtor */); 3155 if (!ntfs_attr_ctx_cache) { 3156 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3157 ntfs_attr_ctx_cache_name); ··· 3160 3161 ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name, 3162 (NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0, 3163 - SLAB_HWCACHE_ALIGN, NULL, NULL); 3164 if (!ntfs_name_cache) { 3165 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3166 ntfs_name_cache_name); ··· 3169 3170 ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name, 3171 sizeof(ntfs_inode), 0, 3172 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL, NULL); 3173 if (!ntfs_inode_cache) { 3174 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3175 ntfs_inode_cache_name); ··· 3179 ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name, 3180 sizeof(big_ntfs_inode), 0, 3181 SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 3182 - ntfs_big_inode_init_once, NULL); 3183 if (!ntfs_big_inode_cache) { 3184 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3185 ntfs_big_inode_cache_name);
··· 3143 3144 ntfs_index_ctx_cache = kmem_cache_create(ntfs_index_ctx_cache_name, 3145 sizeof(ntfs_index_context), 0 /* offset */, 3146 + SLAB_HWCACHE_ALIGN, NULL /* ctor */); 3147 if (!ntfs_index_ctx_cache) { 3148 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3149 ntfs_index_ctx_cache_name); ··· 3151 } 3152 ntfs_attr_ctx_cache = kmem_cache_create(ntfs_attr_ctx_cache_name, 3153 sizeof(ntfs_attr_search_ctx), 0 /* offset */, 3154 + SLAB_HWCACHE_ALIGN, NULL /* ctor */); 3155 if (!ntfs_attr_ctx_cache) { 3156 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3157 ntfs_attr_ctx_cache_name); ··· 3160 3161 ntfs_name_cache = kmem_cache_create(ntfs_name_cache_name, 3162 (NTFS_MAX_NAME_LEN+1) * sizeof(ntfschar), 0, 3163 + SLAB_HWCACHE_ALIGN, NULL); 3164 if (!ntfs_name_cache) { 3165 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3166 ntfs_name_cache_name); ··· 3169 3170 ntfs_inode_cache = kmem_cache_create(ntfs_inode_cache_name, 3171 sizeof(ntfs_inode), 0, 3172 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); 3173 if (!ntfs_inode_cache) { 3174 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3175 ntfs_inode_cache_name); ··· 3179 ntfs_big_inode_cache = kmem_cache_create(ntfs_big_inode_cache_name, 3180 sizeof(big_ntfs_inode), 0, 3181 SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 3182 + ntfs_big_inode_init_once); 3183 if (!ntfs_big_inode_cache) { 3184 printk(KERN_CRIT "NTFS: Failed to create %s!\n", 3185 ntfs_big_inode_cache_name);
+1 -1
fs/ocfs2/dlm/dlmfs.c
··· 592 sizeof(struct dlmfs_inode_private), 593 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 594 SLAB_MEM_SPREAD), 595 - dlmfs_init_once, NULL); 596 if (!dlmfs_inode_cache) 597 return -ENOMEM; 598 cleanup_inode = 1;
··· 592 sizeof(struct dlmfs_inode_private), 593 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 594 SLAB_MEM_SPREAD), 595 + dlmfs_init_once); 596 if (!dlmfs_inode_cache) 597 return -ENOMEM; 598 cleanup_inode = 1;
+1 -1
fs/ocfs2/dlm/dlmmaster.c
··· 510 dlm_mle_cache = kmem_cache_create("dlm_mle_cache", 511 sizeof(struct dlm_master_list_entry), 512 0, SLAB_HWCACHE_ALIGN, 513 - NULL, NULL); 514 if (dlm_mle_cache == NULL) 515 return -ENOMEM; 516 return 0;
··· 510 dlm_mle_cache = kmem_cache_create("dlm_mle_cache", 511 sizeof(struct dlm_master_list_entry), 512 0, SLAB_HWCACHE_ALIGN, 513 + NULL); 514 if (dlm_mle_cache == NULL) 515 return -ENOMEM; 516 return 0;
+1 -1
fs/ocfs2/super.c
··· 984 0, 985 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 986 SLAB_MEM_SPREAD), 987 - ocfs2_inode_init_once, NULL); 988 if (!ocfs2_inode_cachep) 989 return -ENOMEM; 990
··· 984 0, 985 (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 986 SLAB_MEM_SPREAD), 987 + ocfs2_inode_init_once); 988 if (!ocfs2_inode_cachep) 989 return -ENOMEM; 990
+1 -1
fs/ocfs2/uptodate.c
··· 548 { 549 ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate", 550 sizeof(struct ocfs2_meta_cache_item), 551 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 552 if (!ocfs2_uptodate_cachep) 553 return -ENOMEM; 554
··· 548 { 549 ocfs2_uptodate_cachep = kmem_cache_create("ocfs2_uptodate", 550 sizeof(struct ocfs2_meta_cache_item), 551 + 0, SLAB_HWCACHE_ALIGN, NULL); 552 if (!ocfs2_uptodate_cachep) 553 return -ENOMEM; 554
+1 -1
fs/openpromfs/inode.c
··· 431 0, 432 (SLAB_RECLAIM_ACCOUNT | 433 SLAB_MEM_SPREAD), 434 - op_inode_init_once, NULL); 435 if (!op_inode_cachep) 436 return -ENOMEM; 437
··· 431 0, 432 (SLAB_RECLAIM_ACCOUNT | 433 SLAB_MEM_SPREAD), 434 + op_inode_init_once); 435 if (!op_inode_cachep) 436 return -ENOMEM; 437
+2 -2
fs/proc/inode.c
··· 112 113 inode_init_once(&ei->vfs_inode); 114 } 115 - 116 int __init proc_init_inodecache(void) 117 { 118 proc_inode_cachep = kmem_cache_create("proc_inode_cache", 119 sizeof(struct proc_inode), 120 0, (SLAB_RECLAIM_ACCOUNT| 121 SLAB_MEM_SPREAD), 122 - init_once, NULL); 123 if (proc_inode_cachep == NULL) 124 return -ENOMEM; 125 return 0;
··· 112 113 inode_init_once(&ei->vfs_inode); 114 } 115 + 116 int __init proc_init_inodecache(void) 117 { 118 proc_inode_cachep = kmem_cache_create("proc_inode_cache", 119 sizeof(struct proc_inode), 120 0, (SLAB_RECLAIM_ACCOUNT| 121 SLAB_MEM_SPREAD), 122 + init_once); 123 if (proc_inode_cachep == NULL) 124 return -ENOMEM; 125 return 0;
+1 -1
fs/qnx4/inode.c
··· 545 sizeof(struct qnx4_inode_info), 546 0, (SLAB_RECLAIM_ACCOUNT| 547 SLAB_MEM_SPREAD), 548 - init_once, NULL); 549 if (qnx4_inode_cachep == NULL) 550 return -ENOMEM; 551 return 0;
··· 545 sizeof(struct qnx4_inode_info), 546 0, (SLAB_RECLAIM_ACCOUNT| 547 SLAB_MEM_SPREAD), 548 + init_once); 549 if (qnx4_inode_cachep == NULL) 550 return -ENOMEM; 551 return 0;
+1 -1
fs/reiserfs/super.c
··· 527 reiserfs_inode_info), 528 0, (SLAB_RECLAIM_ACCOUNT| 529 SLAB_MEM_SPREAD), 530 - init_once, NULL); 531 if (reiserfs_inode_cachep == NULL) 532 return -ENOMEM; 533 return 0;
··· 527 reiserfs_inode_info), 528 0, (SLAB_RECLAIM_ACCOUNT| 529 SLAB_MEM_SPREAD), 530 + init_once); 531 if (reiserfs_inode_cachep == NULL) 532 return -ENOMEM; 533 return 0;
+2 -2
fs/romfs/inode.c
··· 572 573 inode_init_once(&ei->vfs_inode); 574 } 575 - 576 static int init_inodecache(void) 577 { 578 romfs_inode_cachep = kmem_cache_create("romfs_inode_cache", 579 sizeof(struct romfs_inode_info), 580 0, (SLAB_RECLAIM_ACCOUNT| 581 SLAB_MEM_SPREAD), 582 - init_once, NULL); 583 if (romfs_inode_cachep == NULL) 584 return -ENOMEM; 585 return 0;
··· 572 573 inode_init_once(&ei->vfs_inode); 574 } 575 + 576 static int init_inodecache(void) 577 { 578 romfs_inode_cachep = kmem_cache_create("romfs_inode_cache", 579 sizeof(struct romfs_inode_info), 580 0, (SLAB_RECLAIM_ACCOUNT| 581 SLAB_MEM_SPREAD), 582 + init_once); 583 if (romfs_inode_cachep == NULL) 584 return -ENOMEM; 585 return 0;
+2 -2
fs/smbfs/inode.c
··· 73 74 inode_init_once(&ei->vfs_inode); 75 } 76 - 77 static int init_inodecache(void) 78 { 79 smb_inode_cachep = kmem_cache_create("smb_inode_cache", 80 sizeof(struct smb_inode_info), 81 0, (SLAB_RECLAIM_ACCOUNT| 82 SLAB_MEM_SPREAD), 83 - init_once, NULL); 84 if (smb_inode_cachep == NULL) 85 return -ENOMEM; 86 return 0;
··· 73 74 inode_init_once(&ei->vfs_inode); 75 } 76 + 77 static int init_inodecache(void) 78 { 79 smb_inode_cachep = kmem_cache_create("smb_inode_cache", 80 sizeof(struct smb_inode_info), 81 0, (SLAB_RECLAIM_ACCOUNT| 82 SLAB_MEM_SPREAD), 83 + init_once); 84 if (smb_inode_cachep == NULL) 85 return -ENOMEM; 86 return 0;
+1 -1
fs/smbfs/request.c
··· 40 req_cachep = kmem_cache_create("smb_request", 41 sizeof(struct smb_request), 0, 42 SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN, 43 - NULL, NULL); 44 if (req_cachep == NULL) 45 return -ENOMEM; 46
··· 40 req_cachep = kmem_cache_create("smb_request", 41 sizeof(struct smb_request), 0, 42 SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN, 43 + NULL); 44 if (req_cachep == NULL) 45 return -ENOMEM; 46
+1 -1
fs/sysfs/mount.c
··· 86 87 sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache", 88 sizeof(struct sysfs_dirent), 89 - 0, 0, NULL, NULL); 90 if (!sysfs_dir_cachep) 91 goto out; 92
··· 86 87 sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache", 88 sizeof(struct sysfs_dirent), 89 + 0, 0, NULL); 90 if (!sysfs_dir_cachep) 91 goto out; 92
+1 -1
fs/sysv/inode.c
··· 342 sysv_inode_cachep = kmem_cache_create("sysv_inode_cache", 343 sizeof(struct sysv_inode_info), 0, 344 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 345 - init_once, NULL); 346 if (!sysv_inode_cachep) 347 return -ENOMEM; 348 return 0;
··· 342 sysv_inode_cachep = kmem_cache_create("sysv_inode_cache", 343 sizeof(struct sysv_inode_info), 0, 344 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, 345 + init_once); 346 if (!sysv_inode_cachep) 347 return -ENOMEM; 348 return 0;
+1 -1
fs/udf/super.c
··· 149 sizeof(struct udf_inode_info), 150 0, (SLAB_RECLAIM_ACCOUNT | 151 SLAB_MEM_SPREAD), 152 - init_once, NULL); 153 if (udf_inode_cachep == NULL) 154 return -ENOMEM; 155 return 0;
··· 149 sizeof(struct udf_inode_info), 150 0, (SLAB_RECLAIM_ACCOUNT | 151 SLAB_MEM_SPREAD), 152 + init_once); 153 if (udf_inode_cachep == NULL) 154 return -ENOMEM; 155 return 0;
+2 -2
fs/ufs/super.c
··· 1240 1241 inode_init_once(&ei->vfs_inode); 1242 } 1243 - 1244 static int init_inodecache(void) 1245 { 1246 ufs_inode_cachep = kmem_cache_create("ufs_inode_cache", 1247 sizeof(struct ufs_inode_info), 1248 0, (SLAB_RECLAIM_ACCOUNT| 1249 SLAB_MEM_SPREAD), 1250 - init_once, NULL); 1251 if (ufs_inode_cachep == NULL) 1252 return -ENOMEM; 1253 return 0;
··· 1240 1241 inode_init_once(&ei->vfs_inode); 1242 } 1243 + 1244 static int init_inodecache(void) 1245 { 1246 ufs_inode_cachep = kmem_cache_create("ufs_inode_cache", 1247 sizeof(struct ufs_inode_info), 1248 0, (SLAB_RECLAIM_ACCOUNT| 1249 SLAB_MEM_SPREAD), 1250 + init_once); 1251 if (ufs_inode_cachep == NULL) 1252 return -ENOMEM; 1253 return 0;
+2 -2
fs/xfs/linux-2.6/kmem.h
··· 74 static inline kmem_zone_t * 75 kmem_zone_init(int size, char *zone_name) 76 { 77 - return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL); 78 } 79 80 static inline kmem_zone_t * 81 kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, 82 void (*construct)(void *, kmem_zone_t *, unsigned long)) 83 { 84 - return kmem_cache_create(zone_name, size, 0, flags, construct, NULL); 85 } 86 87 static inline void
··· 74 static inline kmem_zone_t * 75 kmem_zone_init(int size, char *zone_name) 76 { 77 + return kmem_cache_create(zone_name, size, 0, 0, NULL); 78 } 79 80 static inline kmem_zone_t * 81 kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, 82 void (*construct)(void *, kmem_zone_t *, unsigned long)) 83 { 84 + return kmem_cache_create(zone_name, size, 0, flags, construct); 85 } 86 87 static inline void
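Callers of these XFS wrappers need no changes; the dropped dtor argument was
confined to the wrapper bodies themselves. A minimal sketch of an unchanged
zone user, assuming hypothetical xfs_foo names that are not part of this patch:

	struct xfs_foo {
		int state;
	};

	static kmem_zone_t *xfs_foo_zone;

	static int xfs_foo_zone_init(void)
	{
		/* hypothetical example: this call is identical before and
		 * after the patch -- only kmem_zone_init()'s body changed */
		xfs_foo_zone = kmem_zone_init(sizeof(struct xfs_foo),
					      "xfs_foo");
		return xfs_foo_zone ? 0 : -ENOMEM;
	}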
+1 -2
include/linux/i2o.h
··· 946 strcpy(pool->name, name); 947 948 pool->slab = 949 - kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL, 950 - NULL); 951 if (!pool->slab) 952 goto free_name; 953
··· 946 strcpy(pool->name, name); 947 948 pool->slab = 949 + kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); 950 if (!pool->slab) 951 goto free_name; 952
+1 -2
include/linux/slab.h
··· 51 52 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 53 unsigned long, 54 - void (*)(void *, struct kmem_cache *, unsigned long), 55 void (*)(void *, struct kmem_cache *, unsigned long)); 56 void kmem_cache_destroy(struct kmem_cache *); 57 int kmem_cache_shrink(struct kmem_cache *); ··· 69 */ 70 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 71 sizeof(struct __struct), __alignof__(struct __struct),\ 72 - (__flags), NULL, NULL) 73 74 /* 75 * The largest kmalloc size supported by the slab allocators is
··· 51 52 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 53 unsigned long, 54 void (*)(void *, struct kmem_cache *, unsigned long)); 55 void kmem_cache_destroy(struct kmem_cache *); 56 int kmem_cache_shrink(struct kmem_cache *); ··· 70 */ 71 #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ 72 sizeof(struct __struct), __alignof__(struct __struct),\ 73 + (__flags), NULL) 74 75 /* 76 * The largest kmalloc size supported by the slab allocators is
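With the prototype and KMEM_CACHE() above settled, the conversion for any
remaining caller is mechanical: drop the trailing destructor argument. A
minimal sketch against the new five-argument form, assuming hypothetical foo
names that are not in this patch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	struct foo {
		int bar;
	};

	static struct kmem_cache *foo_cachep;

	/* the ctor keeps its existing three-argument signature */
	static void foo_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct foo *f = obj;

		f->bar = 0;
	}

	static int __init foo_cache_init(void)
	{
		/* was: kmem_cache_create(..., foo_ctor, NULL); */
		foo_cachep = kmem_cache_create("foo_cache",
					       sizeof(struct foo), 0,
					       SLAB_HWCACHE_ALIGN, foo_ctor);
		return foo_cachep ? 0 : -ENOMEM;
	}

Caches that used neither hook now pass a single NULL, which is also what the
KMEM_CACHE() macro does internally.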
+1 -1
ipc/mqueue.c
··· 1253 1254 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", 1255 sizeof(struct mqueue_inode_info), 0, 1256 - SLAB_HWCACHE_ALIGN, init_once, NULL); 1257 if (mqueue_inode_cachep == NULL) 1258 return -ENOMEM; 1259
··· 1253 1254 mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", 1255 sizeof(struct mqueue_inode_info), 0, 1256 + SLAB_HWCACHE_ALIGN, init_once); 1257 if (mqueue_inode_cachep == NULL) 1258 return -ENOMEM; 1259
+9 -9
kernel/fork.c
··· 137 /* create a slab on which task_structs can be allocated */ 138 task_struct_cachep = 139 kmem_cache_create("task_struct", sizeof(struct task_struct), 140 - ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL); 141 #endif 142 143 /* ··· 1446 sighand_cachep = kmem_cache_create("sighand_cache", 1447 sizeof(struct sighand_struct), 0, 1448 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU, 1449 - sighand_ctor, NULL); 1450 signal_cachep = kmem_cache_create("signal_cache", 1451 sizeof(struct signal_struct), 0, 1452 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1453 - files_cachep = kmem_cache_create("files_cache", 1454 sizeof(struct files_struct), 0, 1455 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1456 - fs_cachep = kmem_cache_create("fs_cache", 1457 sizeof(struct fs_struct), 0, 1458 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1459 vm_area_cachep = kmem_cache_create("vm_area_struct", 1460 sizeof(struct vm_area_struct), 0, 1461 - SLAB_PANIC, NULL, NULL); 1462 mm_cachep = kmem_cache_create("mm_struct", 1463 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 1464 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1465 } 1466 1467 /*
··· 137 /* create a slab on which task_structs can be allocated */ 138 task_struct_cachep = 139 kmem_cache_create("task_struct", sizeof(struct task_struct), 140 + ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL); 141 #endif 142 143 /* ··· 1446 sighand_cachep = kmem_cache_create("sighand_cache", 1447 sizeof(struct sighand_struct), 0, 1448 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU, 1449 + sighand_ctor); 1450 signal_cachep = kmem_cache_create("signal_cache", 1451 sizeof(struct signal_struct), 0, 1452 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1453 + files_cachep = kmem_cache_create("files_cache", 1454 sizeof(struct files_struct), 0, 1455 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1456 + fs_cachep = kmem_cache_create("fs_cache", 1457 sizeof(struct fs_struct), 0, 1458 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1459 vm_area_cachep = kmem_cache_create("vm_area_struct", 1460 sizeof(struct vm_area_struct), 0, 1461 + SLAB_PANIC, NULL); 1462 mm_cachep = kmem_cache_create("mm_struct", 1463 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 1464 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1465 } 1466 1467 /*
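sighand_cache above is the one creation in this file that keeps a hook: a ctor
paired with SLAB_DESTROY_BY_RCU. Because a ctor runs only when a slab page is
populated, the lock it initializes stays valid even while an object is
recycled under RCU, which is exactly what that flag relies on. A hedged sketch
of the pattern, with hypothetical bar names not taken from this patch:

	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/rcupdate.h>

	struct bar {
		spinlock_t lock;	/* set up once, by the ctor */
		int id;
	};

	static struct kmem_cache *bar_cachep;

	static void bar_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct bar *b = obj;

		spin_lock_init(&b->lock);
	}

	/* hypothetical example: with SLAB_DESTROY_BY_RCU the memory keeps
	 * its type for a grace period, so taking the ctor-initialized lock
	 * and revalidating is safe even if the object was freed and
	 * reallocated in the meantime */
	static int bar_still_matches(struct bar *b, int id)
	{
		int ok;

		rcu_read_lock();
		spin_lock(&b->lock);
		ok = (b->id == id);
		spin_unlock(&b->lock);
		rcu_read_unlock();
		return ok;
	}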
+1 -1
kernel/nsproxy.c
··· 193 static int __init nsproxy_cache_init(void) 194 { 195 nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy), 196 - 0, SLAB_PANIC, NULL, NULL); 197 return 0; 198 } 199
··· 193 static int __init nsproxy_cache_init(void) 194 { 195 nsproxy_cachep = kmem_cache_create("nsproxy", sizeof(struct nsproxy), 196 + 0, SLAB_PANIC, NULL); 197 return 0; 198 } 199
+1 -1
kernel/posix-timers.c
··· 241 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); 242 243 posix_timers_cache = kmem_cache_create("posix_timers_cache", 244 - sizeof (struct k_itimer), 0, 0, NULL, NULL); 245 idr_init(&posix_timers_id); 246 return 0; 247 }
··· 241 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); 242 243 posix_timers_cache = kmem_cache_create("posix_timers_cache", 244 + sizeof (struct k_itimer), 0, 0, NULL); 245 idr_init(&posix_timers_id); 246 return 0; 247 }
+1 -1
kernel/user.c
··· 208 int n; 209 210 uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct), 211 - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 212 213 for(n = 0; n < UIDHASH_SZ; ++n) 214 INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
··· 208 int n; 209 210 uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct), 211 + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 212 213 for(n = 0; n < UIDHASH_SZ; ++n) 214 INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
+1 -1
lib/idr.c
··· 590 { 591 if (!idr_layer_cache) 592 idr_layer_cache = kmem_cache_create("idr_layer_cache", 593 - sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); 594 return 0; 595 } 596
··· 590 { 591 if (!idr_layer_cache) 592 idr_layer_cache = kmem_cache_create("idr_layer_cache", 593 + sizeof(struct idr_layer), 0, 0, idr_cache_ctor); 594 return 0; 595 } 596
+1 -1
lib/radix-tree.c
··· 1021 { 1022 radix_tree_node_cachep = kmem_cache_create("radix_tree_node", 1023 sizeof(struct radix_tree_node), 0, 1024 - SLAB_PANIC, radix_tree_node_ctor, NULL); 1025 radix_tree_init_maxindex(); 1026 hotcpu_notifier(radix_tree_callback, 0); 1027 }
··· 1021 { 1022 radix_tree_node_cachep = kmem_cache_create("radix_tree_node", 1023 sizeof(struct radix_tree_node), 0, 1024 + SLAB_PANIC, radix_tree_node_ctor); 1025 radix_tree_init_maxindex(); 1026 hotcpu_notifier(radix_tree_callback, 0); 1027 }
+2 -2
mm/mempolicy.c
··· 1605 1606 policy_cache = kmem_cache_create("numa_policy", 1607 sizeof(struct mempolicy), 1608 - 0, SLAB_PANIC, NULL, NULL); 1609 1610 sn_cache = kmem_cache_create("shared_policy_node", 1611 sizeof(struct sp_node), 1612 - 0, SLAB_PANIC, NULL, NULL); 1613 1614 /* 1615 * Set interleaving policy for system init. Interleaving is only
··· 1605 1606 policy_cache = kmem_cache_create("numa_policy", 1607 sizeof(struct mempolicy), 1608 + 0, SLAB_PANIC, NULL); 1609 1610 sn_cache = kmem_cache_create("shared_policy_node", 1611 sizeof(struct sp_node), 1612 + 0, SLAB_PANIC, NULL); 1613 1614 /* 1615 * Set interleaving policy for system init. Interleaving is only
+1 -1
mm/rmap.c
··· 149 void __init anon_vma_init(void) 150 { 151 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 152 - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL); 153 } 154 155 /*
··· 149 void __init anon_vma_init(void) 150 { 151 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 152 + 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); 153 } 154 155 /*
+1 -1
mm/shmem.c
··· 2322 { 2323 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2324 sizeof(struct shmem_inode_info), 2325 - 0, 0, init_once, NULL); 2326 if (shmem_inode_cachep == NULL) 2327 return -ENOMEM; 2328 return 0;
··· 2322 { 2323 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2324 sizeof(struct shmem_inode_info), 2325 + 0, 0, init_once); 2326 if (shmem_inode_cachep == NULL) 2327 return -ENOMEM; 2328 return 0;
+7 -10
mm/slab.c
··· 1484 sizes[INDEX_AC].cs_size, 1485 ARCH_KMALLOC_MINALIGN, 1486 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1487 - NULL, NULL); 1488 1489 if (INDEX_AC != INDEX_L3) { 1490 sizes[INDEX_L3].cs_cachep = ··· 1492 sizes[INDEX_L3].cs_size, 1493 ARCH_KMALLOC_MINALIGN, 1494 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1495 - NULL, NULL); 1496 } 1497 1498 slab_early_init = 0; ··· 1510 sizes->cs_size, 1511 ARCH_KMALLOC_MINALIGN, 1512 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1513 - NULL, NULL); 1514 } 1515 #ifdef CONFIG_ZONE_DMA 1516 sizes->cs_dmacachep = kmem_cache_create( ··· 1519 ARCH_KMALLOC_MINALIGN, 1520 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1521 SLAB_PANIC, 1522 - NULL, NULL); 1523 #endif 1524 sizes++; 1525 names++; ··· 2101 * @align: The required alignment for the objects. 2102 * @flags: SLAB flags 2103 * @ctor: A constructor for the objects. 2104 - * @dtor: A destructor for the objects (not implemented anymore). 2105 * 2106 * Returns a ptr to the cache on success, NULL on failure. 2107 * Cannot be called within a int, but can be interrupted. 2108 - * The @ctor is run when new pages are allocated by the cache 2109 - * and the @dtor is run before the pages are handed back. 2110 * 2111 * @name must be valid until the cache is destroyed. This implies that 2112 * the module calling this has to destroy the cache before getting unloaded. ··· 2124 struct kmem_cache * 2125 kmem_cache_create (const char *name, size_t size, size_t align, 2126 unsigned long flags, 2127 - void (*ctor)(void*, struct kmem_cache *, unsigned long), 2128 - void (*dtor)(void*, struct kmem_cache *, unsigned long)) 2129 { 2130 size_t left_over, slab_size, ralign; 2131 struct kmem_cache *cachep = NULL, *pc; ··· 2133 * Sanity checks... these are all serious usage bugs. 2134 */ 2135 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2136 - size > KMALLOC_MAX_SIZE || dtor) { 2137 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2138 name); 2139 BUG();
··· 1484 sizes[INDEX_AC].cs_size, 1485 ARCH_KMALLOC_MINALIGN, 1486 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1487 + NULL); 1488 1489 if (INDEX_AC != INDEX_L3) { 1490 sizes[INDEX_L3].cs_cachep = ··· 1492 sizes[INDEX_L3].cs_size, 1493 ARCH_KMALLOC_MINALIGN, 1494 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1495 + NULL); 1496 } 1497 1498 slab_early_init = 0; ··· 1510 sizes->cs_size, 1511 ARCH_KMALLOC_MINALIGN, 1512 ARCH_KMALLOC_FLAGS|SLAB_PANIC, 1513 + NULL); 1514 } 1515 #ifdef CONFIG_ZONE_DMA 1516 sizes->cs_dmacachep = kmem_cache_create( ··· 1519 ARCH_KMALLOC_MINALIGN, 1520 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| 1521 SLAB_PANIC, 1522 + NULL); 1523 #endif 1524 sizes++; 1525 names++; ··· 2101 * @align: The required alignment for the objects. 2102 * @flags: SLAB flags 2103 * @ctor: A constructor for the objects. 2104 * 2105 * Returns a ptr to the cache on success, NULL on failure. 2106 * Cannot be called within a int, but can be interrupted. 2107 + * The @ctor is run when new pages are allocated by the cache. 2108 * 2109 * @name must be valid until the cache is destroyed. This implies that 2110 * the module calling this has to destroy the cache before getting unloaded. ··· 2126 struct kmem_cache * 2127 kmem_cache_create (const char *name, size_t size, size_t align, 2128 unsigned long flags, 2129 + void (*ctor)(void*, struct kmem_cache *, unsigned long)) 2130 { 2131 size_t left_over, slab_size, ralign; 2132 struct kmem_cache *cachep = NULL, *pc; ··· 2136 * Sanity checks... these are all serious usage bugs. 2137 */ 2138 if (!name || in_interrupt() || (size < BYTES_PER_WORD) || 2139 + size > KMALLOC_MAX_SIZE) { 2140 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__, 2141 name); 2142 BUG();
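The reworded kernel-doc above is the behavioural summary of the whole patch:
@ctor runs when the cache allocates new pages, not on every
kmem_cache_alloc(), and there is no longer any hook on the free side. Recycled
objects are handed out without the ctor re-running, so users must return
objects to the cache in ctor-equivalent state. A hedged sketch of that
invariant, using hypothetical baz names not in this patch:

	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/list.h>

	struct baz {
		struct list_head list;
	};

	static struct kmem_cache *baz_cachep;

	static void baz_ctor(void *obj, struct kmem_cache *cachep,
			     unsigned long flags)
	{
		struct baz *b = obj;

		/* runs once per object, when its slab page is populated */
		INIT_LIST_HEAD(&b->list);
	}

	static void baz_free(struct baz *b)
	{
		/* hypothetical example: no dtor exists to clean up, so the
		 * object must already look freshly constructed here */
		BUG_ON(!list_empty(&b->list));
		kmem_cache_free(baz_cachep, b);
	}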
+1 -2
mm/slob.c
··· 492 493 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 494 size_t align, unsigned long flags, 495 - void (*ctor)(void*, struct kmem_cache *, unsigned long), 496 - void (*dtor)(void*, struct kmem_cache *, unsigned long)) 497 { 498 struct kmem_cache *c; 499
··· 492 493 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 494 size_t align, unsigned long flags, 495 + void (*ctor)(void*, struct kmem_cache *, unsigned long)) 496 { 497 struct kmem_cache *c; 498
+1 -3
mm/slub.c
··· 2668 2669 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 2670 size_t align, unsigned long flags, 2671 - void (*ctor)(void *, struct kmem_cache *, unsigned long), 2672 - void (*dtor)(void *, struct kmem_cache *, unsigned long)) 2673 { 2674 struct kmem_cache *s; 2675 2676 - BUG_ON(dtor); 2677 down_write(&slub_lock); 2678 s = find_mergeable(size, align, flags, ctor); 2679 if (s) {
··· 2668 2669 struct kmem_cache *kmem_cache_create(const char *name, size_t size, 2670 size_t align, unsigned long flags, 2671 + void (*ctor)(void *, struct kmem_cache *, unsigned long)) 2672 { 2673 struct kmem_cache *s; 2674 2675 down_write(&slub_lock); 2676 s = find_mergeable(size, align, flags, ctor); 2677 if (s) {
+1 -1
net/bridge/br_fdb.c
··· 36 br_fdb_cache = kmem_cache_create("bridge_fdb_cache", 37 sizeof(struct net_bridge_fdb_entry), 38 0, 39 - SLAB_HWCACHE_ALIGN, NULL, NULL); 40 if (!br_fdb_cache) 41 return -ENOMEM; 42
··· 36 br_fdb_cache = kmem_cache_create("bridge_fdb_cache", 37 sizeof(struct net_bridge_fdb_entry), 38 0, 39 + SLAB_HWCACHE_ALIGN, NULL); 40 if (!br_fdb_cache) 41 return -ENOMEM; 42
+1 -1
net/core/flow.c
··· 350 flow_cachep = kmem_cache_create("flow_cache", 351 sizeof(struct flow_cache_entry), 352 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 353 - NULL, NULL); 354 flow_hash_shift = 10; 355 flow_lwm = 2 * flow_hash_size; 356 flow_hwm = 4 * flow_hash_size;
··· 350 flow_cachep = kmem_cache_create("flow_cache", 351 sizeof(struct flow_cache_entry), 352 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 353 + NULL); 354 flow_hash_shift = 10; 355 flow_lwm = 2 * flow_hash_size; 356 flow_hwm = 4 * flow_hash_size;
+1 -1
net/core/neighbour.c
··· 1347 tbl->kmem_cachep = 1348 kmem_cache_create(tbl->id, tbl->entry_size, 0, 1349 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1350 - NULL, NULL); 1351 tbl->stats = alloc_percpu(struct neigh_statistics); 1352 if (!tbl->stats) 1353 panic("cannot create neighbour cache statistics");
··· 1347 tbl->kmem_cachep = 1348 kmem_cache_create(tbl->id, tbl->entry_size, 0, 1349 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1350 + NULL); 1351 tbl->stats = alloc_percpu(struct neigh_statistics); 1352 if (!tbl->stats) 1353 panic("cannot create neighbour cache statistics");
+2 -2
net/core/skbuff.c
··· 2021 sizeof(struct sk_buff), 2022 0, 2023 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2024 - NULL, NULL); 2025 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2026 (2*sizeof(struct sk_buff)) + 2027 sizeof(atomic_t), 2028 0, 2029 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2030 - NULL, NULL); 2031 } 2032 2033 /**
··· 2021 sizeof(struct sk_buff), 2022 0, 2023 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2024 + NULL); 2025 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2026 (2*sizeof(struct sk_buff)) + 2027 sizeof(atomic_t), 2028 0, 2029 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2030 + NULL); 2031 } 2032 2033 /**
+3 -3
net/core/sock.c
··· 1767 1768 if (alloc_slab) { 1769 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 1770 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1771 1772 if (prot->slab == NULL) { 1773 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", ··· 1785 sprintf(request_sock_slab_name, mask, prot->name); 1786 prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name, 1787 prot->rsk_prot->obj_size, 0, 1788 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1789 1790 if (prot->rsk_prot->slab == NULL) { 1791 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", ··· 1807 kmem_cache_create(timewait_sock_slab_name, 1808 prot->twsk_prot->twsk_obj_size, 1809 0, SLAB_HWCACHE_ALIGN, 1810 - NULL, NULL); 1811 if (prot->twsk_prot->twsk_slab == NULL) 1812 goto out_free_timewait_sock_slab_name; 1813 }
··· 1767 1768 if (alloc_slab) { 1769 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, 1770 + SLAB_HWCACHE_ALIGN, NULL); 1771 1772 if (prot->slab == NULL) { 1773 printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n", ··· 1785 sprintf(request_sock_slab_name, mask, prot->name); 1786 prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name, 1787 prot->rsk_prot->obj_size, 0, 1788 + SLAB_HWCACHE_ALIGN, NULL); 1789 1790 if (prot->rsk_prot->slab == NULL) { 1791 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n", ··· 1807 kmem_cache_create(timewait_sock_slab_name, 1808 prot->twsk_prot->twsk_obj_size, 1809 0, SLAB_HWCACHE_ALIGN, 1810 + NULL); 1811 if (prot->twsk_prot->twsk_slab == NULL) 1812 goto out_free_timewait_sock_slab_name; 1813 }
+2 -2
net/dccp/ackvec.c
··· 481 { 482 dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", 483 sizeof(struct dccp_ackvec), 0, 484 - SLAB_HWCACHE_ALIGN, NULL, NULL); 485 if (dccp_ackvec_slab == NULL) 486 goto out_err; 487 488 dccp_ackvec_record_slab = 489 kmem_cache_create("dccp_ackvec_record", 490 sizeof(struct dccp_ackvec_record), 491 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 492 if (dccp_ackvec_record_slab == NULL) 493 goto out_destroy_slab; 494
··· 481 { 482 dccp_ackvec_slab = kmem_cache_create("dccp_ackvec", 483 sizeof(struct dccp_ackvec), 0, 484 + SLAB_HWCACHE_ALIGN, NULL); 485 if (dccp_ackvec_slab == NULL) 486 goto out_err; 487 488 dccp_ackvec_record_slab = 489 kmem_cache_create("dccp_ackvec_record", 490 sizeof(struct dccp_ackvec_record), 491 + 0, SLAB_HWCACHE_ALIGN, NULL); 492 if (dccp_ackvec_record_slab == NULL) 493 goto out_destroy_slab; 494
+1 -1
net/dccp/ccid.c
··· 69 if (slab_name == NULL) 70 return NULL; 71 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0, 72 - SLAB_HWCACHE_ALIGN, NULL, NULL); 73 if (slab == NULL) 74 kfree(slab_name); 75 return slab;
··· 69 if (slab_name == NULL) 70 return NULL; 71 slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0, 72 + SLAB_HWCACHE_ALIGN, NULL); 73 if (slab == NULL) 74 kfree(slab_name); 75 return slab;
+1 -1
net/dccp/ccids/lib/loss_interval.c
··· 282 { 283 dccp_li_cachep = kmem_cache_create("dccp_li_hist", 284 sizeof(struct dccp_li_hist_entry), 285 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 286 return dccp_li_cachep == NULL ? -ENOBUFS : 0; 287 } 288
··· 282 { 283 dccp_li_cachep = kmem_cache_create("dccp_li_hist", 284 sizeof(struct dccp_li_hist_entry), 285 + 0, SLAB_HWCACHE_ALIGN, NULL); 286 return dccp_li_cachep == NULL ? -ENOBUFS : 0; 287 } 288
+2 -2
net/dccp/ccids/lib/packet_history.c
··· 59 hist->dccptxh_slab = kmem_cache_create(slab_name, 60 sizeof(struct dccp_tx_hist_entry), 61 0, SLAB_HWCACHE_ALIGN, 62 - NULL, NULL); 63 if (hist->dccptxh_slab == NULL) 64 goto out_free_slab_name; 65 out: ··· 148 hist->dccprxh_slab = kmem_cache_create(slab_name, 149 sizeof(struct dccp_rx_hist_entry), 150 0, SLAB_HWCACHE_ALIGN, 151 - NULL, NULL); 152 if (hist->dccprxh_slab == NULL) 153 goto out_free_slab_name; 154 out:
··· 59 hist->dccptxh_slab = kmem_cache_create(slab_name, 60 sizeof(struct dccp_tx_hist_entry), 61 0, SLAB_HWCACHE_ALIGN, 62 + NULL); 63 if (hist->dccptxh_slab == NULL) 64 goto out_free_slab_name; 65 out: ··· 148 hist->dccprxh_slab = kmem_cache_create(slab_name, 149 sizeof(struct dccp_rx_hist_entry), 150 0, SLAB_HWCACHE_ALIGN, 151 + NULL); 152 if (hist->dccprxh_slab == NULL) 153 goto out_free_slab_name; 154 out:
+1 -1
net/dccp/proto.c
··· 1003 dccp_hashinfo.bind_bucket_cachep = 1004 kmem_cache_create("dccp_bind_bucket", 1005 sizeof(struct inet_bind_bucket), 0, 1006 - SLAB_HWCACHE_ALIGN, NULL, NULL); 1007 if (!dccp_hashinfo.bind_bucket_cachep) 1008 goto out; 1009
··· 1003 dccp_hashinfo.bind_bucket_cachep = 1004 kmem_cache_create("dccp_bind_bucket", 1005 sizeof(struct inet_bind_bucket), 0, 1006 + SLAB_HWCACHE_ALIGN, NULL); 1007 if (!dccp_hashinfo.bind_bucket_cachep) 1008 goto out; 1009
+1 -1
net/decnet/dn_route.c
··· 1770 1771 dn_dst_ops.kmem_cachep = 1772 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0, 1773 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1774 init_timer(&dn_route_timer); 1775 dn_route_timer.function = dn_dst_check_expire; 1776 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
··· 1770 1771 dn_dst_ops.kmem_cachep = 1772 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0, 1773 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1774 init_timer(&dn_route_timer); 1775 dn_route_timer.function = dn_dst_check_expire; 1776 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
+1 -1
net/decnet/dn_table.c
··· 881 dn_hash_kmem = kmem_cache_create("dn_fib_info_cache", 882 sizeof(struct dn_fib_info), 883 0, SLAB_HWCACHE_ALIGN, 884 - NULL, NULL); 885 } 886 887 void __exit dn_fib_table_cleanup(void)
··· 881 dn_hash_kmem = kmem_cache_create("dn_fib_info_cache", 882 sizeof(struct dn_fib_info), 883 0, SLAB_HWCACHE_ALIGN, 884 + NULL); 885 } 886 887 void __exit dn_fib_table_cleanup(void)
+2 -2
net/ipv4/fib_hash.c
··· 771 fn_hash_kmem = kmem_cache_create("ip_fib_hash", 772 sizeof(struct fib_node), 773 0, SLAB_HWCACHE_ALIGN, 774 - NULL, NULL); 775 776 if (fn_alias_kmem == NULL) 777 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 778 sizeof(struct fib_alias), 779 0, SLAB_HWCACHE_ALIGN, 780 - NULL, NULL); 781 782 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash), 783 GFP_KERNEL);
··· 771 fn_hash_kmem = kmem_cache_create("ip_fib_hash", 772 sizeof(struct fib_node), 773 0, SLAB_HWCACHE_ALIGN, 774 + NULL); 775 776 if (fn_alias_kmem == NULL) 777 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 778 sizeof(struct fib_alias), 779 0, SLAB_HWCACHE_ALIGN, 780 + NULL); 781 782 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash), 783 GFP_KERNEL);
+1 -1
net/ipv4/fib_trie.c
··· 1970 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 1971 sizeof(struct fib_alias), 1972 0, SLAB_HWCACHE_ALIGN, 1973 - NULL, NULL); 1974 1975 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), 1976 GFP_KERNEL);
··· 1970 fn_alias_kmem = kmem_cache_create("ip_fib_alias", 1971 sizeof(struct fib_alias), 1972 0, SLAB_HWCACHE_ALIGN, 1973 + NULL); 1974 1975 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie), 1976 GFP_KERNEL);
+1 -1
net/ipv4/inetpeer.c
··· 123 peer_cachep = kmem_cache_create("inet_peer_cache", 124 sizeof(struct inet_peer), 125 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 126 - NULL, NULL); 127 128 /* All the timers, started at system startup tend 129 to synchronize. Perturb it a bit.
··· 123 peer_cachep = kmem_cache_create("inet_peer_cache", 124 sizeof(struct inet_peer), 125 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 126 + NULL); 127 128 /* All the timers, started at system startup tend 129 to synchronize. Perturb it a bit.
+1 -1
net/ipv4/ipmr.c
··· 1917 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1918 sizeof(struct mfc_cache), 1919 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1920 - NULL, NULL); 1921 init_timer(&ipmr_expire_timer); 1922 ipmr_expire_timer.function=ipmr_expire_process; 1923 register_netdevice_notifier(&ip_mr_notifier);
··· 1917 mrt_cachep = kmem_cache_create("ip_mrt_cache", 1918 sizeof(struct mfc_cache), 1919 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1920 + NULL); 1921 init_timer(&ipmr_expire_timer); 1922 ipmr_expire_timer.function=ipmr_expire_process; 1923 register_netdevice_notifier(&ip_mr_notifier);
+1 -1
net/ipv4/ipvs/ip_vs_conn.c
··· 901 /* Allocate ip_vs_conn slab cache */ 902 ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn", 903 sizeof(struct ip_vs_conn), 0, 904 - SLAB_HWCACHE_ALIGN, NULL, NULL); 905 if (!ip_vs_conn_cachep) { 906 vfree(ip_vs_conn_tab); 907 return -ENOMEM;
··· 901 /* Allocate ip_vs_conn slab cache */ 902 ip_vs_conn_cachep = kmem_cache_create("ip_vs_conn", 903 sizeof(struct ip_vs_conn), 0, 904 + SLAB_HWCACHE_ALIGN, NULL); 905 if (!ip_vs_conn_cachep) { 906 vfree(ip_vs_conn_tab); 907 return -ENOMEM;
+1 -1
net/ipv4/route.c
··· 2967 2968 ipv4_dst_ops.kmem_cachep = 2969 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, 2970 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2971 2972 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 2973
··· 2967 2968 ipv4_dst_ops.kmem_cachep = 2969 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0, 2970 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2971 2972 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep; 2973
+1 -1
net/ipv4/tcp.c
··· 2430 tcp_hashinfo.bind_bucket_cachep = 2431 kmem_cache_create("tcp_bind_bucket", 2432 sizeof(struct inet_bind_bucket), 0, 2433 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2434 2435 /* Size and allocate the main established and bind bucket 2436 * hash tables.
··· 2430 tcp_hashinfo.bind_bucket_cachep = 2431 kmem_cache_create("tcp_bind_bucket", 2432 sizeof(struct inet_bind_bucket), 0, 2433 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2434 2435 /* Size and allocate the main established and bind bucket 2436 * hash tables.
+1 -1
net/ipv6/ip6_fib.c
··· 1474 fib6_node_kmem = kmem_cache_create("fib6_nodes", 1475 sizeof(struct fib6_node), 1476 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1477 - NULL, NULL); 1478 1479 fib6_tables_init(); 1480
··· 1474 fib6_node_kmem = kmem_cache_create("fib6_nodes", 1475 sizeof(struct fib6_node), 1476 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 1477 + NULL); 1478 1479 fib6_tables_init(); 1480
+1 -1
net/ipv6/route.c
··· 2555 #endif 2556 ip6_dst_ops.kmem_cachep = 2557 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2558 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 2559 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; 2560 2561 fib6_init();
··· 2555 #endif 2556 ip6_dst_ops.kmem_cachep = 2557 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, 2558 + SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2559 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep; 2560 2561 fib6_init();
+1 -1
net/ipv6/xfrm6_tunnel.c
··· 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 85 sizeof(struct xfrm6_tunnel_spi), 86 0, SLAB_HWCACHE_ALIGN, 87 - NULL, NULL); 88 if (!xfrm6_tunnel_spi_kmem) 89 return -ENOMEM; 90
··· 84 xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", 85 sizeof(struct xfrm6_tunnel_spi), 86 0, SLAB_HWCACHE_ALIGN, 87 + NULL); 88 if (!xfrm6_tunnel_spi_kmem) 89 return -ENOMEM; 90
+1 -1
net/netfilter/nf_conntrack_core.c
··· 1108 1109 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1110 sizeof(struct nf_conn), 1111 - 0, 0, NULL, NULL); 1112 if (!nf_conntrack_cachep) { 1113 printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1114 goto err_free_hash;
··· 1108 1109 nf_conntrack_cachep = kmem_cache_create("nf_conntrack", 1110 sizeof(struct nf_conn), 1111 + 0, 0, NULL); 1112 if (!nf_conntrack_cachep) { 1113 printk(KERN_ERR "Unable to create nf_conn slab cache\n"); 1114 goto err_free_hash;
+1 -1
net/netfilter/nf_conntrack_expect.c
··· 540 541 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect", 542 sizeof(struct nf_conntrack_expect), 543 - 0, 0, NULL, NULL); 544 if (!nf_ct_expect_cachep) 545 goto err2; 546
··· 540 541 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect", 542 sizeof(struct nf_conntrack_expect), 543 + 0, 0, NULL); 544 if (!nf_ct_expect_cachep) 545 goto err2; 546
+1 -1
net/netfilter/xt_hashlimit.c
··· 738 err = -ENOMEM; 739 hashlimit_cachep = kmem_cache_create("xt_hashlimit", 740 sizeof(struct dsthash_ent), 0, 0, 741 - NULL, NULL); 742 if (!hashlimit_cachep) { 743 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 744 goto err2;
··· 738 err = -ENOMEM; 739 hashlimit_cachep = kmem_cache_create("xt_hashlimit", 740 sizeof(struct dsthash_ent), 0, 0, 741 + NULL); 742 if (!hashlimit_cachep) { 743 printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n"); 744 goto err2;
+1 -1
net/rxrpc/af_rxrpc.c
··· 792 ret = -ENOMEM; 793 rxrpc_call_jar = kmem_cache_create( 794 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, 795 - SLAB_HWCACHE_ALIGN, NULL, NULL); 796 if (!rxrpc_call_jar) { 797 printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); 798 goto error_call_jar;
··· 792 ret = -ENOMEM; 793 rxrpc_call_jar = kmem_cache_create( 794 "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, 795 + SLAB_HWCACHE_ALIGN, NULL); 796 if (!rxrpc_call_jar) { 797 printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n"); 798 goto error_call_jar;
+2 -2
net/sctp/protocol.c
··· 980 sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", 981 sizeof(struct sctp_bind_bucket), 982 0, SLAB_HWCACHE_ALIGN, 983 - NULL, NULL); 984 if (!sctp_bucket_cachep) 985 goto out; 986 987 sctp_chunk_cachep = kmem_cache_create("sctp_chunk", 988 sizeof(struct sctp_chunk), 989 0, SLAB_HWCACHE_ALIGN, 990 - NULL, NULL); 991 if (!sctp_chunk_cachep) 992 goto err_chunk_cachep; 993
··· 980 sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", 981 sizeof(struct sctp_bind_bucket), 982 0, SLAB_HWCACHE_ALIGN, 983 + NULL); 984 if (!sctp_bucket_cachep) 985 goto out; 986 987 sctp_chunk_cachep = kmem_cache_create("sctp_chunk", 988 sizeof(struct sctp_chunk), 989 0, SLAB_HWCACHE_ALIGN, 990 + NULL); 991 if (!sctp_chunk_cachep) 992 goto err_chunk_cachep; 993
+1 -2
net/socket.c
··· 272 (SLAB_HWCACHE_ALIGN | 273 SLAB_RECLAIM_ACCOUNT | 274 SLAB_MEM_SPREAD), 275 - init_once, 276 - NULL); 277 if (sock_inode_cachep == NULL) 278 return -ENOMEM; 279 return 0;
··· 272 (SLAB_HWCACHE_ALIGN | 273 SLAB_RECLAIM_ACCOUNT | 274 SLAB_MEM_SPREAD), 275 + init_once); 276 if (sock_inode_cachep == NULL) 277 return -ENOMEM; 278 return 0;
+1 -1
net/sunrpc/rpc_pipe.c
··· 867 sizeof(struct rpc_inode), 868 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 869 SLAB_MEM_SPREAD), 870 - init_once, NULL); 871 if (!rpc_inode_cachep) 872 return -ENOMEM; 873 err = register_filesystem(&rpc_pipe_fs_type);
··· 867 sizeof(struct rpc_inode), 868 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| 869 SLAB_MEM_SPREAD), 870 + init_once); 871 if (!rpc_inode_cachep) 872 return -ENOMEM; 873 err = register_filesystem(&rpc_pipe_fs_type);
+2 -2
net/sunrpc/sched.c
··· 1031 rpc_task_slabp = kmem_cache_create("rpc_tasks", 1032 sizeof(struct rpc_task), 1033 0, SLAB_HWCACHE_ALIGN, 1034 - NULL, NULL); 1035 if (!rpc_task_slabp) 1036 goto err_nomem; 1037 rpc_buffer_slabp = kmem_cache_create("rpc_buffers", 1038 RPC_BUFFER_MAXSIZE, 1039 0, SLAB_HWCACHE_ALIGN, 1040 - NULL, NULL); 1041 if (!rpc_buffer_slabp) 1042 goto err_nomem; 1043 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
··· 1031 rpc_task_slabp = kmem_cache_create("rpc_tasks", 1032 sizeof(struct rpc_task), 1033 0, SLAB_HWCACHE_ALIGN, 1034 + NULL); 1035 if (!rpc_task_slabp) 1036 goto err_nomem; 1037 rpc_buffer_slabp = kmem_cache_create("rpc_buffers", 1038 RPC_BUFFER_MAXSIZE, 1039 0, SLAB_HWCACHE_ALIGN, 1040 + NULL); 1041 if (!rpc_buffer_slabp) 1042 goto err_nomem; 1043 rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
+1 -1
net/tipc/handler.c
··· 97 { 98 tipc_queue_item_cache = 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 100 - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); 101 if (!tipc_queue_item_cache) 102 return -ENOMEM; 103
··· 97 { 98 tipc_queue_item_cache = 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 100 + 0, SLAB_HWCACHE_ALIGN, NULL); 101 if (!tipc_queue_item_cache) 102 return -ENOMEM; 103
+1 -1
net/xfrm/xfrm_input.c
··· 83 secpath_cachep = kmem_cache_create("secpath_cache", 84 sizeof(struct sec_path), 85 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 86 - NULL, NULL); 87 }
··· 83 secpath_cachep = kmem_cache_create("secpath_cache", 84 sizeof(struct sec_path), 85 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 86 + NULL); 87 }
+1 -1
net/xfrm/xfrm_policy.c
··· 2378 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2379 sizeof(struct xfrm_dst), 2380 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2381 - NULL, NULL); 2382 2383 hmask = 8 - 1; 2384 sz = (hmask+1) * sizeof(struct hlist_head);
··· 2378 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2379 sizeof(struct xfrm_dst), 2380 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2381 + NULL); 2382 2383 hmask = 8 - 1; 2384 sz = (hmask+1) * sizeof(struct hlist_head);
+1 -1
security/keys/key.c
··· 1001 { 1002 /* allocate a slab in which we can store keys */ 1003 key_jar = kmem_cache_create("key_jar", sizeof(struct key), 1004 - 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1005 1006 /* add the special key types */ 1007 list_add_tail(&key_type_keyring.link, &key_types_list);
··· 1001 { 1002 /* allocate a slab in which we can store keys */ 1003 key_jar = kmem_cache_create("key_jar", sizeof(struct key), 1004 + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1005 1006 /* add the special key types */ 1007 list_add_tail(&key_type_keyring.link, &key_types_list);
+1 -1
security/selinux/avc.c
··· 239 atomic_set(&avc_cache.lru_hint, 0); 240 241 avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 242 - 0, SLAB_PANIC, NULL, NULL); 243 244 audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); 245 }
··· 239 atomic_set(&avc_cache.lru_hint, 0); 240 241 avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 242 + 0, SLAB_PANIC, NULL); 243 244 audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); 245 }
+1 -1
security/selinux/hooks.c
··· 4913 4914 sel_inode_cache = kmem_cache_create("selinux_inode_security", 4915 sizeof(struct inode_security_struct), 4916 - 0, SLAB_PANIC, NULL, NULL); 4917 avc_init(); 4918 4919 original_ops = secondary_ops = security_ops;
··· 4913 4914 sel_inode_cache = kmem_cache_create("selinux_inode_security", 4915 sizeof(struct inode_security_struct), 4916 + 0, SLAB_PANIC, NULL); 4917 avc_init(); 4918 4919 original_ops = secondary_ops = security_ops;
+1 -1
security/selinux/ss/avtab.c
··· 445 { 446 avtab_node_cachep = kmem_cache_create("avtab_node", 447 sizeof(struct avtab_node), 448 - 0, SLAB_PANIC, NULL, NULL); 449 } 450 451 void avtab_cache_destroy(void)
··· 445 { 446 avtab_node_cachep = kmem_cache_create("avtab_node", 447 sizeof(struct avtab_node), 448 + 0, SLAB_PANIC, NULL); 449 } 450 451 void avtab_cache_destroy(void)