@@ -752,8 +752,7 @@
 				PTRS_PER_PMD*sizeof(pmd_t),
 				PTRS_PER_PMD*sizeof(pmd_t),
 				SLAB_PANIC,
-				pmd_ctor,
-				NULL);
+				pmd_ctor);
 	if (!SHARED_KERNEL_PMD) {
 		/* If we're in PAE mode and have a non-shared
 		   kernel pmd, then the pgd size must be a
@@ -804,7 +804,7 @@
 
 	flash_block_cache = kmem_cache_create("rtas_flash_cache",
 				RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
-				rtas_block_ctor, NULL);
+				rtas_block_ctor);
 	if (!flash_block_cache) {
 		printk(KERN_ERR "%s: failed to create block cache\n",
 			__FUNCTION__);
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -542,7 +542,7 @@
 					   HUGEPTE_TABLE_SIZE,
 					   HUGEPTE_TABLE_SIZE,
 					   0,
-					   zero_ctor, NULL);
+					   zero_ctor);
 	if (! huge_pgtable_cache)
 		panic("hugetlbpage_init(): could not create hugepte cache\n");
 
@@ -89,8 +89,7 @@
 					     sizeof(struct afs_vnode),
 					     0,
 					     SLAB_HWCACHE_ALIGN,
-					     afs_i_init_once,
-					     NULL);
+					     afs_i_init_once);
 	if (!afs_inode_cachep) {
 		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
 		return ret;
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -414,7 +414,7 @@
 }
 
 /* Initialize the inode cache. Called at fs setup.
- * 
+ *
  * Taken from NFS implementation by Al Viro.
  */
 static int
@@ -424,7 +424,7 @@
 					      sizeof (struct befs_inode_info),
 					      0, (SLAB_RECLAIM_ACCOUNT|
 						SLAB_MEM_SPREAD),
-					      init_once, NULL);
+					      init_once);
 	if (befs_inode_cachep == NULL) {
 		printk(KERN_ERR "befs_init_inodecache: "
 		       "Couldn't initalize inode slabcache\n");
@@ -1388,8 +1388,7 @@
 					 0,
 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
 					 SLAB_MEM_SPREAD),
-					 init_once,
-					 NULL);
+					 init_once);
 	register_shrinker(&icache_shrinker);
 
 	/* Hash may have been set up in inode_init_early */
@@ -1484,7 +1484,7 @@
 					sizes[INDEX_AC].cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep =
@@ -1492,7 +1492,7 @@
 				sizes[INDEX_L3].cs_size,
 				ARCH_KMALLOC_MINALIGN,
 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-				NULL, NULL);
+				NULL);
 	}
 
 	slab_early_init = 0;
@@ -1510,7 +1510,7 @@
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 	}
 #ifdef CONFIG_ZONE_DMA
 	sizes->cs_dmacachep = kmem_cache_create(
@@ -1519,7 +1519,7 @@
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 					SLAB_PANIC,
-					NULL, NULL);
+					NULL);
 #endif
 		sizes++;
 		names++;
@@ -2101,12 +2101,10 @@
  * @align: The required alignment for the objects.
  * @flags: SLAB flags
  * @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects (not implemented anymore).
  *
  * Returns a ptr to the cache on success, NULL on failure.
  * Cannot be called within a int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache
- * and the @dtor is run before the pages are handed back.
+ * The @ctor is run when new pages are allocated by the cache.
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
@@ -2124,8 +2122,7 @@
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	void (*ctor)(void*, struct kmem_cache *, unsigned long))
 {
 	size_t left_over, slab_size, ralign;
 	struct kmem_cache *cachep = NULL, *pc;
@@ -2133,7 +2130,7 @@
 	 * Sanity checks... these are all serious usage bugs.
 	 */
 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    size > KMALLOC_MAX_SIZE || dtor) {
+	    size > KMALLOC_MAX_SIZE) {
 		printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
 			name);
 		BUG();
@@ -123,7 +123,7 @@
 	peer_cachep = kmem_cache_create("inet_peer_cache",
 			sizeof(struct inet_peer),
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-			NULL, NULL);
+			NULL);
 
 	/* All the timers, started at system startup tend
 	   to synchronize. Perturb it a bit.
@@ -2430,7 +2430,7 @@
 	tcp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("tcp_bind_bucket",
 				  sizeof(struct inet_bind_bucket), 0,
-				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	/* Size and allocate the main established and bind bucket
 	 * hash tables.
@@ -1001,7 +1001,7 @@
 {
 	/* allocate a slab in which we can store keys */
 	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
-			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	/* add the special key types */
 	list_add_tail(&key_type_keyring.link, &key_types_list);
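
Note for callers not covered by this patch: the conversion is mechanical. Every
caller drops the trailing destructor argument (in-tree it was almost always NULL),
and the `|| dtor` test in the sanity check goes away along with the parameter.
Below is a minimal sketch of a converted caller, written against the ctor
signature shown in the mm/slab.c hunk above; the cache name, struct, and
constructor are hypothetical and not taken from this patch.

#include <linux/slab.h>

struct foo {
	int bar;
};

static struct kmem_cache *foo_cachep;

/* Constructors are unchanged: this still runs on each object when the
 * cache allocates new pages. Only the destructor hook is gone. */
static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
	struct foo *f = obj;

	f->bar = 0;
}

static int __init foo_init(void)
{
	/* Was: kmem_cache_create("foo_cache", ..., foo_ctor, NULL);
	 * the trailing NULL dtor argument is simply dropped. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}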