Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] slab: remove kmem_cache_t

Replace all uses of kmem_cache_t with struct kmem_cache.

The patch was generated using the following script:

#!/bin/sh
#
# Replace one string by another in all the kernel sources.
#

set -e

for file in `find * -name "*.c" -o -name "*.h"|xargs grep -l $1`; do
quilt add $file
sed -e "1,\$s/$1/$2/g" $file >/tmp/$$
mv /tmp/$$ $file
quilt refresh
done

The script was run like this:

sh replace kmem_cache_t "struct kmem_cache"

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Christoph Lameter and committed by Linus Torvalds.
e18b890b 441e143e

+332 -332
+2 -2
Documentation/DMA-API.txt
··· 77 77 Many drivers need lots of small dma-coherent memory regions for DMA 78 78 descriptors or I/O buffers. Rather than allocating in units of a page 79 79 or more using dma_alloc_coherent(), you can use DMA pools. These work 80 - much like a kmem_cache_t, except that they use the dma-coherent allocator 80 + much like a struct kmem_cache, except that they use the dma-coherent allocator 81 81 not __get_free_pages(). Also, they understand common hardware constraints 82 82 for alignment, like queue heads needing to be aligned on N byte boundaries. 83 83 ··· 94 94 for use with a given device. It must be called in a context which 95 95 can sleep. 96 96 97 - The "name" is for diagnostics (like a kmem_cache_t name); dev and size 97 + The "name" is for diagnostics (like a struct kmem_cache name); dev and size 98 98 are like what you'd pass to dma_alloc_coherent(). The device's hardware 99 99 alignment requirement for this type of data is "align" (which is expressed 100 100 in bytes, and must be a power of two). If your device has no boundary
+2 -2
arch/arm/mach-s3c2410/dma.c
··· 40 40 41 41 /* io map for dma */ 42 42 static void __iomem *dma_base; 43 - static kmem_cache_t *dma_kmem; 43 + static struct kmem_cache *dma_kmem; 44 44 45 45 struct s3c24xx_dma_selection dma_sel; 46 46 ··· 1271 1271 1272 1272 /* kmem cache implementation */ 1273 1273 1274 - static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f) 1274 + static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f) 1275 1275 { 1276 1276 memset(p, 0, sizeof(struct s3c2410_dma_buf)); 1277 1277 }
+3 -3
arch/arm26/mm/memc.c
··· 24 24 25 25 #define MEMC_TABLE_SIZE (256*sizeof(unsigned long)) 26 26 27 - kmem_cache_t *pte_cache, *pgd_cache; 27 + struct kmem_cache *pte_cache, *pgd_cache; 28 28 int page_nr; 29 29 30 30 /* ··· 162 162 { 163 163 } 164 164 165 - static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags) 165 + static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags) 166 166 { 167 167 memzero(pte, sizeof(pte_t) * PTRS_PER_PTE); 168 168 } 169 169 170 - static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags) 170 + static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags) 171 171 { 172 172 memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t)); 173 173 }
+3 -3
arch/frv/mm/pgalloc.c
··· 18 18 #include <asm/cacheflush.h> 19 19 20 20 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); 21 - kmem_cache_t *pgd_cache; 21 + struct kmem_cache *pgd_cache; 22 22 23 23 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 24 24 { ··· 100 100 set_page_private(next, (unsigned long) pprev); 101 101 } 102 102 103 - void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) 103 + void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) 104 104 { 105 105 unsigned long flags; 106 106 ··· 120 120 } 121 121 122 122 /* never called when PTRS_PER_PMD > 1 */ 123 - void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 123 + void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused) 124 124 { 125 125 unsigned long flags; /* can be called from interrupt context */ 126 126
+2 -2
arch/i386/mm/init.c
··· 699 699 #endif 700 700 #endif 701 701 702 - kmem_cache_t *pgd_cache; 703 - kmem_cache_t *pmd_cache; 702 + struct kmem_cache *pgd_cache; 703 + struct kmem_cache *pmd_cache; 704 704 705 705 void __init pgtable_cache_init(void) 706 706 {
+3 -3
arch/i386/mm/pgtable.c
··· 193 193 return pte; 194 194 } 195 195 196 - void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags) 196 + void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags) 197 197 { 198 198 memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t)); 199 199 } ··· 233 233 set_page_private(next, (unsigned long)pprev); 234 234 } 235 235 236 - void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused) 236 + void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused) 237 237 { 238 238 unsigned long flags; 239 239 ··· 253 253 } 254 254 255 255 /* never called when PTRS_PER_PMD > 1 */ 256 - void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused) 256 + void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused) 257 257 { 258 258 unsigned long flags; /* can be called from interrupt context */ 259 259
+1 -1
arch/ia64/ia32/ia32_support.c
··· 249 249 250 250 #if PAGE_SHIFT > IA32_PAGE_SHIFT 251 251 { 252 - extern kmem_cache_t *partial_page_cachep; 252 + extern struct kmem_cache *partial_page_cachep; 253 253 254 254 partial_page_cachep = kmem_cache_create("partial_page_cache", 255 255 sizeof(struct partial_page), 0, 0,
+1 -1
arch/ia64/ia32/sys_ia32.c
··· 254 254 } 255 255 256 256 /* SLAB cache for partial_page structures */ 257 - kmem_cache_t *partial_page_cachep; 257 + struct kmem_cache *partial_page_cachep; 258 258 259 259 /* 260 260 * init partial_page_list.
+2 -2
arch/powerpc/kernel/rtas_flash.c
··· 101 101 static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; 102 102 103 103 /* Use slab cache to guarantee 4k alignment */ 104 - static kmem_cache_t *flash_block_cache = NULL; 104 + static struct kmem_cache *flash_block_cache = NULL; 105 105 106 106 #define FLASH_BLOCK_LIST_VERSION (1UL) 107 107 ··· 286 286 } 287 287 288 288 /* constructor for flash_block_cache */ 289 - void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags) 289 + void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags) 290 290 { 291 291 memset(ptr, 0, RTAS_BLK_SIZE); 292 292 }
+1 -1
arch/powerpc/mm/hugetlbpage.c
··· 1047 1047 return err; 1048 1048 } 1049 1049 1050 - static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) 1050 + static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) 1051 1051 { 1052 1052 memset(addr, 0, kmem_cache_size(cache)); 1053 1053 }
+3 -3
arch/powerpc/mm/init_64.c
··· 141 141 } 142 142 module_init(setup_kcore); 143 143 144 - static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) 144 + static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) 145 145 { 146 146 memset(addr, 0, kmem_cache_size(cache)); 147 147 } ··· 166 166 /* Hugepages need one extra cache, initialized in hugetlbpage.c. We 167 167 * can't put into the tables above, because HPAGE_SHIFT is not compile 168 168 * time constant. */ 169 - kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1]; 169 + struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1]; 170 170 #else 171 - kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 171 + struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; 172 172 #endif 173 173 174 174 void pgtable_cache_init(void)
+2 -2
arch/powerpc/platforms/cell/spufs/inode.c
··· 40 40 41 41 #include "spufs.h" 42 42 43 - static kmem_cache_t *spufs_inode_cache; 43 + static struct kmem_cache *spufs_inode_cache; 44 44 char *isolated_loader; 45 45 46 46 static struct inode * ··· 65 65 } 66 66 67 67 static void 68 - spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags) 68 + spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags) 69 69 { 70 70 struct spufs_inode_info *ei = p; 71 71
+1 -1
arch/sh/kernel/cpu/sh4/sq.c
··· 38 38 39 39 static struct sq_mapping *sq_mapping_list; 40 40 static DEFINE_SPINLOCK(sq_mapping_lock); 41 - static kmem_cache_t *sq_cache; 41 + static struct kmem_cache *sq_cache; 42 42 static unsigned long *sq_bitmap; 43 43 44 44 #define store_queue_barrier() \
+3 -3
arch/sh/mm/pmb.c
··· 30 30 31 31 #define NR_PMB_ENTRIES 16 32 32 33 - static kmem_cache_t *pmb_cache; 33 + static struct kmem_cache *pmb_cache; 34 34 static unsigned long pmb_map; 35 35 36 36 static struct pmb_entry pmb_init_map[] = { ··· 283 283 } while (pmbe); 284 284 } 285 285 286 - static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags) 286 + static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags) 287 287 { 288 288 struct pmb_entry *pmbe = pmb; 289 289 ··· 297 297 spin_unlock_irq(&pmb_list_lock); 298 298 } 299 299 300 - static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags) 300 + static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags) 301 301 { 302 302 spin_lock_irq(&pmb_list_lock); 303 303 pmb_list_del(pmb);
+2 -2
arch/sparc64/mm/init.c
··· 176 176 177 177 int bigkernel = 0; 178 178 179 - kmem_cache_t *pgtable_cache __read_mostly; 179 + struct kmem_cache *pgtable_cache __read_mostly; 180 180 181 - static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) 181 + static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags) 182 182 { 183 183 clear_page(addr); 184 184 }
+1 -1
arch/sparc64/mm/tsb.c
··· 239 239 } 240 240 } 241 241 242 - static kmem_cache_t *tsb_caches[8] __read_mostly; 242 + static struct kmem_cache *tsb_caches[8] __read_mostly; 243 243 244 244 static const char *tsb_cache_names[8] = { 245 245 "tsb_8KB",
+2 -2
block/cfq-iosched.c
··· 43 43 #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) 44 44 #define RQ_CFQQ(rq) ((rq)->elevator_private2) 45 45 46 - static kmem_cache_t *cfq_pool; 47 - static kmem_cache_t *cfq_ioc_pool; 46 + static struct kmem_cache *cfq_pool; 47 + static struct kmem_cache *cfq_ioc_pool; 48 48 49 49 static DEFINE_PER_CPU(unsigned long, ioc_count); 50 50 static struct completion *ioc_gone;
+3 -3
block/ll_rw_blk.c
··· 44 44 /* 45 45 * For the allocated request tables 46 46 */ 47 - static kmem_cache_t *request_cachep; 47 + static struct kmem_cache *request_cachep; 48 48 49 49 /* 50 50 * For queue allocation 51 51 */ 52 - static kmem_cache_t *requestq_cachep; 52 + static struct kmem_cache *requestq_cachep; 53 53 54 54 /* 55 55 * For io context allocations 56 56 */ 57 - static kmem_cache_t *iocontext_cachep; 57 + static struct kmem_cache *iocontext_cachep; 58 58 59 59 /* 60 60 * Controlling structure to kblockd
+1 -1
drivers/block/aoe/aoeblk.c
··· 12 12 #include <linux/netdevice.h> 13 13 #include "aoe.h" 14 14 15 - static kmem_cache_t *buf_pool_cache; 15 + static struct kmem_cache *buf_pool_cache; 16 16 17 17 static ssize_t aoedisk_show_state(struct gendisk * disk, char *page) 18 18 {
+1 -1
drivers/ieee1394/eth1394.c
··· 133 133 #define ETH1394_DRIVER_NAME "eth1394" 134 134 static const char driver_name[] = ETH1394_DRIVER_NAME; 135 135 136 - static kmem_cache_t *packet_task_cache; 136 + static struct kmem_cache *packet_task_cache; 137 137 138 138 static struct hpsb_highlevel eth1394_highlevel; 139 139
+1 -1
drivers/md/dm-crypt.c
··· 101 101 #define MIN_POOL_PAGES 32 102 102 #define MIN_BIO_PAGES 8 103 103 104 - static kmem_cache_t *_crypt_io_pool; 104 + static struct kmem_cache *_crypt_io_pool; 105 105 106 106 /* 107 107 * Different IV generation algorithms:
+1 -1
drivers/md/dm-mpath.c
··· 101 101 102 102 #define MIN_IOS 256 /* Mempool size */ 103 103 104 - static kmem_cache_t *_mpio_cache; 104 + static struct kmem_cache *_mpio_cache; 105 105 106 106 struct workqueue_struct *kmultipathd; 107 107 static void process_queued_ios(struct work_struct *work);
+3 -3
drivers/md/dm-snap.c
··· 88 88 * Hash table mapping origin volumes to lists of snapshots and 89 89 * a lock to protect it 90 90 */ 91 - static kmem_cache_t *exception_cache; 92 - static kmem_cache_t *pending_cache; 91 + static struct kmem_cache *exception_cache; 92 + static struct kmem_cache *pending_cache; 93 93 static mempool_t *pending_pool; 94 94 95 95 /* ··· 228 228 return 0; 229 229 } 230 230 231 - static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) 231 + static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem) 232 232 { 233 233 struct list_head *slot; 234 234 struct exception *ex, *next;
+2 -2
drivers/md/dm.c
··· 121 121 }; 122 122 123 123 #define MIN_IOS 256 124 - static kmem_cache_t *_io_cache; 125 - static kmem_cache_t *_tio_cache; 124 + static struct kmem_cache *_io_cache; 125 + static struct kmem_cache *_tio_cache; 126 126 127 127 static int __init local_init(void) 128 128 {
+1 -1
drivers/md/kcopyd.c
··· 203 203 /* FIXME: this should scale with the number of pages */ 204 204 #define MIN_JOBS 512 205 205 206 - static kmem_cache_t *_job_cache; 206 + static struct kmem_cache *_job_cache; 207 207 static mempool_t *_job_pool; 208 208 209 209 /*
+2 -2
drivers/md/raid5.c
··· 348 348 349 349 static int grow_stripes(raid5_conf_t *conf, int num) 350 350 { 351 - kmem_cache_t *sc; 351 + struct kmem_cache *sc; 352 352 int devs = conf->raid_disks; 353 353 354 354 sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev)); ··· 397 397 LIST_HEAD(newstripes); 398 398 struct disk_info *ndisks; 399 399 int err = 0; 400 - kmem_cache_t *sc; 400 + struct kmem_cache *sc; 401 401 int i; 402 402 403 403 if (newsize <= conf->pool_size)
+1 -1
drivers/message/i2o/i2o_block.h
··· 64 64 65 65 /* I2O Block OSM mempool struct */ 66 66 struct i2o_block_mempool { 67 - kmem_cache_t *slab; 67 + struct kmem_cache *slab; 68 68 mempool_t *pool; 69 69 }; 70 70
+1 -1
drivers/pci/msi.c
··· 26 26 27 27 static DEFINE_SPINLOCK(msi_lock); 28 28 static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL }; 29 - static kmem_cache_t* msi_cachep; 29 + static struct kmem_cache* msi_cachep; 30 30 31 31 static int pci_msi_enable = 1; 32 32
+1 -1
drivers/s390/block/dasd_devmap.c
··· 25 25 26 26 #include "dasd_int.h" 27 27 28 - kmem_cache_t *dasd_page_cache; 28 + struct kmem_cache *dasd_page_cache; 29 29 EXPORT_SYMBOL_GPL(dasd_page_cache); 30 30 31 31 /*
+1 -1
drivers/s390/block/dasd_int.h
··· 474 474 extern unsigned int dasd_profile_level; 475 475 extern struct block_device_operations dasd_device_operations; 476 476 477 - extern kmem_cache_t *dasd_page_cache; 477 + extern struct kmem_cache *dasd_page_cache; 478 478 479 479 struct dasd_ccw_req * 480 480 dasd_kmalloc_request(char *, int, int, struct dasd_device *);
+3 -3
drivers/s390/scsi/zfcp_def.h
··· 1032 1032 wwn_t init_wwpn; 1033 1033 fcp_lun_t init_fcp_lun; 1034 1034 char *driver_version; 1035 - kmem_cache_t *fsf_req_qtcb_cache; 1036 - kmem_cache_t *sr_buffer_cache; 1037 - kmem_cache_t *gid_pn_cache; 1035 + struct kmem_cache *fsf_req_qtcb_cache; 1036 + struct kmem_cache *sr_buffer_cache; 1037 + struct kmem_cache *gid_pn_cache; 1038 1038 }; 1039 1039 1040 1040 /**
+2 -2
drivers/scsi/aic94xx/aic94xx.h
··· 56 56 /* 2*ITNL timeout + 1 second */ 57 57 #define AIC94XX_SCB_TIMEOUT (5*HZ) 58 58 59 - extern kmem_cache_t *asd_dma_token_cache; 60 - extern kmem_cache_t *asd_ascb_cache; 59 + extern struct kmem_cache *asd_dma_token_cache; 60 + extern struct kmem_cache *asd_ascb_cache; 61 61 extern char sas_addr_str[2*SAS_ADDR_SIZE + 1]; 62 62 63 63 static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
+1 -1
drivers/scsi/aic94xx/aic94xx_hwi.c
··· 1047 1047 static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, 1048 1048 gfp_t gfp_flags) 1049 1049 { 1050 - extern kmem_cache_t *asd_ascb_cache; 1050 + extern struct kmem_cache *asd_ascb_cache; 1051 1051 struct asd_seq_data *seq = &asd_ha->seq; 1052 1052 struct asd_ascb *ascb; 1053 1053 unsigned long flags;
+2 -2
drivers/scsi/aic94xx/aic94xx_init.c
··· 450 450 asd_ha->scb_pool = NULL; 451 451 } 452 452 453 - kmem_cache_t *asd_dma_token_cache; 454 - kmem_cache_t *asd_ascb_cache; 453 + struct kmem_cache *asd_dma_token_cache; 454 + struct kmem_cache *asd_ascb_cache; 455 455 456 456 static int asd_create_global_caches(void) 457 457 {
+1 -1
drivers/scsi/libsas/sas_init.c
··· 36 36 37 37 #include "../scsi_sas_internal.h" 38 38 39 - kmem_cache_t *sas_task_cache; 39 + struct kmem_cache *sas_task_cache; 40 40 41 41 /*------------ SAS addr hash -----------*/ 42 42 void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
+1 -1
drivers/scsi/qla2xxx/qla_os.c
··· 24 24 /* 25 25 * SRB allocation cache 26 26 */ 27 - static kmem_cache_t *srb_cachep; 27 + static struct kmem_cache *srb_cachep; 28 28 29 29 /* 30 30 * Ioctl related information.
+1 -1
drivers/scsi/qla4xxx/ql4_os.c
··· 19 19 /* 20 20 * SRB allocation cache 21 21 */ 22 - static kmem_cache_t *srb_cachep; 22 + static struct kmem_cache *srb_cachep; 23 23 24 24 /* 25 25 * Module parameter information and variables
+1 -1
drivers/scsi/scsi.c
··· 136 136 EXPORT_SYMBOL(scsi_device_type); 137 137 138 138 struct scsi_host_cmd_pool { 139 - kmem_cache_t *slab; 139 + struct kmem_cache *slab; 140 140 unsigned int users; 141 141 char *name; 142 142 unsigned int slab_flags;
+2 -2
drivers/scsi/scsi_lib.c
··· 36 36 struct scsi_host_sg_pool { 37 37 size_t size; 38 38 char *name; 39 - kmem_cache_t *slab; 39 + struct kmem_cache *slab; 40 40 mempool_t *pool; 41 41 }; 42 42 ··· 241 241 char sense[SCSI_SENSE_BUFFERSIZE]; 242 242 }; 243 243 244 - static kmem_cache_t *scsi_io_context_cache; 244 + static struct kmem_cache *scsi_io_context_cache; 245 245 246 246 static void scsi_end_async(struct request *req, int uptodate) 247 247 {
+1 -1
drivers/scsi/scsi_tgt_lib.c
··· 33 33 #include "scsi_tgt_priv.h" 34 34 35 35 static struct workqueue_struct *scsi_tgtd; 36 - static kmem_cache_t *scsi_tgt_cmd_cache; 36 + static struct kmem_cache *scsi_tgt_cmd_cache; 37 37 38 38 /* 39 39 * TODO: this struct will be killed when the block layer supports large bios
+3 -3
drivers/usb/host/hc_crisv10.c
··· 275 275 static int zout_buffer[4] __attribute__ ((aligned (4))); 276 276 277 277 /* Cache for allocating new EP and SB descriptors. */ 278 - static kmem_cache_t *usb_desc_cache; 278 + static struct kmem_cache *usb_desc_cache; 279 279 280 280 /* Cache for the registers allocated in the top half. */ 281 - static kmem_cache_t *top_half_reg_cache; 281 + static struct kmem_cache *top_half_reg_cache; 282 282 283 283 /* Cache for the data allocated in the isoc descr top half. */ 284 - static kmem_cache_t *isoc_compl_cache; 284 + static struct kmem_cache *isoc_compl_cache; 285 285 286 286 static struct usb_bus *etrax_usb_bus; 287 287
+1 -1
drivers/usb/host/uhci-hcd.c
··· 81 81 static char *errbuf; 82 82 #define ERRBUF_LEN (32 * 1024) 83 83 84 - static kmem_cache_t *uhci_up_cachep; /* urb_priv */ 84 + static struct kmem_cache *uhci_up_cachep; /* urb_priv */ 85 85 86 86 static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state); 87 87 static void wakeup_rh(struct uhci_hcd *uhci);
+3 -3
drivers/usb/mon/mon_text.c
··· 50 50 51 51 #define SLAB_NAME_SZ 30 52 52 struct mon_reader_text { 53 - kmem_cache_t *e_slab; 53 + struct kmem_cache *e_slab; 54 54 int nevents; 55 55 struct list_head e_list; 56 56 struct mon_reader r; /* In C, parent class can be placed anywhere */ ··· 63 63 char slab_name[SLAB_NAME_SZ]; 64 64 }; 65 65 66 - static void mon_text_ctor(void *, kmem_cache_t *, unsigned long); 66 + static void mon_text_ctor(void *, struct kmem_cache *, unsigned long); 67 67 68 68 /* 69 69 * mon_text_submit ··· 450 450 /* 451 451 * Slab interface: constructor. 452 452 */ 453 - static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags) 453 + static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags) 454 454 { 455 455 /* 456 456 * Nothing to initialize. No, really!
+2 -2
fs/adfs/super.c
··· 212 212 return 0; 213 213 } 214 214 215 - static kmem_cache_t *adfs_inode_cachep; 215 + static struct kmem_cache *adfs_inode_cachep; 216 216 217 217 static struct inode *adfs_alloc_inode(struct super_block *sb) 218 218 { ··· 228 228 kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); 229 229 } 230 230 231 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 231 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 232 232 { 233 233 struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; 234 234
+2 -2
fs/affs/super.c
··· 66 66 pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean); 67 67 } 68 68 69 - static kmem_cache_t * affs_inode_cachep; 69 + static struct kmem_cache * affs_inode_cachep; 70 70 71 71 static struct inode *affs_alloc_inode(struct super_block *sb) 72 72 { ··· 83 83 kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); 84 84 } 85 85 86 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 86 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 87 87 { 88 88 struct affs_inode_info *ei = (struct affs_inode_info *) foo; 89 89
+3 -3
fs/afs/super.c
··· 35 35 struct afs_volume *volume; 36 36 }; 37 37 38 - static void afs_i_init_once(void *foo, kmem_cache_t *cachep, 38 + static void afs_i_init_once(void *foo, struct kmem_cache *cachep, 39 39 unsigned long flags); 40 40 41 41 static int afs_get_sb(struct file_system_type *fs_type, ··· 65 65 .put_super = afs_put_super, 66 66 }; 67 67 68 - static kmem_cache_t *afs_inode_cachep; 68 + static struct kmem_cache *afs_inode_cachep; 69 69 static atomic_t afs_count_active_inodes; 70 70 71 71 /*****************************************************************************/ ··· 384 384 /* 385 385 * initialise an inode cache slab element prior to any use 386 386 */ 387 - static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep, 387 + static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, 388 388 unsigned long flags) 389 389 { 390 390 struct afs_vnode *vnode = (struct afs_vnode *) _vnode;
+2 -2
fs/aio.c
··· 47 47 unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ 48 48 /*----end sysctl variables---*/ 49 49 50 - static kmem_cache_t *kiocb_cachep; 51 - static kmem_cache_t *kioctx_cachep; 50 + static struct kmem_cache *kiocb_cachep; 51 + static struct kmem_cache *kioctx_cachep; 52 52 53 53 static struct workqueue_struct *aio_wq; 54 54
+2 -2
fs/befs/linuxvfs.c
··· 61 61 }; 62 62 63 63 /* slab cache for befs_inode_info objects */ 64 - static kmem_cache_t *befs_inode_cachep; 64 + static struct kmem_cache *befs_inode_cachep; 65 65 66 66 static const struct file_operations befs_dir_operations = { 67 67 .read = generic_read_dir, ··· 289 289 kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); 290 290 } 291 291 292 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 292 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 293 293 { 294 294 struct befs_inode_info *bi = (struct befs_inode_info *) foo; 295 295
+2 -2
fs/bfs/inode.c
··· 228 228 unlock_kernel(); 229 229 } 230 230 231 - static kmem_cache_t * bfs_inode_cachep; 231 + static struct kmem_cache * bfs_inode_cachep; 232 232 233 233 static struct inode *bfs_alloc_inode(struct super_block *sb) 234 234 { ··· 244 244 kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); 245 245 } 246 246 247 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 247 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 248 248 { 249 249 struct bfs_inode_info *bi = foo; 250 250
+2 -2
fs/bio.c
··· 30 30 31 31 #define BIO_POOL_SIZE 256 32 32 33 - static kmem_cache_t *bio_slab __read_mostly; 33 + static struct kmem_cache *bio_slab __read_mostly; 34 34 35 35 #define BIOVEC_NR_POOLS 6 36 36 ··· 44 44 struct biovec_slab { 45 45 int nr_vecs; 46 46 char *name; 47 - kmem_cache_t *slab; 47 + struct kmem_cache *slab; 48 48 }; 49 49 50 50 /*
+2 -2
fs/block_dev.c
··· 235 235 */ 236 236 237 237 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); 238 - static kmem_cache_t * bdev_cachep __read_mostly; 238 + static struct kmem_cache * bdev_cachep __read_mostly; 239 239 240 240 static struct inode *bdev_alloc_inode(struct super_block *sb) 241 241 { ··· 253 253 kmem_cache_free(bdev_cachep, bdi); 254 254 } 255 255 256 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 256 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 257 257 { 258 258 struct bdev_inode *ei = (struct bdev_inode *) foo; 259 259 struct block_device *bdev = &ei->bdev;
+2 -2
fs/buffer.c
··· 2908 2908 /* 2909 2909 * Buffer-head allocation 2910 2910 */ 2911 - static kmem_cache_t *bh_cachep; 2911 + static struct kmem_cache *bh_cachep; 2912 2912 2913 2913 /* 2914 2914 * Once the number of bh's in the machine exceeds this level, we start ··· 2961 2961 EXPORT_SYMBOL(free_buffer_head); 2962 2962 2963 2963 static void 2964 - init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags) 2964 + init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags) 2965 2965 { 2966 2966 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 2967 2967 SLAB_CTOR_CONSTRUCTOR) {
+7 -7
fs/cifs/cifsfs.c
··· 81 81 extern mempool_t *cifs_req_poolp; 82 82 extern mempool_t *cifs_mid_poolp; 83 83 84 - extern kmem_cache_t *cifs_oplock_cachep; 84 + extern struct kmem_cache *cifs_oplock_cachep; 85 85 86 86 static int 87 87 cifs_read_super(struct super_block *sb, void *data, ··· 232 232 return generic_permission(inode, mask, NULL); 233 233 } 234 234 235 - static kmem_cache_t *cifs_inode_cachep; 236 - static kmem_cache_t *cifs_req_cachep; 237 - static kmem_cache_t *cifs_mid_cachep; 238 - kmem_cache_t *cifs_oplock_cachep; 239 - static kmem_cache_t *cifs_sm_req_cachep; 235 + static struct kmem_cache *cifs_inode_cachep; 236 + static struct kmem_cache *cifs_req_cachep; 237 + static struct kmem_cache *cifs_mid_cachep; 238 + struct kmem_cache *cifs_oplock_cachep; 239 + static struct kmem_cache *cifs_sm_req_cachep; 240 240 mempool_t *cifs_sm_req_poolp; 241 241 mempool_t *cifs_req_poolp; 242 242 mempool_t *cifs_mid_poolp; ··· 668 668 }; 669 669 670 670 static void 671 - cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags) 671 + cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags) 672 672 { 673 673 struct cifsInodeInfo *cifsi = inode; 674 674
+1 -1
fs/cifs/transport.c
··· 34 34 #include "cifs_debug.h" 35 35 36 36 extern mempool_t *cifs_mid_poolp; 37 - extern kmem_cache_t *cifs_oplock_cachep; 37 + extern struct kmem_cache *cifs_oplock_cachep; 38 38 39 39 static struct mid_q_entry * 40 40 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
+2 -2
fs/coda/inode.c
··· 38 38 static void coda_put_super(struct super_block *); 39 39 static int coda_statfs(struct dentry *dentry, struct kstatfs *buf); 40 40 41 - static kmem_cache_t * coda_inode_cachep; 41 + static struct kmem_cache * coda_inode_cachep; 42 42 43 43 static struct inode *coda_alloc_inode(struct super_block *sb) 44 44 { ··· 58 58 kmem_cache_free(coda_inode_cachep, ITOC(inode)); 59 59 } 60 60 61 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 61 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 62 62 { 63 63 struct coda_inode_info *ei = (struct coda_inode_info *) foo; 64 64
+1 -1
fs/configfs/configfs_internal.h
··· 49 49 #define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR) 50 50 51 51 extern struct vfsmount * configfs_mount; 52 - extern kmem_cache_t *configfs_dir_cachep; 52 + extern struct kmem_cache *configfs_dir_cachep; 53 53 54 54 extern int configfs_is_root(struct config_item *item); 55 55
+1 -1
fs/configfs/mount.c
··· 38 38 39 39 struct vfsmount * configfs_mount = NULL; 40 40 struct super_block * configfs_sb = NULL; 41 - kmem_cache_t *configfs_dir_cachep; 41 + struct kmem_cache *configfs_dir_cachep; 42 42 static int configfs_mnt_count = 0; 43 43 44 44 static struct super_operations configfs_ops = {
+3 -3
fs/dcache.c
··· 43 43 44 44 EXPORT_SYMBOL(dcache_lock); 45 45 46 - static kmem_cache_t *dentry_cache __read_mostly; 46 + static struct kmem_cache *dentry_cache __read_mostly; 47 47 48 48 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) 49 49 ··· 2072 2072 } 2073 2073 2074 2074 /* SLAB cache for __getname() consumers */ 2075 - kmem_cache_t *names_cachep __read_mostly; 2075 + struct kmem_cache *names_cachep __read_mostly; 2076 2076 2077 2077 /* SLAB cache for file structures */ 2078 - kmem_cache_t *filp_cachep __read_mostly; 2078 + struct kmem_cache *filp_cachep __read_mostly; 2079 2079 2080 2080 EXPORT_SYMBOL(d_genocide); 2081 2081
+1 -1
fs/dcookies.c
··· 37 37 38 38 static LIST_HEAD(dcookie_users); 39 39 static DEFINE_MUTEX(dcookie_mutex); 40 - static kmem_cache_t *dcookie_cache __read_mostly; 40 + static struct kmem_cache *dcookie_cache __read_mostly; 41 41 static struct list_head *dcookie_hashtable __read_mostly; 42 42 static size_t hash_size __read_mostly; 43 43
+1 -1
fs/dlm/memory.c
··· 15 15 #include "config.h" 16 16 #include "memory.h" 17 17 18 - static kmem_cache_t *lkb_cache; 18 + static struct kmem_cache *lkb_cache; 19 19 20 20 21 21 int dlm_memory_init(void)
+1 -1
fs/dnotify.c
··· 23 23 24 24 int dir_notify_enable __read_mostly = 1; 25 25 26 - static kmem_cache_t *dn_cache __read_mostly; 26 + static struct kmem_cache *dn_cache __read_mostly; 27 27 28 28 static void redo_inode_mask(struct inode *inode) 29 29 {
+1 -1
fs/dquot.c
··· 131 131 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; 132 132 133 133 /* SLAB cache for dquot structures */ 134 - static kmem_cache_t *dquot_cachep; 134 + static struct kmem_cache *dquot_cachep; 135 135 136 136 int register_quota_format(struct quota_format_type *fmt) 137 137 {
+1 -1
fs/ecryptfs/main.c
··· 546 546 } 547 547 548 548 static struct ecryptfs_cache_info { 549 - kmem_cache_t **cache; 549 + struct kmem_cache **cache; 550 550 const char *name; 551 551 size_t size; 552 552 void (*ctor)(void*, struct kmem_cache *, unsigned long);
+2 -2
fs/efs/super.c
··· 52 52 }; 53 53 54 54 55 - static kmem_cache_t * efs_inode_cachep; 55 + static struct kmem_cache * efs_inode_cachep; 56 56 57 57 static struct inode *efs_alloc_inode(struct super_block *sb) 58 58 { ··· 68 68 kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); 69 69 } 70 70 71 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 71 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 72 72 { 73 73 struct efs_inode_info *ei = (struct efs_inode_info *) foo; 74 74
+2 -2
fs/eventpoll.c
··· 283 283 static struct poll_safewake psw; 284 284 285 285 /* Slab cache used to allocate "struct epitem" */ 286 - static kmem_cache_t *epi_cache __read_mostly; 286 + static struct kmem_cache *epi_cache __read_mostly; 287 287 288 288 /* Slab cache used to allocate "struct eppoll_entry" */ 289 - static kmem_cache_t *pwq_cache __read_mostly; 289 + static struct kmem_cache *pwq_cache __read_mostly; 290 290 291 291 /* Virtual fs used to allocate inodes for eventpoll files */ 292 292 static struct vfsmount *eventpoll_mnt __read_mostly;
+2 -2
fs/ext2/super.c
··· 135 135 return; 136 136 } 137 137 138 - static kmem_cache_t * ext2_inode_cachep; 138 + static struct kmem_cache * ext2_inode_cachep; 139 139 140 140 static struct inode *ext2_alloc_inode(struct super_block *sb) 141 141 { ··· 156 156 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); 157 157 } 158 158 159 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 159 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 160 160 { 161 161 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; 162 162
+2 -2
fs/ext3/super.c
··· 436 436 return; 437 437 } 438 438 439 - static kmem_cache_t *ext3_inode_cachep; 439 + static struct kmem_cache *ext3_inode_cachep; 440 440 441 441 /* 442 442 * Called inside transaction, so use GFP_NOFS ··· 462 462 kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); 463 463 } 464 464 465 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 465 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 466 466 { 467 467 struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; 468 468
+2 -2
fs/ext4/super.c
··· 486 486 return; 487 487 } 488 488 489 - static kmem_cache_t *ext4_inode_cachep; 489 + static struct kmem_cache *ext4_inode_cachep; 490 490 491 491 /* 492 492 * Called inside transaction, so use GFP_NOFS ··· 513 513 kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); 514 514 } 515 515 516 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 516 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 517 517 { 518 518 struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; 519 519
+2 -2
fs/fat/cache.c
··· 34 34 return FAT_MAX_CACHE; 35 35 } 36 36 37 - static kmem_cache_t *fat_cache_cachep; 37 + static struct kmem_cache *fat_cache_cachep; 38 38 39 - static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 39 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 40 40 { 41 41 struct fat_cache *cache = (struct fat_cache *)foo; 42 42
+2 -2
fs/fat/inode.c
··· 477 477 kfree(sbi); 478 478 } 479 479 480 - static kmem_cache_t *fat_inode_cachep; 480 + static struct kmem_cache *fat_inode_cachep; 481 481 482 482 static struct inode *fat_alloc_inode(struct super_block *sb) 483 483 { ··· 493 493 kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); 494 494 } 495 495 496 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 496 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 497 497 { 498 498 struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; 499 499
+1 -1
fs/fcntl.c
··· 553 553 } 554 554 555 555 static DEFINE_RWLOCK(fasync_lock); 556 - static kmem_cache_t *fasync_cache __read_mostly; 556 + static struct kmem_cache *fasync_cache __read_mostly; 557 557 558 558 /* 559 559 * fasync_helper() is used by some character device drivers (mainly mice)
+1 -1
fs/freevxfs/vxfs_inode.c
··· 46 46 47 47 extern struct inode_operations vxfs_immed_symlink_iops; 48 48 49 - kmem_cache_t *vxfs_inode_cachep; 49 + struct kmem_cache *vxfs_inode_cachep; 50 50 51 51 52 52 #ifdef DIAGNOSTIC
+1 -1
fs/fuse/dev.c
··· 19 19 20 20 MODULE_ALIAS_MISCDEV(FUSE_MINOR); 21 21 22 - static kmem_cache_t *fuse_req_cachep; 22 + static struct kmem_cache *fuse_req_cachep; 23 23 24 24 static struct fuse_conn *fuse_get_conn(struct file *file) 25 25 {
+2 -2
fs/fuse/inode.c
··· 22 22 MODULE_DESCRIPTION("Filesystem in Userspace"); 23 23 MODULE_LICENSE("GPL"); 24 24 25 - static kmem_cache_t *fuse_inode_cachep; 25 + static struct kmem_cache *fuse_inode_cachep; 26 26 struct list_head fuse_conn_list; 27 27 DEFINE_MUTEX(fuse_mutex); 28 28 ··· 601 601 static decl_subsys(fuse, NULL, NULL); 602 602 static decl_subsys(connections, NULL, NULL); 603 603 604 - static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, 604 + static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep, 605 605 unsigned long flags) 606 606 { 607 607 struct inode * inode = foo;
+2 -2
fs/gfs2/main.c
··· 25 25 #include "util.h" 26 26 #include "glock.h" 27 27 28 - static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 28 + static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 29 29 { 30 30 struct gfs2_inode *ip = foo; 31 31 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == ··· 37 37 } 38 38 } 39 39 40 - static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 40 + static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 41 41 { 42 42 struct gfs2_glock *gl = foo; 43 43 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+3 -3
fs/gfs2/util.c
··· 23 23 #include "lm.h" 24 24 #include "util.h" 25 25 26 - kmem_cache_t *gfs2_glock_cachep __read_mostly; 27 - kmem_cache_t *gfs2_inode_cachep __read_mostly; 28 - kmem_cache_t *gfs2_bufdata_cachep __read_mostly; 26 + struct kmem_cache *gfs2_glock_cachep __read_mostly; 27 + struct kmem_cache *gfs2_inode_cachep __read_mostly; 28 + struct kmem_cache *gfs2_bufdata_cachep __read_mostly; 29 29 30 30 void gfs2_assert_i(struct gfs2_sbd *sdp) 31 31 {
+3 -3
fs/gfs2/util.h
··· 146 146 gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__); 147 147 148 148 149 - extern kmem_cache_t *gfs2_glock_cachep; 150 - extern kmem_cache_t *gfs2_inode_cachep; 151 - extern kmem_cache_t *gfs2_bufdata_cachep; 149 + extern struct kmem_cache *gfs2_glock_cachep; 150 + extern struct kmem_cache *gfs2_inode_cachep; 151 + extern struct kmem_cache *gfs2_bufdata_cachep; 152 152 153 153 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, 154 154 unsigned int *p)
+2 -2
fs/hfs/super.c
··· 24 24 #include "hfs_fs.h" 25 25 #include "btree.h" 26 26 27 - static kmem_cache_t *hfs_inode_cachep; 27 + static struct kmem_cache *hfs_inode_cachep; 28 28 29 29 MODULE_LICENSE("GPL"); 30 30 ··· 430 430 .fs_flags = FS_REQUIRES_DEV, 431 431 }; 432 432 433 - static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 433 + static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) 434 434 { 435 435 struct hfs_inode_info *i = p; 436 436
+2 -2
fs/hfsplus/super.c
··· 434 434 MODULE_DESCRIPTION("Extended Macintosh Filesystem"); 435 435 MODULE_LICENSE("GPL"); 436 436 437 - static kmem_cache_t *hfsplus_inode_cachep; 437 + static struct kmem_cache *hfsplus_inode_cachep; 438 438 439 439 static struct inode *hfsplus_alloc_inode(struct super_block *sb) 440 440 { ··· 467 467 .fs_flags = FS_REQUIRES_DEV, 468 468 }; 469 469 470 - static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 470 + static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags) 471 471 { 472 472 struct hfsplus_inode_info *i = p; 473 473
+2 -2
fs/hpfs/super.c
··· 160 160 return 0; 161 161 } 162 162 163 - static kmem_cache_t * hpfs_inode_cachep; 163 + static struct kmem_cache * hpfs_inode_cachep; 164 164 165 165 static struct inode *hpfs_alloc_inode(struct super_block *sb) 166 166 { ··· 177 177 kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); 178 178 } 179 179 180 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 180 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 181 181 { 182 182 struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; 183 183
+2 -2
fs/hugetlbfs/inode.c
··· 513 513 } 514 514 515 515 516 - static kmem_cache_t *hugetlbfs_inode_cachep; 516 + static struct kmem_cache *hugetlbfs_inode_cachep; 517 517 518 518 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) 519 519 { ··· 545 545 }; 546 546 547 547 548 - static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 548 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 549 549 { 550 550 struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; 551 551
+2 -2
fs/inode.c
··· 97 97 */ 98 98 struct inodes_stat_t inodes_stat; 99 99 100 - static kmem_cache_t * inode_cachep __read_mostly; 100 + static struct kmem_cache * inode_cachep __read_mostly; 101 101 102 102 static struct inode *alloc_inode(struct super_block *sb) 103 103 { ··· 209 209 210 210 EXPORT_SYMBOL(inode_init_once); 211 211 212 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 212 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 213 213 { 214 214 struct inode * inode = (struct inode *) foo; 215 215
+2 -2
fs/inotify_user.c
··· 34 34 35 35 #include <asm/ioctls.h> 36 36 37 - static kmem_cache_t *watch_cachep __read_mostly; 38 - static kmem_cache_t *event_cachep __read_mostly; 37 + static struct kmem_cache *watch_cachep __read_mostly; 38 + static struct kmem_cache *event_cachep __read_mostly; 39 39 40 40 static struct vfsmount *inotify_mnt __read_mostly; 41 41
+2 -2
fs/isofs/inode.c
··· 57 57 static void isofs_read_inode(struct inode *); 58 58 static int isofs_statfs (struct dentry *, struct kstatfs *); 59 59 60 - static kmem_cache_t *isofs_inode_cachep; 60 + static struct kmem_cache *isofs_inode_cachep; 61 61 62 62 static struct inode *isofs_alloc_inode(struct super_block *sb) 63 63 { ··· 73 73 kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); 74 74 } 75 75 76 - static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 76 + static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) 77 77 { 78 78 struct iso_inode_info *ei = foo; 79 79
+3 -3
fs/jbd/journal.c
··· 1630 1630 #define JBD_MAX_SLABS 5 1631 1631 #define JBD_SLAB_INDEX(size) (size >> 11) 1632 1632 1633 - static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; 1633 + static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; 1634 1634 static const char *jbd_slab_names[JBD_MAX_SLABS] = { 1635 1635 "jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k" 1636 1636 }; ··· 1693 1693 /* 1694 1694 * Journal_head storage management 1695 1695 */ 1696 - static kmem_cache_t *journal_head_cache; 1696 + static struct kmem_cache *journal_head_cache; 1697 1697 #ifdef CONFIG_JBD_DEBUG 1698 1698 static atomic_t nr_journal_heads = ATOMIC_INIT(0); 1699 1699 #endif ··· 1996 1996 1997 1997 #endif 1998 1998 1999 - kmem_cache_t *jbd_handle_cache; 1999 + struct kmem_cache *jbd_handle_cache; 2000 2000 2001 2001 static int __init journal_init_handle_cache(void) 2002 2002 {
+2 -2
fs/jbd/revoke.c
··· 70 70 #include <linux/init.h> 71 71 #endif 72 72 73 - static kmem_cache_t *revoke_record_cache; 74 - static kmem_cache_t *revoke_table_cache; 73 + static struct kmem_cache *revoke_record_cache; 74 + static struct kmem_cache *revoke_table_cache; 75 75 76 76 /* Each revoke record represents one single revoked block. During 77 77 journal replay, this involves recording the transaction ID of the
+3 -3
fs/jbd2/journal.c
··· 1641 1641 #define JBD_MAX_SLABS 5 1642 1642 #define JBD_SLAB_INDEX(size) (size >> 11) 1643 1643 1644 - static kmem_cache_t *jbd_slab[JBD_MAX_SLABS]; 1644 + static struct kmem_cache *jbd_slab[JBD_MAX_SLABS]; 1645 1645 static const char *jbd_slab_names[JBD_MAX_SLABS] = { 1646 1646 "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k" 1647 1647 }; ··· 1704 1704 /* 1705 1705 * Journal_head storage management 1706 1706 */ 1707 - static kmem_cache_t *jbd2_journal_head_cache; 1707 + static struct kmem_cache *jbd2_journal_head_cache; 1708 1708 #ifdef CONFIG_JBD_DEBUG 1709 1709 static atomic_t nr_journal_heads = ATOMIC_INIT(0); 1710 1710 #endif ··· 2007 2007 2008 2008 #endif 2009 2009 2010 - kmem_cache_t *jbd2_handle_cache; 2010 + struct kmem_cache *jbd2_handle_cache; 2011 2011 2012 2012 static int __init journal_init_handle_cache(void) 2013 2013 {
+2 -2
fs/jbd2/revoke.c
··· 70 70 #include <linux/init.h> 71 71 #endif 72 72 73 - static kmem_cache_t *jbd2_revoke_record_cache; 74 - static kmem_cache_t *jbd2_revoke_table_cache; 73 + static struct kmem_cache *jbd2_revoke_record_cache; 74 + static struct kmem_cache *jbd2_revoke_table_cache; 75 75 76 76 /* Each revoke record represents one single revoked block. During 77 77 journal replay, this involves recording the transaction ID of the
+2 -2
fs/jffs/inode-v23.c
··· 61 61 static struct inode_operations jffs_dir_inode_operations; 62 62 static const struct address_space_operations jffs_address_operations; 63 63 64 - kmem_cache_t *node_cache = NULL; 65 - kmem_cache_t *fm_cache = NULL; 64 + struct kmem_cache *node_cache = NULL; 65 + struct kmem_cache *fm_cache = NULL; 66 66 67 67 /* Called by the VFS at mount time to initialize the whole file system. */ 68 68 static int jffs_fill_super(struct super_block *sb, void *data, int silent)
+2 -2
fs/jffs/jffs_fm.c
··· 29 29 static struct jffs_fm *jffs_alloc_fm(void); 30 30 static void jffs_free_fm(struct jffs_fm *n); 31 31 32 - extern kmem_cache_t *fm_cache; 33 - extern kmem_cache_t *node_cache; 32 + extern struct kmem_cache *fm_cache; 33 + extern struct kmem_cache *node_cache; 34 34 35 35 #if CONFIG_JFFS_FS_VERBOSE > 0 36 36 void
+9 -9
fs/jffs2/malloc.c
··· 19 19 20 20 /* These are initialised to NULL in the kernel startup code. 21 21 If you're porting to other operating systems, beware */ 22 - static kmem_cache_t *full_dnode_slab; 23 - static kmem_cache_t *raw_dirent_slab; 24 - static kmem_cache_t *raw_inode_slab; 25 - static kmem_cache_t *tmp_dnode_info_slab; 26 - static kmem_cache_t *raw_node_ref_slab; 27 - static kmem_cache_t *node_frag_slab; 28 - static kmem_cache_t *inode_cache_slab; 22 + static struct kmem_cache *full_dnode_slab; 23 + static struct kmem_cache *raw_dirent_slab; 24 + static struct kmem_cache *raw_inode_slab; 25 + static struct kmem_cache *tmp_dnode_info_slab; 26 + static struct kmem_cache *raw_node_ref_slab; 27 + static struct kmem_cache *node_frag_slab; 28 + static struct kmem_cache *inode_cache_slab; 29 29 #ifdef CONFIG_JFFS2_FS_XATTR 30 - static kmem_cache_t *xattr_datum_cache; 31 - static kmem_cache_t *xattr_ref_cache; 30 + static struct kmem_cache *xattr_datum_cache; 31 + static struct kmem_cache *xattr_ref_cache; 32 32 #endif 33 33 34 34 int __init jffs2_create_slab_caches(void)
+2 -2
fs/jffs2/super.c
··· 28 28 29 29 static void jffs2_put_super(struct super_block *); 30 30 31 - static kmem_cache_t *jffs2_inode_cachep; 31 + static struct kmem_cache *jffs2_inode_cachep; 32 32 33 33 static struct inode *jffs2_alloc_inode(struct super_block *sb) 34 34 { ··· 44 44 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); 45 45 } 46 46 47 - static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 47 + static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 48 48 { 49 49 struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; 50 50
+2 -2
fs/jfs/jfs_metapage.c
··· 74 74 } 75 75 76 76 #define METAPOOL_MIN_PAGES 32 77 - static kmem_cache_t *metapage_cache; 77 + static struct kmem_cache *metapage_cache; 78 78 static mempool_t *metapage_mempool; 79 79 80 80 #define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) ··· 180 180 181 181 #endif 182 182 183 - static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 183 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 184 184 { 185 185 struct metapage *mp = (struct metapage *)foo; 186 186
+2 -2
fs/jfs/super.c
··· 44 44 MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM"); 45 45 MODULE_LICENSE("GPL"); 46 46 47 - static kmem_cache_t * jfs_inode_cachep; 47 + static struct kmem_cache * jfs_inode_cachep; 48 48 49 49 static struct super_operations jfs_super_operations; 50 50 static struct export_operations jfs_export_operations; ··· 748 748 .fs_flags = FS_REQUIRES_DEV, 749 749 }; 750 750 751 - static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 751 + static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) 752 752 { 753 753 struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; 754 754
+2 -2
fs/locks.c
··· 142 142 static LIST_HEAD(file_lock_list); 143 143 static LIST_HEAD(blocked_list); 144 144 145 - static kmem_cache_t *filelock_cache __read_mostly; 145 + static struct kmem_cache *filelock_cache __read_mostly; 146 146 147 147 /* Allocate an empty lock structure. */ 148 148 static struct file_lock *locks_alloc_lock(void) ··· 199 199 * Initialises the fields of the file lock which are invariant for 200 200 * free file_locks. 201 201 */ 202 - static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags) 202 + static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags) 203 203 { 204 204 struct file_lock *lock = (struct file_lock *) foo; 205 205
+1 -1
fs/mbcache.c
··· 85 85 #ifndef MB_CACHE_INDEXES_COUNT 86 86 int c_indexes_count; 87 87 #endif 88 - kmem_cache_t *c_entry_cache; 88 + struct kmem_cache *c_entry_cache; 89 89 struct list_head *c_block_hash; 90 90 struct list_head *c_indexes_hash[0]; 91 91 };
+2 -2
fs/minix/inode.c
··· 51 51 return; 52 52 } 53 53 54 - static kmem_cache_t * minix_inode_cachep; 54 + static struct kmem_cache * minix_inode_cachep; 55 55 56 56 static struct inode *minix_alloc_inode(struct super_block *sb) 57 57 { ··· 67 67 kmem_cache_free(minix_inode_cachep, minix_i(inode)); 68 68 } 69 69 70 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 70 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 71 71 { 72 72 struct minix_inode_info *ei = (struct minix_inode_info *) foo; 73 73
+1 -1
fs/namespace.c
··· 36 36 37 37 static struct list_head *mount_hashtable __read_mostly; 38 38 static int hash_mask __read_mostly, hash_bits __read_mostly; 39 - static kmem_cache_t *mnt_cache __read_mostly; 39 + static struct kmem_cache *mnt_cache __read_mostly; 40 40 static struct rw_semaphore namespace_sem; 41 41 42 42 /* /sys/fs */
+2 -2
fs/ncpfs/inode.c
··· 40 40 static void ncp_put_super(struct super_block *); 41 41 static int ncp_statfs(struct dentry *, struct kstatfs *); 42 42 43 - static kmem_cache_t * ncp_inode_cachep; 43 + static struct kmem_cache * ncp_inode_cachep; 44 44 45 45 static struct inode *ncp_alloc_inode(struct super_block *sb) 46 46 { ··· 56 56 kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); 57 57 } 58 58 59 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 59 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 60 60 { 61 61 struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; 62 62
+1 -1
fs/nfs/direct.c
··· 58 58 59 59 #define NFSDBG_FACILITY NFSDBG_VFS 60 60 61 - static kmem_cache_t *nfs_direct_cachep; 61 + static struct kmem_cache *nfs_direct_cachep; 62 62 63 63 /* 64 64 * This represents a set of asynchronous requests that we're waiting on
+2 -2
fs/nfs/inode.c
··· 55 55 56 56 static void nfs_zap_acl_cache(struct inode *); 57 57 58 - static kmem_cache_t * nfs_inode_cachep; 58 + static struct kmem_cache * nfs_inode_cachep; 59 59 60 60 static inline unsigned long 61 61 nfs_fattr_to_ino_t(struct nfs_fattr *fattr) ··· 1111 1111 #endif 1112 1112 } 1113 1113 1114 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 1114 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 1115 1115 { 1116 1116 struct nfs_inode *nfsi = (struct nfs_inode *) foo; 1117 1117
+1 -1
fs/nfs/pagelist.c
··· 20 20 21 21 #define NFS_PARANOIA 1 22 22 23 - static kmem_cache_t *nfs_page_cachep; 23 + static struct kmem_cache *nfs_page_cachep; 24 24 25 25 static inline struct nfs_page * 26 26 nfs_page_alloc(void)
+1 -1
fs/nfs/read.c
··· 38 38 static const struct rpc_call_ops nfs_read_partial_ops; 39 39 static const struct rpc_call_ops nfs_read_full_ops; 40 40 41 - static kmem_cache_t *nfs_rdata_cachep; 41 + static struct kmem_cache *nfs_rdata_cachep; 42 42 static mempool_t *nfs_rdata_mempool; 43 43 44 44 #define MIN_POOL_READ (32)
+1 -1
fs/nfs/write.c
··· 85 85 static const struct rpc_call_ops nfs_write_full_ops; 86 86 static const struct rpc_call_ops nfs_commit_ops; 87 87 88 - static kmem_cache_t *nfs_wdata_cachep; 88 + static struct kmem_cache *nfs_wdata_cachep; 89 89 static mempool_t *nfs_wdata_mempool; 90 90 static mempool_t *nfs_commit_mempool; 91 91
+5 -5
fs/nfsd/nfs4state.c
··· 84 84 */ 85 85 static DEFINE_MUTEX(client_mutex); 86 86 87 - static kmem_cache_t *stateowner_slab = NULL; 88 - static kmem_cache_t *file_slab = NULL; 89 - static kmem_cache_t *stateid_slab = NULL; 90 - static kmem_cache_t *deleg_slab = NULL; 87 + static struct kmem_cache *stateowner_slab = NULL; 88 + static struct kmem_cache *file_slab = NULL; 89 + static struct kmem_cache *stateid_slab = NULL; 90 + static struct kmem_cache *deleg_slab = NULL; 91 91 92 92 void 93 93 nfs4_lock_state(void) ··· 1003 1003 } 1004 1004 1005 1005 static void 1006 - nfsd4_free_slab(kmem_cache_t **slab) 1006 + nfsd4_free_slab(struct kmem_cache **slab) 1007 1007 { 1008 1008 if (*slab == NULL) 1009 1009 return;
+2 -2
fs/ocfs2/dlm/dlmfs.c
··· 66 66 static struct inode_operations dlmfs_dir_inode_operations; 67 67 static struct inode_operations dlmfs_root_inode_operations; 68 68 static struct inode_operations dlmfs_file_inode_operations; 69 - static kmem_cache_t *dlmfs_inode_cache; 69 + static struct kmem_cache *dlmfs_inode_cache; 70 70 71 71 struct workqueue_struct *user_dlm_worker; 72 72 ··· 257 257 } 258 258 259 259 static void dlmfs_init_once(void *foo, 260 - kmem_cache_t *cachep, 260 + struct kmem_cache *cachep, 261 261 unsigned long flags) 262 262 { 263 263 struct dlmfs_inode_private *ip =
+1 -1
fs/ocfs2/dlm/dlmmaster.c
··· 221 221 #endif /* 0 */ 222 222 223 223 224 - static kmem_cache_t *dlm_mle_cache = NULL; 224 + static struct kmem_cache *dlm_mle_cache = NULL; 225 225 226 226 227 227 static void dlm_mle_release(struct kref *kref);
+1 -1
fs/ocfs2/extent_map.c
··· 61 61 struct ocfs2_extent_map_entry *right_ent; 62 62 }; 63 63 64 - static kmem_cache_t *ocfs2_em_ent_cachep = NULL; 64 + static struct kmem_cache *ocfs2_em_ent_cachep = NULL; 65 65 66 66 67 67 static struct ocfs2_extent_map_entry *
+1 -1
fs/ocfs2/inode.h
··· 106 106 #define INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags & OCFS2_INODE_JOURNAL) 107 107 #define SET_INODE_JOURNAL(i) (OCFS2_I(i)->ip_flags |= OCFS2_INODE_JOURNAL) 108 108 109 - extern kmem_cache_t *ocfs2_inode_cache; 109 + extern struct kmem_cache *ocfs2_inode_cache; 110 110 111 111 extern const struct address_space_operations ocfs2_aops; 112 112
+2 -2
fs/ocfs2/super.c
··· 68 68 69 69 #include "buffer_head_io.h" 70 70 71 - static kmem_cache_t *ocfs2_inode_cachep = NULL; 71 + static struct kmem_cache *ocfs2_inode_cachep = NULL; 72 72 73 73 /* OCFS2 needs to schedule several differnt types of work which 74 74 * require cluster locking, disk I/O, recovery waits, etc. Since these ··· 914 914 } 915 915 916 916 static void ocfs2_inode_init_once(void *data, 917 - kmem_cache_t *cachep, 917 + struct kmem_cache *cachep, 918 918 unsigned long flags) 919 919 { 920 920 struct ocfs2_inode_info *oi = data;
+1 -1
fs/ocfs2/uptodate.c
··· 69 69 sector_t c_block; 70 70 }; 71 71 72 - static kmem_cache_t *ocfs2_uptodate_cachep = NULL; 72 + static struct kmem_cache *ocfs2_uptodate_cachep = NULL; 73 73 74 74 void ocfs2_metadata_cache_init(struct inode *inode) 75 75 {
+2 -2
fs/openpromfs/inode.c
··· 330 330 return 0; 331 331 } 332 332 333 - static kmem_cache_t *op_inode_cachep; 333 + static struct kmem_cache *op_inode_cachep; 334 334 335 335 static struct inode *openprom_alloc_inode(struct super_block *sb) 336 336 { ··· 415 415 .kill_sb = kill_anon_super, 416 416 }; 417 417 418 - static void op_inode_init_once(void *data, kmem_cache_t * cachep, unsigned long flags) 418 + static void op_inode_init_once(void *data, struct kmem_cache * cachep, unsigned long flags) 419 419 { 420 420 struct op_inode_info *oi = (struct op_inode_info *) data; 421 421
+2 -2
fs/proc/inode.c
··· 81 81 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 82 82 } 83 83 84 - static kmem_cache_t * proc_inode_cachep; 84 + static struct kmem_cache * proc_inode_cachep; 85 85 86 86 static struct inode *proc_alloc_inode(struct super_block *sb) 87 87 { ··· 105 105 kmem_cache_free(proc_inode_cachep, PROC_I(inode)); 106 106 } 107 107 108 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 108 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 109 109 { 110 110 struct proc_inode *ei = (struct proc_inode *) foo; 111 111
+2 -2
fs/qnx4/inode.c
··· 515 515 brelse(bh); 516 516 } 517 517 518 - static kmem_cache_t *qnx4_inode_cachep; 518 + static struct kmem_cache *qnx4_inode_cachep; 519 519 520 520 static struct inode *qnx4_alloc_inode(struct super_block *sb) 521 521 { ··· 531 531 kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); 532 532 } 533 533 534 - static void init_once(void *foo, kmem_cache_t * cachep, 534 + static void init_once(void *foo, struct kmem_cache * cachep, 535 535 unsigned long flags) 536 536 { 537 537 struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
+2 -2
fs/reiserfs/super.c
··· 490 490 return; 491 491 } 492 492 493 - static kmem_cache_t *reiserfs_inode_cachep; 493 + static struct kmem_cache *reiserfs_inode_cachep; 494 494 495 495 static struct inode *reiserfs_alloc_inode(struct super_block *sb) 496 496 { ··· 507 507 kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); 508 508 } 509 509 510 - static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 510 + static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) 511 511 { 512 512 struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; 513 513
+2 -2
fs/romfs/inode.c
··· 550 550 } 551 551 } 552 552 553 - static kmem_cache_t * romfs_inode_cachep; 553 + static struct kmem_cache * romfs_inode_cachep; 554 554 555 555 static struct inode *romfs_alloc_inode(struct super_block *sb) 556 556 { ··· 566 566 kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); 567 567 } 568 568 569 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 569 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 570 570 { 571 571 struct romfs_inode_info *ei = (struct romfs_inode_info *) foo; 572 572
+2 -2
fs/smbfs/inode.c
··· 50 50 static int smb_statfs(struct dentry *, struct kstatfs *); 51 51 static int smb_show_options(struct seq_file *, struct vfsmount *); 52 52 53 - static kmem_cache_t *smb_inode_cachep; 53 + static struct kmem_cache *smb_inode_cachep; 54 54 55 55 static struct inode *smb_alloc_inode(struct super_block *sb) 56 56 { ··· 66 66 kmem_cache_free(smb_inode_cachep, SMB_I(inode)); 67 67 } 68 68 69 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 69 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 70 70 { 71 71 struct smb_inode_info *ei = (struct smb_inode_info *) foo; 72 72 unsigned long flagmask = SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR;
+1 -1
fs/smbfs/request.c
··· 25 25 #define ROUND_UP(x) (((x)+3) & ~3) 26 26 27 27 /* cache for request structures */ 28 - static kmem_cache_t *req_cachep; 28 + static struct kmem_cache *req_cachep; 29 29 30 30 static int smb_request_send_req(struct smb_request *req); 31 31
+1 -1
fs/sysfs/mount.c
··· 16 16 17 17 struct vfsmount *sysfs_mount; 18 18 struct super_block * sysfs_sb = NULL; 19 - kmem_cache_t *sysfs_dir_cachep; 19 + struct kmem_cache *sysfs_dir_cachep; 20 20 21 21 static struct super_operations sysfs_ops = { 22 22 .statfs = simple_statfs,
+1 -1
fs/sysfs/sysfs.h
··· 1 1 2 2 extern struct vfsmount * sysfs_mount; 3 - extern kmem_cache_t *sysfs_dir_cachep; 3 + extern struct kmem_cache *sysfs_dir_cachep; 4 4 5 5 extern struct inode * sysfs_new_inode(mode_t mode, struct sysfs_dirent *); 6 6 extern int sysfs_create(struct dentry *, int mode, int (*init)(struct inode *));
+2 -2
fs/sysv/inode.c
··· 301 301 unlock_kernel(); 302 302 } 303 303 304 - static kmem_cache_t *sysv_inode_cachep; 304 + static struct kmem_cache *sysv_inode_cachep; 305 305 306 306 static struct inode *sysv_alloc_inode(struct super_block *sb) 307 307 { ··· 318 318 kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); 319 319 } 320 320 321 - static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags) 321 + static void init_once(void *p, struct kmem_cache *cachep, unsigned long flags) 322 322 { 323 323 struct sysv_inode_info *si = (struct sysv_inode_info *)p; 324 324
+2 -2
fs/udf/super.c
··· 107 107 .fs_flags = FS_REQUIRES_DEV, 108 108 }; 109 109 110 - static kmem_cache_t * udf_inode_cachep; 110 + static struct kmem_cache * udf_inode_cachep; 111 111 112 112 static struct inode *udf_alloc_inode(struct super_block *sb) 113 113 { ··· 130 130 kmem_cache_free(udf_inode_cachep, UDF_I(inode)); 131 131 } 132 132 133 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 133 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 134 134 { 135 135 struct udf_inode_info *ei = (struct udf_inode_info *) foo; 136 136
+2 -2
fs/ufs/super.c
··· 1204 1204 return 0; 1205 1205 } 1206 1206 1207 - static kmem_cache_t * ufs_inode_cachep; 1207 + static struct kmem_cache * ufs_inode_cachep; 1208 1208 1209 1209 static struct inode *ufs_alloc_inode(struct super_block *sb) 1210 1210 { ··· 1221 1221 kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); 1222 1222 } 1223 1223 1224 - static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 1224 + static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 1225 1225 { 1226 1226 struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; 1227 1227
+1 -1
include/acpi/platform/aclinux.h
··· 64 64 /* Host-dependent types and defines */ 65 65 66 66 #define ACPI_MACHINE_WIDTH BITS_PER_LONG 67 - #define acpi_cache_t kmem_cache_t 67 + #define acpi_cache_t struct kmem_cache 68 68 #define acpi_spinlock spinlock_t * 69 69 #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); 70 70 #define strtoul simple_strtoul
+1 -1
include/asm-arm26/pgalloc.h
··· 15 15 #include <asm/tlbflush.h> 16 16 #include <linux/slab.h> 17 17 18 - extern kmem_cache_t *pte_cache; 18 + extern struct kmem_cache *pte_cache; 19 19 20 20 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){ 21 21 return kmem_cache_alloc(pte_cache, GFP_KERNEL);
+5 -5
include/asm-i386/pgtable.h
··· 34 34 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 35 35 extern unsigned long empty_zero_page[1024]; 36 36 extern pgd_t swapper_pg_dir[1024]; 37 - extern kmem_cache_t *pgd_cache; 38 - extern kmem_cache_t *pmd_cache; 37 + extern struct kmem_cache *pgd_cache; 38 + extern struct kmem_cache *pmd_cache; 39 39 extern spinlock_t pgd_lock; 40 40 extern struct page *pgd_list; 41 41 42 - void pmd_ctor(void *, kmem_cache_t *, unsigned long); 43 - void pgd_ctor(void *, kmem_cache_t *, unsigned long); 44 - void pgd_dtor(void *, kmem_cache_t *, unsigned long); 42 + void pmd_ctor(void *, struct kmem_cache *, unsigned long); 43 + void pgd_ctor(void *, struct kmem_cache *, unsigned long); 44 + void pgd_dtor(void *, struct kmem_cache *, unsigned long); 45 45 void pgtable_cache_init(void); 46 46 void paging_init(void); 47 47
+1 -1
include/asm-powerpc/pgalloc.h
··· 11 11 #include <linux/cpumask.h> 12 12 #include <linux/percpu.h> 13 13 14 - extern kmem_cache_t *pgtable_cache[]; 14 + extern struct kmem_cache *pgtable_cache[]; 15 15 16 16 #ifdef CONFIG_PPC_64K_PAGES 17 17 #define PTE_CACHE_NUM 0
+1 -1
include/asm-sparc64/pgalloc.h
··· 13 13 #include <asm/page.h> 14 14 15 15 /* Page table allocation/freeing. */ 16 - extern kmem_cache_t *pgtable_cache; 16 + extern struct kmem_cache *pgtable_cache; 17 17 18 18 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 19 19 {
+1 -1
include/linux/delayacct.h
··· 30 30 #ifdef CONFIG_TASK_DELAY_ACCT 31 31 32 32 extern int delayacct_on; /* Delay accounting turned on/off */ 33 - extern kmem_cache_t *delayacct_cache; 33 + extern struct kmem_cache *delayacct_cache; 34 34 extern void delayacct_init(void); 35 35 extern void __delayacct_tsk_init(struct task_struct *); 36 36 extern void __delayacct_tsk_exit(struct task_struct *);
+1 -1
include/linux/i2o.h
··· 490 490 */ 491 491 struct i2o_pool { 492 492 char *name; 493 - kmem_cache_t *slab; 493 + struct kmem_cache *slab; 494 494 mempool_t *mempool; 495 495 }; 496 496
+1 -1
include/linux/jbd.h
··· 949 949 /* 950 950 * handle management 951 951 */ 952 - extern kmem_cache_t *jbd_handle_cache; 952 + extern struct kmem_cache *jbd_handle_cache; 953 953 954 954 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 955 955 {
+1 -1
include/linux/jbd2.h
··· 958 958 /* 959 959 * handle management 960 960 */ 961 - extern kmem_cache_t *jbd2_handle_cache; 961 + extern struct kmem_cache *jbd2_handle_cache; 962 962 963 963 static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 964 964 {
+1 -1
include/linux/raid/raid5.h
··· 235 235 */ 236 236 int active_name; 237 237 char cache_name[2][20]; 238 - kmem_cache_t *slab_cache; /* for allocating stripes */ 238 + struct kmem_cache *slab_cache; /* for allocating stripes */ 239 239 240 240 int seq_flush, seq_write; 241 241 int quiesce;
+1 -1
include/linux/rmap.h
··· 30 30 31 31 #ifdef CONFIG_MMU 32 32 33 - extern kmem_cache_t *anon_vma_cachep; 33 + extern struct kmem_cache *anon_vma_cachep; 34 34 35 35 static inline struct anon_vma *anon_vma_alloc(void) 36 36 {
+1 -1
include/linux/skbuff.h
··· 345 345 return __alloc_skb(size, priority, 1, -1); 346 346 } 347 347 348 - extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 348 + extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, 349 349 unsigned int size, 350 350 gfp_t priority); 351 351 extern void kfree_skbmem(struct sk_buff *skb);
+1 -1
include/linux/taskstats_kern.h
··· 12 12 #include <net/genetlink.h> 13 13 14 14 #ifdef CONFIG_TASKSTATS 15 - extern kmem_cache_t *taskstats_cache; 15 + extern struct kmem_cache *taskstats_cache; 16 16 extern struct mutex taskstats_exit_mutex; 17 17 18 18 static inline void taskstats_exit_free(struct taskstats *tidstats)
+1 -1
include/net/dst.h
··· 98 98 int entry_size; 99 99 100 100 atomic_t entries; 101 - kmem_cache_t *kmem_cachep; 101 + struct kmem_cache *kmem_cachep; 102 102 }; 103 103 104 104 #ifdef __KERNEL__
+3 -3
include/net/inet_hashtables.h
··· 125 125 rwlock_t lhash_lock ____cacheline_aligned; 126 126 atomic_t lhash_users; 127 127 wait_queue_head_t lhash_wait; 128 - kmem_cache_t *bind_bucket_cachep; 128 + struct kmem_cache *bind_bucket_cachep; 129 129 }; 130 130 131 131 static inline struct inet_ehash_bucket *inet_ehash_bucket( ··· 136 136 } 137 137 138 138 extern struct inet_bind_bucket * 139 - inet_bind_bucket_create(kmem_cache_t *cachep, 139 + inet_bind_bucket_create(struct kmem_cache *cachep, 140 140 struct inet_bind_hashbucket *head, 141 141 const unsigned short snum); 142 - extern void inet_bind_bucket_destroy(kmem_cache_t *cachep, 142 + extern void inet_bind_bucket_destroy(struct kmem_cache *cachep, 143 143 struct inet_bind_bucket *tb); 144 144 145 145 static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
+1 -1
include/net/neighbour.h
··· 160 160 atomic_t entries; 161 161 rwlock_t lock; 162 162 unsigned long last_rand; 163 - kmem_cache_t *kmem_cachep; 163 + struct kmem_cache *kmem_cachep; 164 164 struct neigh_statistics *stats; 165 165 struct neighbour **hash_buckets; 166 166 unsigned int hash_mask;
+1 -1
include/net/netfilter/nf_conntrack_expect.h
··· 7 7 #include <net/netfilter/nf_conntrack.h> 8 8 9 9 extern struct list_head nf_conntrack_expect_list; 10 - extern kmem_cache_t *nf_conntrack_expect_cachep; 10 + extern struct kmem_cache *nf_conntrack_expect_cachep; 11 11 extern struct file_operations exp_file_ops; 12 12 13 13 struct nf_conntrack_expect
+1 -1
include/net/request_sock.h
··· 29 29 struct request_sock_ops { 30 30 int family; 31 31 int obj_size; 32 - kmem_cache_t *slab; 32 + struct kmem_cache *slab; 33 33 int (*rtx_syn_ack)(struct sock *sk, 34 34 struct request_sock *req, 35 35 struct dst_entry *dst);
+1 -1
include/net/sock.h
··· 571 571 int *sysctl_rmem; 572 572 int max_header; 573 573 574 - kmem_cache_t *slab; 574 + struct kmem_cache *slab; 575 575 unsigned int obj_size; 576 576 577 577 atomic_t *orphan_count;
+1 -1
include/net/timewait_sock.h
··· 15 15 #include <net/sock.h> 16 16 17 17 struct timewait_sock_ops { 18 - kmem_cache_t *twsk_slab; 18 + struct kmem_cache *twsk_slab; 19 19 unsigned int twsk_obj_size; 20 20 int (*twsk_unique)(struct sock *sk, 21 21 struct sock *sktw, void *twp);
+2 -2
include/scsi/libsas.h
··· 557 557 558 558 static inline struct sas_task *sas_alloc_task(gfp_t flags) 559 559 { 560 - extern kmem_cache_t *sas_task_cache; 560 + extern struct kmem_cache *sas_task_cache; 561 561 struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags); 562 562 563 563 if (task) { ··· 575 575 static inline void sas_free_task(struct sas_task *task) 576 576 { 577 577 if (task) { 578 - extern kmem_cache_t *sas_task_cache; 578 + extern struct kmem_cache *sas_task_cache; 579 579 BUG_ON(!list_empty(&task->list)); 580 580 kmem_cache_free(sas_task_cache, task); 581 581 }
+2 -2
ipc/mqueue.c
··· 90 90 static void remove_notification(struct mqueue_inode_info *info); 91 91 92 92 static spinlock_t mq_lock; 93 - static kmem_cache_t *mqueue_inode_cachep; 93 + static struct kmem_cache *mqueue_inode_cachep; 94 94 static struct vfsmount *mqueue_mnt; 95 95 96 96 static unsigned int queues_count; ··· 211 211 return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); 212 212 } 213 213 214 - static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) 214 + static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags) 215 215 { 216 216 struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; 217 217
+1 -1
kernel/delayacct.c
··· 20 20 #include <linux/delayacct.h> 21 21 22 22 int delayacct_on __read_mostly = 1; /* Delay accounting turned on/off */ 23 - kmem_cache_t *delayacct_cache; 23 + struct kmem_cache *delayacct_cache; 24 24 25 25 static int __init delayacct_setup_disable(char *str) 26 26 {
+8 -8
kernel/fork.c
··· 82 82 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 83 83 # define alloc_task_struct() kmem_cache_alloc(task_struct_cachep, GFP_KERNEL) 84 84 # define free_task_struct(tsk) kmem_cache_free(task_struct_cachep, (tsk)) 85 - static kmem_cache_t *task_struct_cachep; 85 + static struct kmem_cache *task_struct_cachep; 86 86 #endif 87 87 88 88 /* SLAB cache for signal_struct structures (tsk->signal) */ 89 - static kmem_cache_t *signal_cachep; 89 + static struct kmem_cache *signal_cachep; 90 90 91 91 /* SLAB cache for sighand_struct structures (tsk->sighand) */ 92 - kmem_cache_t *sighand_cachep; 92 + struct kmem_cache *sighand_cachep; 93 93 94 94 /* SLAB cache for files_struct structures (tsk->files) */ 95 - kmem_cache_t *files_cachep; 95 + struct kmem_cache *files_cachep; 96 96 97 97 /* SLAB cache for fs_struct structures (tsk->fs) */ 98 - kmem_cache_t *fs_cachep; 98 + struct kmem_cache *fs_cachep; 99 99 100 100 /* SLAB cache for vm_area_struct structures */ 101 - kmem_cache_t *vm_area_cachep; 101 + struct kmem_cache *vm_area_cachep; 102 102 103 103 /* SLAB cache for mm_struct structures (tsk->mm) */ 104 - static kmem_cache_t *mm_cachep; 104 + static struct kmem_cache *mm_cachep; 105 105 106 106 void free_task(struct task_struct *tsk) 107 107 { ··· 1421 1421 #define ARCH_MIN_MMSTRUCT_ALIGN 0 1422 1422 #endif 1423 1423 1424 - static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags) 1424 + static void sighand_ctor(void *data, struct kmem_cache *cachep, unsigned long flags) 1425 1425 { 1426 1426 struct sighand_struct *sighand = data; 1427 1427
+1 -1
kernel/pid.c
··· 31 31 #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) 32 32 static struct hlist_head *pid_hash; 33 33 static int pidhash_shift; 34 - static kmem_cache_t *pid_cachep; 34 + static struct kmem_cache *pid_cachep; 35 35 36 36 int pid_max = PID_MAX_DEFAULT; 37 37
+1 -1
kernel/posix-timers.c
··· 70 70 /* 71 71 * Lets keep our timers in a slab cache :-) 72 72 */ 73 - static kmem_cache_t *posix_timers_cache; 73 + static struct kmem_cache *posix_timers_cache; 74 74 static struct idr posix_timers_id; 75 75 static DEFINE_SPINLOCK(idr_lock); 76 76
+1 -1
kernel/signal.c
··· 33 33 * SLAB caches for signal bits. 34 34 */ 35 35 36 - static kmem_cache_t *sigqueue_cachep; 36 + static struct kmem_cache *sigqueue_cachep; 37 37 38 38 /* 39 39 * In POSIX a signal is sent either to a specific thread (Linux task)
+1 -1
kernel/taskstats.c
··· 34 34 35 35 static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; 36 36 static int family_registered; 37 - kmem_cache_t *taskstats_cache; 37 + struct kmem_cache *taskstats_cache; 38 38 39 39 static struct genl_family family = { 40 40 .id = GENL_ID_GENERATE,
+1 -1
kernel/user.c
··· 26 26 #define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK) 27 27 #define uidhashentry(uid) (uidhash_table + __uidhashfn((uid))) 28 28 29 - static kmem_cache_t *uid_cachep; 29 + static struct kmem_cache *uid_cachep; 30 30 static struct list_head uidhash_table[UIDHASH_SZ]; 31 31 32 32 /*
+2 -2
lib/idr.c
··· 33 33 #include <linux/string.h> 34 34 #include <linux/idr.h> 35 35 36 - static kmem_cache_t *idr_layer_cache; 36 + static struct kmem_cache *idr_layer_cache; 37 37 38 38 static struct idr_layer *alloc_layer(struct idr *idp) 39 39 { ··· 445 445 } 446 446 EXPORT_SYMBOL(idr_replace); 447 447 448 - static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache, 448 + static void idr_cache_ctor(void * idr_layer, struct kmem_cache *idr_layer_cache, 449 449 unsigned long flags) 450 450 { 451 451 memset(idr_layer, 0, sizeof(struct idr_layer));
+2 -2
lib/radix-tree.c
··· 63 63 /* 64 64 * Radix tree node cache. 65 65 */ 66 - static kmem_cache_t *radix_tree_node_cachep; 66 + static struct kmem_cache *radix_tree_node_cachep; 67 67 68 68 /* 69 69 * Per-cpu pool of preloaded nodes ··· 846 846 EXPORT_SYMBOL(radix_tree_tagged); 847 847 848 848 static void 849 - radix_tree_node_ctor(void *node, kmem_cache_t *cachep, unsigned long flags) 849 + radix_tree_node_ctor(void *node, struct kmem_cache *cachep, unsigned long flags) 850 850 { 851 851 memset(node, 0, sizeof(struct radix_tree_node)); 852 852 }
+1 -1
net/bridge/br_fdb.c
··· 23 23 #include <asm/atomic.h> 24 24 #include "br_private.h" 25 25 26 - static kmem_cache_t *br_fdb_cache __read_mostly; 26 + static struct kmem_cache *br_fdb_cache __read_mostly; 27 27 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 28 28 const unsigned char *addr); 29 29
+1 -1
net/core/flow.c
··· 44 44 45 45 #define flow_table(cpu) (per_cpu(flow_tables, cpu)) 46 46 47 - static kmem_cache_t *flow_cachep __read_mostly; 47 + static struct kmem_cache *flow_cachep __read_mostly; 48 48 49 49 static int flow_lwm, flow_hwm; 50 50
+4 -4
net/core/skbuff.c
··· 68 68 69 69 #include "kmap_skb.h" 70 70 71 - static kmem_cache_t *skbuff_head_cache __read_mostly; 72 - static kmem_cache_t *skbuff_fclone_cache __read_mostly; 71 + static struct kmem_cache *skbuff_head_cache __read_mostly; 72 + static struct kmem_cache *skbuff_fclone_cache __read_mostly; 73 73 74 74 /* 75 75 * Keep out-of-line to prevent kernel bloat. ··· 144 144 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 145 145 int fclone, int node) 146 146 { 147 - kmem_cache_t *cache; 147 + struct kmem_cache *cache; 148 148 struct skb_shared_info *shinfo; 149 149 struct sk_buff *skb; 150 150 u8 *data; ··· 211 211 * Buffers may only be allocated from interrupts using a @gfp_mask of 212 212 * %GFP_ATOMIC. 213 213 */ 214 - struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 214 + struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp, 215 215 unsigned int size, 216 216 gfp_t gfp_mask) 217 217 {
+1 -1
net/core/sock.c
··· 841 841 struct proto *prot, int zero_it) 842 842 { 843 843 struct sock *sk = NULL; 844 - kmem_cache_t *slab = prot->slab; 844 + struct kmem_cache *slab = prot->slab; 845 845 846 846 if (slab != NULL) 847 847 sk = kmem_cache_alloc(slab, priority);
+2 -2
net/dccp/ackvec.c
··· 21 21 22 22 #include <net/sock.h> 23 23 24 - static kmem_cache_t *dccp_ackvec_slab; 25 - static kmem_cache_t *dccp_ackvec_record_slab; 24 + static struct kmem_cache *dccp_ackvec_slab; 25 + static struct kmem_cache *dccp_ackvec_record_slab; 26 26 27 27 static struct dccp_ackvec_record *dccp_ackvec_record_new(void) 28 28 {
+3 -3
net/dccp/ccid.c
··· 55 55 #define ccids_read_unlock() do { } while(0) 56 56 #endif 57 57 58 - static kmem_cache_t *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 58 + static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...) 59 59 { 60 - kmem_cache_t *slab; 60 + struct kmem_cache *slab; 61 61 char slab_name_fmt[32], *slab_name; 62 62 va_list args; 63 63 ··· 75 75 return slab; 76 76 } 77 77 78 - static void ccid_kmem_cache_destroy(kmem_cache_t *slab) 78 + static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 79 79 { 80 80 if (slab != NULL) { 81 81 const char *name = kmem_cache_name(slab);
+2 -2
net/dccp/ccid.h
··· 27 27 unsigned char ccid_id; 28 28 const char *ccid_name; 29 29 struct module *ccid_owner; 30 - kmem_cache_t *ccid_hc_rx_slab; 30 + struct kmem_cache *ccid_hc_rx_slab; 31 31 __u32 ccid_hc_rx_obj_size; 32 - kmem_cache_t *ccid_hc_tx_slab; 32 + struct kmem_cache *ccid_hc_tx_slab; 33 33 __u32 ccid_hc_tx_obj_size; 34 34 int (*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk); 35 35 int (*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
+1 -1
net/dccp/ccids/lib/loss_interval.h
··· 20 20 #define DCCP_LI_HIST_IVAL_F_LENGTH 8 21 21 22 22 struct dccp_li_hist { 23 - kmem_cache_t *dccplih_slab; 23 + struct kmem_cache *dccplih_slab; 24 24 }; 25 25 26 26 extern struct dccp_li_hist *dccp_li_hist_new(const char *name);
+2 -2
net/dccp/ccids/lib/packet_history.h
··· 68 68 }; 69 69 70 70 struct dccp_tx_hist { 71 - kmem_cache_t *dccptxh_slab; 71 + struct kmem_cache *dccptxh_slab; 72 72 }; 73 73 74 74 extern struct dccp_tx_hist *dccp_tx_hist_new(const char *name); 75 75 extern void dccp_tx_hist_delete(struct dccp_tx_hist *hist); 76 76 77 77 struct dccp_rx_hist { 78 - kmem_cache_t *dccprxh_slab; 78 + struct kmem_cache *dccprxh_slab; 79 79 }; 80 80 81 81 extern struct dccp_rx_hist *dccp_rx_hist_new(const char *name);
+1 -1
net/decnet/dn_table.c
··· 79 79 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ]; 80 80 static DEFINE_RWLOCK(dn_fib_tables_lock); 81 81 82 - static kmem_cache_t *dn_hash_kmem __read_mostly; 82 + static struct kmem_cache *dn_hash_kmem __read_mostly; 83 83 static int dn_fib_hash_zombies; 84 84 85 85 static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
+2 -2
net/ipv4/fib_hash.c
··· 45 45 46 46 #include "fib_lookup.h" 47 47 48 - static kmem_cache_t *fn_hash_kmem __read_mostly; 49 - static kmem_cache_t *fn_alias_kmem __read_mostly; 48 + static struct kmem_cache *fn_hash_kmem __read_mostly; 49 + static struct kmem_cache *fn_alias_kmem __read_mostly; 50 50 51 51 struct fib_node { 52 52 struct hlist_node fn_hash;
+1 -1
net/ipv4/fib_trie.c
··· 172 172 static struct tnode *halve(struct trie *t, struct tnode *tn); 173 173 static void tnode_free(struct tnode *tn); 174 174 175 - static kmem_cache_t *fn_alias_kmem __read_mostly; 175 + static struct kmem_cache *fn_alias_kmem __read_mostly; 176 176 static struct trie *trie_local = NULL, *trie_main = NULL; 177 177 178 178
+2 -2
net/ipv4/inet_hashtables.c
··· 27 27 * Allocate and initialize a new local port bind bucket. 28 28 * The bindhash mutex for snum's hash chain must be held here. 29 29 */ 30 - struct inet_bind_bucket *inet_bind_bucket_create(kmem_cache_t *cachep, 30 + struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, 31 31 struct inet_bind_hashbucket *head, 32 32 const unsigned short snum) 33 33 { ··· 45 45 /* 46 46 * Caller must hold hashbucket lock for this tb with local BH disabled 47 47 */ 48 - void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb) 48 + void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) 49 49 { 50 50 if (hlist_empty(&tb->owners)) { 51 51 __hlist_del(&tb->node);
+1 -1
net/ipv4/inetpeer.c
··· 73 73 /* Exported for inet_getid inline function. */ 74 74 DEFINE_SPINLOCK(inet_peer_idlock); 75 75 76 - static kmem_cache_t *peer_cachep __read_mostly; 76 + static struct kmem_cache *peer_cachep __read_mostly; 77 77 78 78 #define node_height(x) x->avl_height 79 79 static struct inet_peer peer_fake_node = {
+1 -1
net/ipv4/ipmr.c
··· 105 105 In this case data path is free of exclusive locks at all. 106 106 */ 107 107 108 - static kmem_cache_t *mrt_cachep __read_mostly; 108 + static struct kmem_cache *mrt_cachep __read_mostly; 109 109 110 110 static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); 111 111 static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
+1 -1
net/ipv4/ipvs/ip_vs_conn.c
··· 44 44 static struct list_head *ip_vs_conn_tab; 45 45 46 46 /* SLAB cache for IPVS connections */ 47 - static kmem_cache_t *ip_vs_conn_cachep __read_mostly; 47 + static struct kmem_cache *ip_vs_conn_cachep __read_mostly; 48 48 49 49 /* counter for current IPVS connections */ 50 50 static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
+2 -2
net/ipv4/netfilter/ip_conntrack_core.c
··· 65 65 unsigned int ip_conntrack_htable_size __read_mostly = 0; 66 66 int ip_conntrack_max __read_mostly; 67 67 struct list_head *ip_conntrack_hash __read_mostly; 68 - static kmem_cache_t *ip_conntrack_cachep __read_mostly; 69 - static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly; 68 + static struct kmem_cache *ip_conntrack_cachep __read_mostly; 69 + static struct kmem_cache *ip_conntrack_expect_cachep __read_mostly; 70 70 struct ip_conntrack ip_conntrack_untracked; 71 71 unsigned int ip_ct_log_invalid __read_mostly; 72 72 static LIST_HEAD(unconfirmed);
+1 -1
net/ipv6/ip6_fib.c
··· 50 50 51 51 struct rt6_statistics rt6_stats; 52 52 53 - static kmem_cache_t * fib6_node_kmem __read_mostly; 53 + static struct kmem_cache * fib6_node_kmem __read_mostly; 54 54 55 55 enum fib_walk_state_t 56 56 {
+1 -1
net/ipv6/xfrm6_tunnel.c
··· 50 50 #define XFRM6_TUNNEL_SPI_MIN 1 51 51 #define XFRM6_TUNNEL_SPI_MAX 0xffffffff 52 52 53 - static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly; 53 + static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 54 54 55 55 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 56 56 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
+3 -3
net/netfilter/nf_conntrack_core.c
··· 108 108 size_t size; 109 109 110 110 /* slab cache pointer */ 111 - kmem_cache_t *cachep; 111 + struct kmem_cache *cachep; 112 112 113 113 /* allocated slab cache + modules which uses this slab cache */ 114 114 int use; ··· 147 147 { 148 148 int ret = 0; 149 149 char *cache_name; 150 - kmem_cache_t *cachep; 150 + struct kmem_cache *cachep; 151 151 152 152 DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n", 153 153 features, name, size); ··· 226 226 /* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */ 227 227 void nf_conntrack_unregister_cache(u_int32_t features) 228 228 { 229 - kmem_cache_t *cachep; 229 + struct kmem_cache *cachep; 230 230 char *name; 231 231 232 232 /*
+1 -1
net/netfilter/nf_conntrack_expect.c
··· 29 29 LIST_HEAD(nf_conntrack_expect_list); 30 30 EXPORT_SYMBOL_GPL(nf_conntrack_expect_list); 31 31 32 - kmem_cache_t *nf_conntrack_expect_cachep __read_mostly; 32 + struct kmem_cache *nf_conntrack_expect_cachep __read_mostly; 33 33 static unsigned int nf_conntrack_expect_next_id; 34 34 35 35 /* nf_conntrack_expect helper functions */
+1 -1
net/netfilter/xt_hashlimit.c
··· 92 92 static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ 93 93 static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ 94 94 static HLIST_HEAD(hashlimit_htables); 95 - static kmem_cache_t *hashlimit_cachep __read_mostly; 95 + static struct kmem_cache *hashlimit_cachep __read_mostly; 96 96 97 97 static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) 98 98 {
+2 -2
net/sctp/protocol.c
··· 79 79 static struct sctp_af *sctp_af_v4_specific; 80 80 static struct sctp_af *sctp_af_v6_specific; 81 81 82 - kmem_cache_t *sctp_chunk_cachep __read_mostly; 83 - kmem_cache_t *sctp_bucket_cachep __read_mostly; 82 + struct kmem_cache *sctp_chunk_cachep __read_mostly; 83 + struct kmem_cache *sctp_bucket_cachep __read_mostly; 84 84 85 85 /* Return the address of the control sock. */ 86 86 struct sock *sctp_get_ctl_sock(void)
+1 -1
net/sctp/sm_make_chunk.c
··· 65 65 #include <net/sctp/sctp.h> 66 66 #include <net/sctp/sm.h> 67 67 68 - extern kmem_cache_t *sctp_chunk_cachep; 68 + extern struct kmem_cache *sctp_chunk_cachep; 69 69 70 70 SCTP_STATIC 71 71 struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
+1 -1
net/sctp/socket.c
··· 107 107 struct sctp_association *, sctp_socket_type_t); 108 108 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; 109 109 110 - extern kmem_cache_t *sctp_bucket_cachep; 110 + extern struct kmem_cache *sctp_bucket_cachep; 111 111 112 112 /* Get the sndbuf space available at the time on the association. */ 113 113 static inline int sctp_wspace(struct sctp_association *asoc)
+2 -2
net/socket.c
··· 230 230 231 231 #define SOCKFS_MAGIC 0x534F434B 232 232 233 - static kmem_cache_t *sock_inode_cachep __read_mostly; 233 + static struct kmem_cache *sock_inode_cachep __read_mostly; 234 234 235 235 static struct inode *sock_alloc_inode(struct super_block *sb) 236 236 { ··· 257 257 container_of(inode, struct socket_alloc, vfs_inode)); 258 258 } 259 259 260 - static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) 260 + static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags) 261 261 { 262 262 struct socket_alloc *ei = (struct socket_alloc *)foo; 263 263
+2 -2
net/sunrpc/rpc_pipe.c
··· 33 33 static struct file_system_type rpc_pipe_fs_type; 34 34 35 35 36 - static kmem_cache_t *rpc_inode_cachep __read_mostly; 36 + static struct kmem_cache *rpc_inode_cachep __read_mostly; 37 37 38 38 #define RPC_UPCALL_TIMEOUT (30*HZ) 39 39 ··· 824 824 }; 825 825 826 826 static void 827 - init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) 827 + init_once(void * foo, struct kmem_cache * cachep, unsigned long flags) 828 828 { 829 829 struct rpc_inode *rpci = (struct rpc_inode *) foo; 830 830
+2 -2
net/sunrpc/sched.c
··· 34 34 #define RPC_BUFFER_MAXSIZE (2048) 35 35 #define RPC_BUFFER_POOLSIZE (8) 36 36 #define RPC_TASK_POOLSIZE (8) 37 - static kmem_cache_t *rpc_task_slabp __read_mostly; 38 - static kmem_cache_t *rpc_buffer_slabp __read_mostly; 37 + static struct kmem_cache *rpc_task_slabp __read_mostly; 38 + static struct kmem_cache *rpc_buffer_slabp __read_mostly; 39 39 static mempool_t *rpc_task_mempool __read_mostly; 40 40 static mempool_t *rpc_buffer_mempool __read_mostly; 41 41
+1 -1
net/tipc/handler.c
··· 42 42 unsigned long data; 43 43 }; 44 44 45 - static kmem_cache_t *tipc_queue_item_cache; 45 + static struct kmem_cache *tipc_queue_item_cache; 46 46 static struct list_head signal_queue_head; 47 47 static DEFINE_SPINLOCK(qitem_lock); 48 48 static int handler_enabled = 0;
+1 -1
net/xfrm/xfrm_input.c
··· 12 12 #include <net/ip.h> 13 13 #include <net/xfrm.h> 14 14 15 - static kmem_cache_t *secpath_cachep __read_mostly; 15 + static struct kmem_cache *secpath_cachep __read_mostly; 16 16 17 17 void __secpath_destroy(struct sec_path *sp) 18 18 {
+1 -1
net/xfrm/xfrm_policy.c
··· 39 39 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 40 40 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 41 41 42 - static kmem_cache_t *xfrm_dst_cache __read_mostly; 42 + static struct kmem_cache *xfrm_dst_cache __read_mostly; 43 43 44 44 static struct work_struct xfrm_policy_gc_work; 45 45 static HLIST_HEAD(xfrm_policy_gc_list);
+1 -1
security/keys/key.c
··· 20 20 #include <linux/err.h> 21 21 #include "internal.h" 22 22 23 - static kmem_cache_t *key_jar; 23 + static struct kmem_cache *key_jar; 24 24 struct rb_root key_serial_tree; /* tree of keys indexed by serial */ 25 25 DEFINE_SPINLOCK(key_serial_lock); 26 26
+1 -1
security/selinux/avc.c
··· 124 124 125 125 static struct avc_cache avc_cache; 126 126 static struct avc_callback_node *avc_callbacks; 127 - static kmem_cache_t *avc_node_cachep; 127 + static struct kmem_cache *avc_node_cachep; 128 128 129 129 static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) 130 130 {
+1 -1
security/selinux/hooks.c
··· 124 124 static LIST_HEAD(superblock_security_head); 125 125 static DEFINE_SPINLOCK(sb_security_lock); 126 126 127 - static kmem_cache_t *sel_inode_cache; 127 + static struct kmem_cache *sel_inode_cache; 128 128 129 129 /* Return security context for a given sid or just the context 130 130 length if the buffer is null or length is 0 */
+1 -1
security/selinux/ss/avtab.c
··· 28 28 (keyp->source_type << 9)) & \ 29 29 AVTAB_HASH_MASK) 30 30 31 - static kmem_cache_t *avtab_node_cachep; 31 + static struct kmem_cache *avtab_node_cachep; 32 32 33 33 static struct avtab_node* 34 34 avtab_insert_node(struct avtab *h, int hvalue,