[PATCH] gfp flags annotations - part 1

- added typedef unsigned int __nocast gfp_t;

- replaced __nocast uses for gfp flags with gfp_t - it gives exactly
the same warnings as far as sparse is concerned, doesn't change the
generated code (from gcc's point of view we have simply replaced
unsigned int with a typedef) and documents what's going on far better.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Al Viro and committed by Linus Torvalds (dd0fc66f 3b0e77bd)
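
For orientation, a minimal sketch of the mechanical change this patch makes at
each annotated site. The prototype below is hypothetical; only the gfp_t
spelling itself comes from the patch:

	#include <linux/types.h>	/* gfp_t lives here after this patch */

	/*
	 * Hypothetical prototype, for illustration only.
	 *
	 * Before the patch the annotation had to be spelled out at every
	 * declaration:
	 *
	 *	void *example_alloc(unsigned long size, unsigned int __nocast flags);
	 *
	 * After the patch the same sparse checking rides along with the
	 * typedef, and the declaration says what the argument actually is:
	 */
	void *example_alloc(unsigned long size, gfp_t flags);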

+340 -360
+1 -1
arch/cris/arch-v32/drivers/pci/dma.c
··· 24 24 }; 25 25 26 26 void *dma_alloc_coherent(struct device *dev, size_t size, 27 - dma_addr_t *dma_handle, unsigned int __nocast gfp) 27 + dma_addr_t *dma_handle, gfp_t gfp) 28 28 { 29 29 void *ret; 30 30 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+1 -1
arch/i386/kernel/pci-dma.c
··· 23 23 }; 24 24 25 25 void *dma_alloc_coherent(struct device *dev, size_t size, 26 - dma_addr_t *dma_handle, unsigned int __nocast gfp) 26 + dma_addr_t *dma_handle, gfp_t gfp) 27 27 { 28 28 void *ret; 29 29 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+1 -1
arch/ppc64/kernel/bpa_iommu.c
··· 310 310 311 311 312 312 static void *bpa_alloc_coherent(struct device *hwdev, size_t size, 313 - dma_addr_t *dma_handle, unsigned int __nocast flag) 313 + dma_addr_t *dma_handle, gfp_t flag) 314 314 { 315 315 void *ret; 316 316
+1 -1
arch/ppc64/kernel/dma.c
··· 53 53 EXPORT_SYMBOL(dma_set_mask); 54 54 55 55 void *dma_alloc_coherent(struct device *dev, size_t size, 56 - dma_addr_t *dma_handle, unsigned int __nocast flag) 56 + dma_addr_t *dma_handle, gfp_t flag) 57 57 { 58 58 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 59 59
+1 -1
arch/ppc64/kernel/iommu.c
··· 519 519 * to the dma address (mapping) of the first page. 520 520 */ 521 521 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 522 - dma_addr_t *dma_handle, unsigned int __nocast flag) 522 + dma_addr_t *dma_handle, gfp_t flag) 523 523 { 524 524 void *ret = NULL; 525 525 dma_addr_t mapping;
+1 -1
arch/ppc64/kernel/pci_direct_iommu.c
··· 31 31 #include "pci.h" 32 32 33 33 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 34 - dma_addr_t *dma_handle, unsigned int __nocast flag) 34 + dma_addr_t *dma_handle, gfp_t flag) 35 35 { 36 36 void *ret; 37 37
+1 -1
arch/ppc64/kernel/pci_iommu.c
··· 76 76 * to the dma address (mapping) of the first page. 77 77 */ 78 78 static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, 79 - dma_addr_t *dma_handle, unsigned int __nocast flag) 79 + dma_addr_t *dma_handle, gfp_t flag) 80 80 { 81 81 return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, 82 82 flag);
+1 -1
arch/ppc64/kernel/vio.c
··· 218 218 } 219 219 220 220 static void *vio_alloc_coherent(struct device *dev, size_t size, 221 - dma_addr_t *dma_handle, unsigned int __nocast flag) 221 + dma_addr_t *dma_handle, gfp_t flag) 222 222 { 223 223 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, 224 224 dma_handle, flag);
+1 -1
drivers/atm/ambassador.c
··· 795 795 } 796 796 797 797 static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, 798 - unsigned int __nocast priority) 798 + gfp_t priority) 799 799 { 800 800 rx_in rx; 801 801 amb_rxq * rxq;
+2 -3
drivers/atm/firestream.c
··· 1374 1374 } 1375 1375 } 1376 1376 1377 - static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags, 1378 - int alignment) 1377 + static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment) 1379 1378 { 1380 1379 void *t; 1381 1380 ··· 1465 1466 working again after that... -- REW */ 1466 1467 1467 1468 static void top_off_fp (struct fs_dev *dev, struct freepool *fp, 1468 - unsigned int __nocast gfp_flags) 1469 + gfp_t gfp_flags) 1469 1470 { 1470 1471 struct FS_BPENTRY *qe, *ne; 1471 1472 struct sk_buff *skb;
+1 -1
drivers/atm/fore200e.c
··· 178 178 179 179 180 180 static void* 181 - fore200e_kmalloc(int size, unsigned int __nocast flags) 181 + fore200e_kmalloc(int size, gfp_t flags) 182 182 { 183 183 void *chunk = kzalloc(size, flags); 184 184
+2 -3
drivers/base/dmapool.c
··· 156 156 157 157 158 158 static struct dma_page * 159 - pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags) 159 + pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) 160 160 { 161 161 struct dma_page *page; 162 162 int mapsize; ··· 262 262 * If such a memory block can't be allocated, null is returned. 263 263 */ 264 264 void * 265 - dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags, 266 - dma_addr_t *handle) 265 + dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) 267 266 { 268 267 unsigned long flags; 269 268 struct dma_page *page;
+2 -2
drivers/block/pktcdvd.c
··· 229 229 return 1; 230 230 } 231 231 232 - static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data) 232 + static void *pkt_rb_alloc(gfp_t gfp_mask, void *data) 233 233 { 234 234 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); 235 235 } ··· 2082 2082 } 2083 2083 2084 2084 2085 - static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data) 2085 + static void *psd_pool_alloc(gfp_t gfp_mask, void *data) 2086 2086 { 2087 2087 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); 2088 2088 }
+1 -1
drivers/bluetooth/bpa10x.c
··· 308 308 } 309 309 310 310 static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, 311 - size_t size, unsigned int __nocast flags, void *data) 311 + size_t size, gfp_t flags, void *data) 312 312 { 313 313 struct urb *urb; 314 314 struct usb_ctrlrequest *cr;
+1 -1
drivers/bluetooth/hci_usb.c
··· 132 132 { } /* Terminating entry */ 133 133 }; 134 134 135 - static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp) 135 + static struct _urb *_urb_alloc(int isoc, gfp_t gfp) 136 136 { 137 137 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 138 138 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
+1 -2
drivers/connector/connector.c
··· 69 69 * a new message. 70 70 * 71 71 */ 72 - int cn_netlink_send(struct cn_msg *msg, u32 __group, 73 - unsigned int __nocast gfp_mask) 72 + int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) 74 73 { 75 74 struct cn_callback_entry *__cbq; 76 75 unsigned int size;
+1 -1
drivers/ieee1394/raw1394.c
··· 98 98 99 99 static void queue_complete_cb(struct pending_request *req); 100 100 101 - static struct pending_request *__alloc_pending_request(unsigned int __nocast flags) 101 + static struct pending_request *__alloc_pending_request(gfp_t flags) 102 102 { 103 103 struct pending_request *req; 104 104
+1 -1
drivers/infiniband/core/mad.c
··· 783 783 u32 remote_qpn, u16 pkey_index, 784 784 struct ib_ah *ah, int rmpp_active, 785 785 int hdr_len, int data_len, 786 - unsigned int __nocast gfp_mask) 786 + gfp_t gfp_mask) 787 787 { 788 788 struct ib_mad_agent_private *mad_agent_priv; 789 789 struct ib_mad_send_buf *send_buf;
+3 -3
drivers/infiniband/core/sa_query.c
··· 574 574 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 575 575 struct ib_sa_path_rec *rec, 576 576 ib_sa_comp_mask comp_mask, 577 - int timeout_ms, unsigned int __nocast gfp_mask, 577 + int timeout_ms, gfp_t gfp_mask, 578 578 void (*callback)(int status, 579 579 struct ib_sa_path_rec *resp, 580 580 void *context), ··· 676 676 int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, 677 677 struct ib_sa_service_rec *rec, 678 678 ib_sa_comp_mask comp_mask, 679 - int timeout_ms, unsigned int __nocast gfp_mask, 679 + int timeout_ms, gfp_t gfp_mask, 680 680 void (*callback)(int status, 681 681 struct ib_sa_service_rec *resp, 682 682 void *context), ··· 759 759 u8 method, 760 760 struct ib_sa_mcmember_rec *rec, 761 761 ib_sa_comp_mask comp_mask, 762 - int timeout_ms, unsigned int __nocast gfp_mask, 762 + int timeout_ms, gfp_t gfp_mask, 763 763 void (*callback)(int status, 764 764 struct ib_sa_mcmember_rec *resp, 765 765 void *context),
+1 -1
drivers/md/dm-crypt.c
··· 96 96 /* 97 97 * Mempool alloc and free functions for the page 98 98 */ 99 - static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data) 99 + static void *mempool_alloc_page(gfp_t gfp_mask, void *data) 100 100 { 101 101 return alloc_page(gfp_mask); 102 102 }
+1 -1
drivers/md/dm-io.c
··· 32 32 static unsigned _num_ios; 33 33 static mempool_t *_io_pool; 34 34 35 - static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data) 35 + static void *alloc_io(gfp_t gfp_mask, void *pool_data) 36 36 { 37 37 return kmalloc(sizeof(struct io), gfp_mask); 38 38 }
+1 -1
drivers/md/dm-raid1.c
··· 122 122 /* FIXME move this */ 123 123 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 124 124 125 - static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data) 125 + static void *region_alloc(gfp_t gfp_mask, void *pool_data) 126 126 { 127 127 return kmalloc(sizeof(struct region), gfp_mask); 128 128 }
+1 -1
drivers/md/multipath.c
··· 38 38 static mdk_personality_t multipath_personality; 39 39 40 40 41 - static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data) 41 + static void *mp_pool_alloc(gfp_t gfp_flags, void *data) 42 42 { 43 43 struct multipath_bh *mpb; 44 44 mpb = kmalloc(sizeof(*mpb), gfp_flags);
+2 -2
drivers/md/raid1.c
··· 52 52 static void unplug_slaves(mddev_t *mddev); 53 53 54 54 55 - static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) 55 + static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 56 56 { 57 57 struct pool_info *pi = data; 58 58 r1bio_t *r1_bio; ··· 79 79 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 80 80 #define RESYNC_WINDOW (2048*1024) 81 81 82 - static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) 82 + static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 83 83 { 84 84 struct pool_info *pi = data; 85 85 struct page *page;
+2 -2
drivers/md/raid10.c
··· 47 47 48 48 static void unplug_slaves(mddev_t *mddev); 49 49 50 - static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) 50 + static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 51 51 { 52 52 conf_t *conf = data; 53 53 r10bio_t *r10_bio; ··· 81 81 * one for write (we recover only one drive per r10buf) 82 82 * 83 83 */ 84 - static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) 84 + static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 85 85 { 86 86 conf_t *conf = data; 87 87 struct page *page;
+1 -1
drivers/net/bonding/bond_main.c
··· 1290 1290 * Copy all the Multicast addresses from src to the bonding device dst 1291 1291 */ 1292 1292 static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, 1293 - unsigned int __nocast gfp_flag) 1293 + gfp_t gfp_flag) 1294 1294 { 1295 1295 struct dev_mc_list *dmi, *new_dmi; 1296 1296
+1 -1
drivers/net/ns83820.c
··· 584 584 return 0; 585 585 } 586 586 587 - static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp) 587 + static inline int rx_refill(struct net_device *ndev, gfp_t gfp) 588 588 { 589 589 struct ns83820 *dev = PRIV(ndev); 590 590 unsigned i;
+1 -1
drivers/net/sungem.h
··· 1036 1036 #define ALIGNED_RX_SKB_ADDR(addr) \ 1037 1037 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 1038 1038 static __inline__ struct sk_buff *gem_alloc_skb(int size, 1039 - unsigned int __nocast gfp_flags) 1039 + gfp_t gfp_flags) 1040 1040 { 1041 1041 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 1042 1042
+1 -1
drivers/s390/scsi/zfcp_aux.c
··· 833 833 } 834 834 835 835 static void * 836 - zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size) 836 + zfcp_mempool_alloc(gfp_t gfp_mask, void *size) 837 837 { 838 838 return kmalloc((size_t) size, gfp_mask); 839 839 }
+5 -5
fs/bio.c
··· 75 75 */ 76 76 static struct bio_set *fs_bio_set; 77 77 78 - static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) 78 + static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) 79 79 { 80 80 struct bio_vec *bvl; 81 81 struct biovec_slab *bp; ··· 155 155 * allocate bio and iovecs from the memory pools specified by the 156 156 * bio_set structure. 157 157 **/ 158 - struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs) 158 + struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 159 159 { 160 160 struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); 161 161 ··· 181 181 return bio; 182 182 } 183 183 184 - struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) 184 + struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 185 185 { 186 186 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 187 187 ··· 277 277 * 278 278 * Like __bio_clone, only also allocates the returned bio 279 279 */ 280 - struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask) 280 + struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) 281 281 { 282 282 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); 283 283 ··· 1078 1078 return bp; 1079 1079 } 1080 1080 1081 - static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data) 1081 + static void *bio_pair_alloc(gfp_t gfp_flags, void *data) 1082 1082 { 1083 1083 return kmalloc(sizeof(struct bio_pair), gfp_flags); 1084 1084 }
+1 -1
fs/buffer.c
··· 3045 3045 buffer_heads_over_limit = (tot > max_buffer_heads); 3046 3046 } 3047 3047 3048 - struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags) 3048 + struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3049 3049 { 3050 3050 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3051 3051 if (ret) {
+1 -1
fs/mpage.c
··· 102 102 static struct bio * 103 103 mpage_alloc(struct block_device *bdev, 104 104 sector_t first_sector, int nr_vecs, 105 - unsigned int __nocast gfp_flags) 105 + gfp_t gfp_flags) 106 106 { 107 107 struct bio *bio; 108 108
+1 -1
fs/ntfs/malloc.h
··· 40 40 * Depending on @gfp_mask the allocation may be guaranteed to succeed. 41 41 */ 42 42 static inline void *__ntfs_malloc(unsigned long size, 43 - unsigned int __nocast gfp_mask) 43 + gfp_t gfp_mask) 44 44 { 45 45 if (likely(size <= PAGE_SIZE)) { 46 46 BUG_ON(!size);
+3 -3
fs/posix_acl.c
··· 35 35 * Allocate a new ACL with the specified number of entries. 36 36 */ 37 37 struct posix_acl * 38 - posix_acl_alloc(int count, unsigned int __nocast flags) 38 + posix_acl_alloc(int count, gfp_t flags) 39 39 { 40 40 const size_t size = sizeof(struct posix_acl) + 41 41 count * sizeof(struct posix_acl_entry); ··· 51 51 * Clone an ACL. 52 52 */ 53 53 struct posix_acl * 54 - posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags) 54 + posix_acl_clone(const struct posix_acl *acl, gfp_t flags) 55 55 { 56 56 struct posix_acl *clone = NULL; 57 57 ··· 185 185 * Create an ACL representing the file mode permission bits of an inode. 186 186 */ 187 187 struct posix_acl * 188 - posix_acl_from_mode(mode_t mode, unsigned int __nocast flags) 188 + posix_acl_from_mode(mode_t mode, gfp_t flags) 189 189 { 190 190 struct posix_acl *acl = posix_acl_alloc(3, flags); 191 191 if (!acl)
+5 -5
fs/xfs/linux-2.6/kmem.c
··· 45 45 46 46 47 47 void * 48 - kmem_alloc(size_t size, unsigned int __nocast flags) 48 + kmem_alloc(size_t size, gfp_t flags) 49 49 { 50 50 int retries = 0; 51 51 unsigned int lflags = kmem_flags_convert(flags); ··· 67 67 } 68 68 69 69 void * 70 - kmem_zalloc(size_t size, unsigned int __nocast flags) 70 + kmem_zalloc(size_t size, gfp_t flags) 71 71 { 72 72 void *ptr; 73 73 ··· 90 90 91 91 void * 92 92 kmem_realloc(void *ptr, size_t newsize, size_t oldsize, 93 - unsigned int __nocast flags) 93 + gfp_t flags) 94 94 { 95 95 void *new; 96 96 ··· 105 105 } 106 106 107 107 void * 108 - kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) 108 + kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags) 109 109 { 110 110 int retries = 0; 111 111 unsigned int lflags = kmem_flags_convert(flags); ··· 124 124 } 125 125 126 126 void * 127 - kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) 127 + kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags) 128 128 { 129 129 void *ptr; 130 130
+6 -7
fs/xfs/linux-2.6/kmem.h
··· 81 81 *(NSTATEP) = *(OSTATEP); \ 82 82 } while (0) 83 83 84 - static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags) 84 + static __inline unsigned int kmem_flags_convert(gfp_t flags) 85 85 { 86 86 unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 87 87 ··· 125 125 BUG(); 126 126 } 127 127 128 - extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); 129 - extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 128 + extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t); 129 + extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t); 130 130 131 - extern void *kmem_alloc(size_t, unsigned int __nocast); 132 - extern void *kmem_realloc(void *, size_t, size_t, 133 - unsigned int __nocast); 134 - extern void *kmem_zalloc(size_t, unsigned int __nocast); 131 + extern void *kmem_alloc(size_t, gfp_t); 132 + extern void *kmem_realloc(void *, size_t, size_t, gfp_t); 133 + extern void *kmem_zalloc(size_t, gfp_t); 135 134 extern void kmem_free(void *, size_t); 136 135 137 136 typedef struct shrinker *kmem_shaker_t;
+2 -2
include/asm-generic/dma-mapping.h
··· 35 35 36 36 static inline void * 37 37 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 38 - unsigned int __nocast flag) 38 + gfp_t flag) 39 39 { 40 40 BUG_ON(dev->bus != &pci_bus_type); 41 41 ··· 168 168 169 169 static inline void * 170 170 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 171 - unsigned int __nocast flag) 171 + gfp_t flag) 172 172 { 173 173 BUG(); 174 174 return NULL;
+1 -1
include/asm-i386/dma-mapping.h
··· 11 11 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 12 12 13 13 void *dma_alloc_coherent(struct device *dev, size_t size, 14 - dma_addr_t *dma_handle, unsigned int __nocast flag); 14 + dma_addr_t *dma_handle, gfp_t flag); 15 15 16 16 void dma_free_coherent(struct device *dev, size_t size, 17 17 void *vaddr, dma_addr_t dma_handle);
+1 -1
include/asm-ppc/dma-mapping.h
··· 61 61 62 62 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 63 63 dma_addr_t * dma_handle, 64 - unsigned int __nocast gfp) 64 + gfp_t gfp) 65 65 { 66 66 #ifdef CONFIG_NOT_COHERENT_CACHE 67 67 return __dma_alloc_coherent(size, dma_handle, gfp);
+2 -2
include/asm-ppc64/dma-mapping.h
··· 19 19 extern int dma_supported(struct device *dev, u64 mask); 20 20 extern int dma_set_mask(struct device *dev, u64 dma_mask); 21 21 extern void *dma_alloc_coherent(struct device *dev, size_t size, 22 - dma_addr_t *dma_handle, unsigned int __nocast flag); 22 + dma_addr_t *dma_handle, gfp_t flag); 23 23 extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 24 24 dma_addr_t dma_handle); 25 25 extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, ··· 118 118 */ 119 119 struct dma_mapping_ops { 120 120 void * (*alloc_coherent)(struct device *dev, size_t size, 121 - dma_addr_t *dma_handle, unsigned int __nocast flag); 121 + dma_addr_t *dma_handle, gfp_t flag); 122 122 void (*free_coherent)(struct device *dev, size_t size, 123 123 void *vaddr, dma_addr_t dma_handle); 124 124 dma_addr_t (*map_single)(struct device *dev, void *ptr,
+1 -1
include/asm-ppc64/iommu.h
··· 122 122 int nelems, enum dma_data_direction direction); 123 123 124 124 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 125 - dma_addr_t *dma_handle, unsigned int __nocast flag); 125 + dma_addr_t *dma_handle, gfp_t flag); 126 126 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, 127 127 void *vaddr, dma_addr_t dma_handle); 128 128 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+1 -1
include/linux/atmdev.h
··· 467 467 468 468 int atm_charge(struct atm_vcc *vcc,int truesize); 469 469 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 470 - unsigned int __nocast gfp_flags); 470 + gfp_t gfp_flags); 471 471 int atm_pcr_goal(struct atm_trafprm *tp); 472 472 473 473 void vcc_release_async(struct atm_vcc *vcc, int reply);
+3 -3
include/linux/bio.h
··· 276 276 extern struct bio_set *bioset_create(int, int, int); 277 277 extern void bioset_free(struct bio_set *); 278 278 279 - extern struct bio *bio_alloc(unsigned int __nocast, int); 280 - extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *); 279 + extern struct bio *bio_alloc(gfp_t, int); 280 + extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 281 281 extern void bio_put(struct bio *); 282 282 extern void bio_free(struct bio *, struct bio_set *); 283 283 ··· 287 287 extern int bio_hw_segments(struct request_queue *, struct bio *); 288 288 289 289 extern void __bio_clone(struct bio *, struct bio *); 290 - extern struct bio *bio_clone(struct bio *, unsigned int __nocast); 290 + extern struct bio *bio_clone(struct bio *, gfp_t); 291 291 292 292 extern void bio_init(struct bio *); 293 293
+1 -1
include/linux/buffer_head.h
··· 172 172 void __bforget(struct buffer_head *); 173 173 void __breadahead(struct block_device *, sector_t block, int size); 174 174 struct buffer_head *__bread(struct block_device *, sector_t block, int size); 175 - struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags); 175 + struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 176 176 void free_buffer_head(struct buffer_head * bh); 177 177 void FASTCALL(unlock_buffer(struct buffer_head *bh)); 178 178 void FASTCALL(__lock_buffer(struct buffer_head *bh));
+1 -1
include/linux/connector.h
··· 149 149 150 150 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 151 151 void cn_del_callback(struct cb_id *); 152 - int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast); 152 + int cn_netlink_send(struct cn_msg *, u32, gfp_t); 153 153 154 154 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 155 155 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+2 -3
include/linux/cpuset.h
··· 23 23 void cpuset_update_current_mems_allowed(void); 24 24 void cpuset_restrict_to_mems_allowed(unsigned long *nodes); 25 25 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 26 - extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask); 26 + extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask); 27 27 extern int cpuset_excl_nodes_overlap(const struct task_struct *p); 28 28 extern struct file_operations proc_cpuset_operations; 29 29 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); ··· 49 49 return 1; 50 50 } 51 51 52 - static inline int cpuset_zone_allowed(struct zone *z, 53 - unsigned int __nocast gfp_mask) 52 + static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 54 53 { 55 54 return 1; 56 55 }
+1 -1
include/linux/dmapool.h
··· 19 19 20 20 void dma_pool_destroy(struct dma_pool *pool); 21 21 22 - void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, 22 + void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 23 23 dma_addr_t *handle); 24 24 25 25 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+7 -7
include/linux/gfp.h
··· 85 85 #endif 86 86 87 87 extern struct page * 88 - FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *)); 88 + FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); 89 89 90 - static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask, 90 + static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 91 91 unsigned int order) 92 92 { 93 93 if (unlikely(order >= MAX_ORDER)) ··· 98 98 } 99 99 100 100 #ifdef CONFIG_NUMA 101 - extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order); 101 + extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); 102 102 103 103 static inline struct page * 104 - alloc_pages(unsigned int __nocast gfp_mask, unsigned int order) 104 + alloc_pages(gfp_t gfp_mask, unsigned int order) 105 105 { 106 106 if (unlikely(order >= MAX_ORDER)) 107 107 return NULL; 108 108 109 109 return alloc_pages_current(gfp_mask, order); 110 110 } 111 - extern struct page *alloc_page_vma(unsigned __nocast gfp_mask, 111 + extern struct page *alloc_page_vma(gfp_t gfp_mask, 112 112 struct vm_area_struct *vma, unsigned long addr); 113 113 #else 114 114 #define alloc_pages(gfp_mask, order) \ ··· 117 117 #endif 118 118 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 119 119 120 - extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)); 121 - extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask)); 120 + extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order)); 121 + extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask)); 122 122 123 123 #define __get_free_page(gfp_mask) \ 124 124 __get_free_pages((gfp_mask),0)
+1 -1
include/linux/jbd.h
··· 935 935 */ 936 936 extern kmem_cache_t *jbd_handle_cache; 937 937 938 - static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags) 938 + static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 939 939 { 940 940 return kmem_cache_alloc(jbd_handle_cache, gfp_flags); 941 941 }
+2 -2
include/linux/kfifo.h
··· 35 35 }; 36 36 37 37 extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 38 - unsigned int __nocast gfp_mask, spinlock_t *lock); 39 - extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, 38 + gfp_t gfp_mask, spinlock_t *lock); 39 + extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, 40 40 spinlock_t *lock); 41 41 extern void kfifo_free(struct kfifo *fifo); 42 42 extern unsigned int __kfifo_put(struct kfifo *fifo,
+4 -5
include/linux/mempool.h
··· 6 6 7 7 #include <linux/wait.h> 8 8 9 - typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data); 9 + typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); 10 10 typedef void (mempool_free_t)(void *element, void *pool_data); 11 11 12 12 typedef struct mempool_s { ··· 26 26 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, 27 27 mempool_free_t *free_fn, void *pool_data, int nid); 28 28 29 - extern int mempool_resize(mempool_t *pool, int new_min_nr, 30 - unsigned int __nocast gfp_mask); 29 + extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); 31 30 extern void mempool_destroy(mempool_t *pool); 32 - extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask); 31 + extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); 33 32 extern void mempool_free(void *element, mempool_t *pool); 34 33 35 34 /* 36 35 * A mempool_alloc_t and mempool_free_t that get the memory from 37 36 * a slab that is passed in through pool_data. 38 37 */ 39 - void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data); 38 + void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 40 39 void mempool_free_slab(void *element, void *pool_data); 41 40 42 41 #endif /* _LINUX_MEMPOOL_H */
+1 -1
include/linux/netlink.h
··· 131 131 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 132 132 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 133 133 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 134 - __u32 group, unsigned int __nocast allocation); 134 + __u32 group, gfp_t allocation); 135 135 extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); 136 136 extern int netlink_register_notifier(struct notifier_block *nb); 137 137 extern int netlink_unregister_notifier(struct notifier_block *nb);
+1 -1
include/linux/pagemap.h
··· 19 19 #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 20 20 #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 21 21 22 - static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping) 22 + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) 23 23 { 24 24 return mapping->flags & __GFP_BITS_MASK; 25 25 }
+3 -3
include/linux/posix_acl.h
··· 71 71 72 72 /* posix_acl.c */ 73 73 74 - extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast); 75 - extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast); 74 + extern struct posix_acl *posix_acl_alloc(int, gfp_t); 75 + extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t); 76 76 extern int posix_acl_valid(const struct posix_acl *); 77 77 extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); 78 - extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast); 78 + extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t); 79 79 extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *); 80 80 extern int posix_acl_create_masq(struct posix_acl *, mode_t *); 81 81 extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
+1 -1
include/linux/radix-tree.h
··· 50 50 unsigned int 51 51 radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 52 52 unsigned long first_index, unsigned int max_items); 53 - int radix_tree_preload(unsigned int __nocast gfp_mask); 53 + int radix_tree_preload(gfp_t gfp_mask); 54 54 void radix_tree_init(void); 55 55 void *radix_tree_tag_set(struct radix_tree_root *root, 56 56 unsigned long index, int tag);
+2 -4
include/linux/security.h
··· 2634 2634 return security_ops->socket_getpeersec(sock, optval, optlen, len); 2635 2635 } 2636 2636 2637 - static inline int security_sk_alloc(struct sock *sk, int family, 2638 - unsigned int __nocast priority) 2637 + static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) 2639 2638 { 2640 2639 return security_ops->sk_alloc_security(sk, family, priority); 2641 2640 } ··· 2751 2752 return -ENOPROTOOPT; 2752 2753 } 2753 2754 2754 - static inline int security_sk_alloc(struct sock *sk, int family, 2755 - unsigned int __nocast priority) 2755 + static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) 2756 2756 { 2757 2757 return 0; 2758 2758 }
+14 -14
include/linux/skbuff.h
··· 302 302 303 303 extern void __kfree_skb(struct sk_buff *skb); 304 304 extern struct sk_buff *__alloc_skb(unsigned int size, 305 - unsigned int __nocast priority, int fclone); 305 + gfp_t priority, int fclone); 306 306 static inline struct sk_buff *alloc_skb(unsigned int size, 307 - unsigned int __nocast priority) 307 + gfp_t priority) 308 308 { 309 309 return __alloc_skb(size, priority, 0); 310 310 } 311 311 312 312 static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 313 - unsigned int __nocast priority) 313 + gfp_t priority) 314 314 { 315 315 return __alloc_skb(size, priority, 1); 316 316 } 317 317 318 318 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 319 319 unsigned int size, 320 - unsigned int __nocast priority); 320 + gfp_t priority); 321 321 extern void kfree_skbmem(struct sk_buff *skb); 322 322 extern struct sk_buff *skb_clone(struct sk_buff *skb, 323 - unsigned int __nocast priority); 323 + gfp_t priority); 324 324 extern struct sk_buff *skb_copy(const struct sk_buff *skb, 325 - unsigned int __nocast priority); 325 + gfp_t priority); 326 326 extern struct sk_buff *pskb_copy(struct sk_buff *skb, 327 - unsigned int __nocast gfp_mask); 327 + gfp_t gfp_mask); 328 328 extern int pskb_expand_head(struct sk_buff *skb, 329 329 int nhead, int ntail, 330 - unsigned int __nocast gfp_mask); 330 + gfp_t gfp_mask); 331 331 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 332 332 unsigned int headroom); 333 333 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 334 334 int newheadroom, int newtailroom, 335 - unsigned int __nocast priority); 335 + gfp_t priority); 336 336 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); 337 337 #define dev_kfree_skb(a) kfree_skb(a) 338 338 extern void skb_over_panic(struct sk_buff *skb, int len, ··· 484 484 * NULL is returned on a memory allocation failure. 485 485 */ 486 486 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 487 - unsigned int __nocast pri) 487 + gfp_t pri) 488 488 { 489 489 might_sleep_if(pri & __GFP_WAIT); 490 490 if (skb_shared(skb)) { ··· 516 516 * %NULL is returned on a memory allocation failure. 517 517 */ 518 518 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 519 - unsigned int __nocast pri) 519 + gfp_t pri) 520 520 { 521 521 might_sleep_if(pri & __GFP_WAIT); 522 522 if (skb_cloned(skb)) { ··· 1017 1017 * %NULL is returned in there is no free memory. 1018 1018 */ 1019 1019 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1020 - unsigned int __nocast gfp_mask) 1020 + gfp_t gfp_mask) 1021 1021 { 1022 1022 struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); 1023 1023 if (likely(skb)) ··· 1130 1130 * If there is no free memory -ENOMEM is returned, otherwise zero 1131 1131 * is returned and the old skb data released. 1132 1132 */ 1133 - extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp); 1134 - static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp) 1133 + extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); 1134 + static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp) 1135 1135 { 1136 1136 return __skb_linearize(skb, gfp); 1137 1137 }
+9 -10
include/linux/slab.h
··· 61 61 void (*)(void *, kmem_cache_t *, unsigned long)); 62 62 extern int kmem_cache_destroy(kmem_cache_t *); 63 63 extern int kmem_cache_shrink(kmem_cache_t *); 64 - extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); 64 + extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t); 65 65 extern void kmem_cache_free(kmem_cache_t *, void *); 66 66 extern unsigned int kmem_cache_size(kmem_cache_t *); 67 67 extern const char *kmem_cache_name(kmem_cache_t *); 68 - extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags); 68 + extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags); 69 69 70 70 /* Size description struct for general caches. */ 71 71 struct cache_sizes { ··· 74 74 kmem_cache_t *cs_dmacachep; 75 75 }; 76 76 extern struct cache_sizes malloc_sizes[]; 77 - extern void *__kmalloc(size_t, unsigned int __nocast); 77 + extern void *__kmalloc(size_t, gfp_t); 78 78 79 - static inline void *kmalloc(size_t size, unsigned int __nocast flags) 79 + static inline void *kmalloc(size_t size, gfp_t flags) 80 80 { 81 81 if (__builtin_constant_p(size)) { 82 82 int i = 0; ··· 99 99 return __kmalloc(size, flags); 100 100 } 101 101 102 - extern void *kzalloc(size_t, unsigned int __nocast); 102 + extern void *kzalloc(size_t, gfp_t); 103 103 104 104 /** 105 105 * kcalloc - allocate memory for an array. The memory is set to zero. ··· 107 107 * @size: element size. 108 108 * @flags: the type of memory to allocate. 109 109 */ 110 - static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags) 110 + static inline void *kcalloc(size_t n, size_t size, gfp_t flags) 111 111 { 112 112 if (n != 0 && size > INT_MAX / n) 113 113 return NULL; ··· 118 118 extern unsigned int ksize(const void *); 119 119 120 120 #ifdef CONFIG_NUMA 121 - extern void *kmem_cache_alloc_node(kmem_cache_t *, 122 - unsigned int __nocast flags, int node); 123 - extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); 121 + extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); 122 + extern void *kmalloc_node(size_t size, gfp_t flags, int node); 124 123 #else 125 124 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) 126 125 { 127 126 return kmem_cache_alloc(cachep, flags); 128 127 } 129 - static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) 128 + static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 130 129 { 131 130 return kmalloc(size, flags); 132 131 }
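
Since kmalloc(), kcalloc() and kmem_cache_alloc() now take gfp_t, a caller
reads as below. This is a hedged sketch with a hypothetical helper and struct,
not code from the patch:

	#include <linux/slab.h>

	struct example_buf {
		void	*data;
		size_t	len;
	};

	/* Hypothetical helper: the gfp mask is threaded through as gfp_t and
	 * handed to kmalloc() unchanged, exactly as the old annotated form
	 * was. */
	static struct example_buf *example_buf_alloc(size_t len, gfp_t gfp)
	{
		struct example_buf *buf = kmalloc(sizeof(*buf), gfp);

		if (!buf)
			return NULL;

		buf->data = kmalloc(len, gfp);
		if (!buf->data) {
			kfree(buf);
			return NULL;
		}

		buf->len = len;
		return buf;
	}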
+1 -1
include/linux/string.h
··· 88 88 extern void * memchr(const void *,int,__kernel_size_t); 89 89 #endif 90 90 91 - extern char *kstrdup(const char *s, unsigned int __nocast gfp); 91 + extern char *kstrdup(const char *s, gfp_t gfp); 92 92 93 93 #ifdef __cplusplus 94 94 }
+1 -1
include/linux/swap.h
··· 147 147 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) 148 148 149 149 /* linux/mm/oom_kill.c */ 150 - extern void out_of_memory(unsigned int __nocast gfp_mask, int order); 150 + extern void out_of_memory(gfp_t gfp_mask, int order); 151 151 152 152 /* linux/mm/memory.c */ 153 153 extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
+1 -1
include/linux/textsearch.h
··· 159 159 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 160 160 161 161 static inline struct ts_config *alloc_ts_config(size_t payload, 162 - unsigned int __nocast gfp_mask) 162 + gfp_t gfp_mask) 163 163 { 164 164 struct ts_config *conf; 165 165
+4
include/linux/types.h
··· 165 165 typedef __u64 __bitwise __be64; 166 166 #endif 167 167 168 + #ifdef __KERNEL__ 169 + typedef unsigned __nocast gfp_t; 170 + #endif 171 + 168 172 struct ustat { 169 173 __kernel_daddr_t f_tfree; 170 174 __kernel_ino_t f_tinode;
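
The hunk above is the entire definition of the new type. Its behaviour follows
from the __nocast annotation, which the kernel only defines for sparse; the
compiler.h lines below are a paraphrase given for context, not part of this
patch:

	/* Roughly what linux/compiler.h does: __nocast is sparse-only. */
	#ifdef __CHECKER__
	# define __nocast	__attribute__((nocast))
	#else
	# define __nocast
	#endif

	/* What this patch adds to linux/types.h: */
	#ifdef __KERNEL__
	typedef unsigned __nocast gfp_t;
	#endif

	/*
	 * For gcc, gfp_t is therefore a plain unsigned int and the generated
	 * code is unchanged; for sparse, conversions involving gfp_t are
	 * checked exactly as they were with the explicit annotation.
	 */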
+2 -2
include/linux/vmalloc.h
··· 34 34 extern void *vmalloc(unsigned long size); 35 35 extern void *vmalloc_exec(unsigned long size); 36 36 extern void *vmalloc_32(unsigned long size); 37 - extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot); 38 - extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot); 37 + extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 38 + extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot); 39 39 extern void vfree(void *addr); 40 40 41 41 extern void *vmap(struct page **pages, unsigned int count,
+1 -1
include/net/bluetooth/bluetooth.h
··· 136 136 }; 137 137 #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) 138 138 139 - static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how) 139 + static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) 140 140 { 141 141 struct sk_buff *skb; 142 142
+1 -1
include/net/bluetooth/rfcomm.h
··· 230 230 u8 xon_char, u8 xoff_char, u16 param_mask); 231 231 232 232 /* ---- RFCOMM DLCs (channels) ---- */ 233 - struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio); 233 + struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); 234 234 void rfcomm_dlc_free(struct rfcomm_dlc *d); 235 235 int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); 236 236 int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+4 -4
include/net/dn_nsp.h
··· 19 19 extern void dn_nsp_send_oth_ack(struct sock *sk); 20 20 extern void dn_nsp_delayed_ack(struct sock *sk); 21 21 extern void dn_send_conn_ack(struct sock *sk); 22 - extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp); 22 + extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp); 23 23 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 24 - unsigned short reason, unsigned int __nocast gfp); 24 + unsigned short reason, gfp_t gfp); 25 25 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 26 26 unsigned short reason); 27 27 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); ··· 29 29 30 30 extern void dn_nsp_output(struct sock *sk); 31 31 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 32 - extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob); 32 + extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob); 33 33 extern unsigned long dn_nsp_persist(struct sock *sk); 34 34 extern int dn_nsp_xmit_timeout(struct sock *sk); 35 35 36 36 extern int dn_nsp_rx(struct sk_buff *); 37 37 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 38 38 39 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 39 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); 40 40 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 41 41 42 42 #define NSP_REASON_OK 0 /* No error */
+1 -1
include/net/dn_route.h
··· 15 15 GNU General Public License for more details. 16 16 *******************************************************************************/ 17 17 18 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 18 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); 19 19 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 20 20 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21 21 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+1 -1
include/net/inet_connection_sock.h
··· 94 94 95 95 extern struct sock *inet_csk_clone(struct sock *sk, 96 96 const struct request_sock *req, 97 - const unsigned int __nocast priority); 97 + const gfp_t priority); 98 98 99 99 enum inet_csk_ack_state_t { 100 100 ICSK_ACK_SCHED = 1,
+1 -1
include/net/ip_vs.h
··· 832 832 833 833 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 834 834 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 835 - extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 835 + extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, 836 836 char *o_buf, int o_len, char *n_buf, int n_len); 837 837 extern int ip_vs_app_init(void); 838 838 extern void ip_vs_app_cleanup(void);
+1 -1
include/net/llc_conn.h
··· 93 93 return skb->cb[sizeof(skb->cb) - 1]; 94 94 } 95 95 96 - extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority, 96 + extern struct sock *llc_sk_alloc(int family, gfp_t priority, 97 97 struct proto *prot); 98 98 extern void llc_sk_free(struct sock *sk); 99 99
+1 -1
include/net/sctp/sctp.h
··· 125 125 */ 126 126 extern struct sock *sctp_get_ctl_sock(void); 127 127 extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, 128 - sctp_scope_t, unsigned int __nocast gfp, 128 + sctp_scope_t, gfp_t gfp, 129 129 int flags); 130 130 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); 131 131 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
+5 -5
include/net/sctp/sm.h
··· 181 181 int sctp_chunk_iif(const struct sctp_chunk *); 182 182 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, 183 183 struct sctp_chunk *, 184 - unsigned int __nocast gfp); 184 + gfp_t gfp); 185 185 __u32 sctp_generate_verification_tag(void); 186 186 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); 187 187 188 188 /* Prototypes for chunk-building functions. */ 189 189 struct sctp_chunk *sctp_make_init(const struct sctp_association *, 190 190 const struct sctp_bind_addr *, 191 - unsigned int __nocast gfp, int vparam_len); 191 + gfp_t gfp, int vparam_len); 192 192 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, 193 193 const struct sctp_chunk *, 194 - const unsigned int __nocast gfp, 194 + const gfp_t gfp, 195 195 const int unkparam_len); 196 196 struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, 197 197 const struct sctp_chunk *); ··· 265 265 struct sctp_endpoint *, 266 266 struct sctp_association *asoc, 267 267 void *event_arg, 268 - unsigned int __nocast gfp); 268 + gfp_t gfp); 269 269 270 270 /* 2nd level prototypes */ 271 271 void sctp_generate_t3_rtx_event(unsigned long peer); ··· 276 276 struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, 277 277 const struct sctp_association *, 278 278 struct sctp_chunk *, 279 - unsigned int __nocast gfp, int *err, 279 + gfp_t gfp, int *err, 280 280 struct sctp_chunk **err_chk_p); 281 281 int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, 282 282 struct sockaddr_storage*, int);
+12 -12
include/net/sctp/structs.h
··· 446 446 }; 447 447 448 448 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 449 - unsigned int __nocast gfp); 449 + gfp_t gfp); 450 450 void sctp_ssnmap_free(struct sctp_ssnmap *map); 451 451 void sctp_ssnmap_clear(struct sctp_ssnmap *map); 452 452 ··· 947 947 }; 948 948 949 949 struct sctp_transport *sctp_transport_new(const union sctp_addr *, 950 - unsigned int __nocast); 950 + gfp_t); 951 951 void sctp_transport_set_owner(struct sctp_transport *, 952 952 struct sctp_association *); 953 953 void sctp_transport_route(struct sctp_transport *, union sctp_addr *, ··· 1095 1095 void sctp_bind_addr_free(struct sctp_bind_addr *); 1096 1096 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 1097 1097 const struct sctp_bind_addr *src, 1098 - sctp_scope_t scope, unsigned int __nocast gfp, 1098 + sctp_scope_t scope, gfp_t gfp, 1099 1099 int flags); 1100 1100 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1101 - unsigned int __nocast gfp); 1101 + gfp_t gfp); 1102 1102 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1103 1103 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1104 1104 struct sctp_sock *); ··· 1108 1108 struct sctp_sock *opt); 1109 1109 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 1110 1110 int *addrs_len, 1111 - unsigned int __nocast gfp); 1111 + gfp_t gfp); 1112 1112 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, 1113 - __u16 port, unsigned int __nocast gfp); 1113 + __u16 port, gfp_t gfp); 1114 1114 1115 1115 sctp_scope_t sctp_scope(const union sctp_addr *); 1116 1116 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); ··· 1239 1239 } 1240 1240 1241 1241 /* These are function signatures for manipulating endpoints. 
*/ 1242 - struct sctp_endpoint *sctp_endpoint_new(struct sock *, unsigned int __nocast); 1242 + struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t); 1243 1243 void sctp_endpoint_free(struct sctp_endpoint *); 1244 1244 void sctp_endpoint_put(struct sctp_endpoint *); 1245 1245 void sctp_endpoint_hold(struct sctp_endpoint *); ··· 1260 1260 struct sctp_chunk **err_chunk); 1261 1261 int sctp_process_init(struct sctp_association *, sctp_cid_t cid, 1262 1262 const union sctp_addr *peer, 1263 - sctp_init_chunk_t *init, unsigned int __nocast gfp); 1263 + sctp_init_chunk_t *init, gfp_t gfp); 1264 1264 __u32 sctp_generate_tag(const struct sctp_endpoint *); 1265 1265 __u32 sctp_generate_tsn(const struct sctp_endpoint *); 1266 1266 ··· 1723 1723 1724 1724 struct sctp_association * 1725 1725 sctp_association_new(const struct sctp_endpoint *, const struct sock *, 1726 - sctp_scope_t scope, unsigned int __nocast gfp); 1726 + sctp_scope_t scope, gfp_t gfp); 1727 1727 void sctp_association_free(struct sctp_association *); 1728 1728 void sctp_association_put(struct sctp_association *); 1729 1729 void sctp_association_hold(struct sctp_association *); ··· 1739 1739 const union sctp_addr *laddr); 1740 1740 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *, 1741 1741 const union sctp_addr *address, 1742 - const unsigned int __nocast gfp, 1742 + const gfp_t gfp, 1743 1743 const int peer_state); 1744 1744 void sctp_assoc_del_peer(struct sctp_association *asoc, 1745 1745 const union sctp_addr *addr); ··· 1764 1764 void sctp_assoc_set_primary(struct sctp_association *, 1765 1765 struct sctp_transport *); 1766 1766 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, 1767 - unsigned int __nocast); 1767 + gfp_t); 1768 1768 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, 1769 1769 struct sctp_cookie*, 1770 - unsigned int __nocast gfp); 1770 + gfp_t gfp); 1771 1771 1772 1772 int sctp_cmp_addr_exact(const union sctp_addr *ss1, 1773 1773 const union sctp_addr *ss2);
+8 -8
include/net/sctp/ulpevent.h
··· 88 88 __u16 error, 89 89 __u16 outbound, 90 90 __u16 inbound, 91 - unsigned int __nocast gfp); 91 + gfp_t gfp); 92 92 93 93 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 94 94 const struct sctp_association *asoc, ··· 96 96 int flags, 97 97 int state, 98 98 int error, 99 - unsigned int __nocast gfp); 99 + gfp_t gfp); 100 100 101 101 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 102 102 const struct sctp_association *asoc, 103 103 struct sctp_chunk *chunk, 104 104 __u16 flags, 105 - unsigned int __nocast gfp); 105 + gfp_t gfp); 106 106 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 107 107 const struct sctp_association *asoc, 108 108 struct sctp_chunk *chunk, 109 109 __u16 flags, 110 110 __u32 error, 111 - unsigned int __nocast gfp); 111 + gfp_t gfp); 112 112 113 113 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 114 114 const struct sctp_association *asoc, 115 115 __u16 flags, 116 - unsigned int __nocast gfp); 116 + gfp_t gfp); 117 117 118 118 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 119 119 const struct sctp_association *asoc, 120 - __u32 indication, unsigned int __nocast gfp); 120 + __u32 indication, gfp_t gfp); 121 121 122 122 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 123 - const struct sctp_association *asoc, unsigned int __nocast gfp); 123 + const struct sctp_association *asoc, gfp_t gfp); 124 124 125 125 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 126 126 struct sctp_chunk *chunk, 127 - unsigned int __nocast gfp); 127 + gfp_t gfp); 128 128 129 129 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 130 130 struct msghdr *);
+4 -7
include/net/sctp/ulpqueue.h
··· 62 62 void sctp_ulpq_free(struct sctp_ulpq *); 63 63 64 64 /* Add a new DATA chunk for processing. */ 65 - int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, 66 - unsigned int __nocast); 65 + int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 67 66 68 67 /* Add a new event for propagation to the ULP. */ 69 68 int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); 70 69 71 70 /* Renege previously received chunks. */ 72 - void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, 73 - unsigned int __nocast); 71 + void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 74 72 75 73 /* Perform partial delivery. */ 76 - void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, 77 - unsigned int __nocast); 74 + void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 78 75 79 76 /* Abort the partial delivery. */ 80 - void sctp_ulpq_abort_pd(struct sctp_ulpq *, unsigned int __nocast); 77 + void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t); 81 78 82 79 /* Clear the partial data delivery condition on this socket. */ 83 80 int sctp_clear_pd(struct sock *sk);
+8 -8
include/net/sock.h
··· 739 739 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 740 740 741 741 extern struct sock *sk_alloc(int family, 742 - unsigned int __nocast priority, 742 + gfp_t priority, 743 743 struct proto *prot, int zero_it); 744 744 extern void sk_free(struct sock *sk); 745 745 extern struct sock *sk_clone(const struct sock *sk, 746 - const unsigned int __nocast priority); 746 + const gfp_t priority); 747 747 748 748 extern struct sk_buff *sock_wmalloc(struct sock *sk, 749 749 unsigned long size, int force, 750 - unsigned int __nocast priority); 750 + gfp_t priority); 751 751 extern struct sk_buff *sock_rmalloc(struct sock *sk, 752 752 unsigned long size, int force, 753 - unsigned int __nocast priority); 753 + gfp_t priority); 754 754 extern void sock_wfree(struct sk_buff *skb); 755 755 extern void sock_rfree(struct sk_buff *skb); 756 756 ··· 766 766 int noblock, 767 767 int *errcode); 768 768 extern void *sock_kmalloc(struct sock *sk, int size, 769 - unsigned int __nocast priority); 769 + gfp_t priority); 770 770 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 771 771 extern void sk_send_sigurg(struct sock *sk); 772 772 ··· 1201 1201 1202 1202 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, 1203 1203 int size, int mem, 1204 - unsigned int __nocast gfp) 1204 + gfp_t gfp) 1205 1205 { 1206 1206 struct sk_buff *skb; 1207 1207 int hdr_len; ··· 1224 1224 1225 1225 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, 1226 1226 int size, 1227 - unsigned int __nocast gfp) 1227 + gfp_t gfp) 1228 1228 { 1229 1229 return sk_stream_alloc_pskb(sk, size, 0, gfp); 1230 1230 } ··· 1255 1255 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); 1256 1256 } 1257 1257 1258 - static inline unsigned int __nocast gfp_any(void) 1258 + static inline gfp_t gfp_any(void) 1259 1259 { 1260 1260 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; 1261 1261 }
+1 -2
include/net/tcp.h
··· 460 460 extern void tcp_send_partial(struct sock *); 461 461 extern int tcp_write_wakeup(struct sock *); 462 462 extern void tcp_send_fin(struct sock *sk); 463 - extern void tcp_send_active_reset(struct sock *sk, 464 - unsigned int __nocast priority); 463 + extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 465 464 extern int tcp_send_synack(struct sock *); 466 465 extern void tcp_push_one(struct sock *, unsigned int mss_now); 467 466 extern void tcp_send_ack(struct sock *sk);
+1 -1
include/net/xfrm.h
··· 875 875 } 876 876 #endif 877 877 878 - struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp); 878 + struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); 879 879 extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 880 880 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 881 881 struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
+1 -1
include/rdma/ib_mad.h
··· 596 596 u32 remote_qpn, u16 pkey_index, 597 597 struct ib_ah *ah, int rmpp_active, 598 598 int hdr_len, int data_len, 599 - unsigned int __nocast gfp_mask); 599 + gfp_t gfp_mask); 600 600 601 601 /** 602 602 * ib_free_send_mad - Returns data buffers used to send a MAD.
+5 -5
include/rdma/ib_sa.h
··· 285 285 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 286 286 struct ib_sa_path_rec *rec, 287 287 ib_sa_comp_mask comp_mask, 288 - int timeout_ms, unsigned int __nocast gfp_mask, 288 + int timeout_ms, gfp_t gfp_mask, 289 289 void (*callback)(int status, 290 290 struct ib_sa_path_rec *resp, 291 291 void *context), ··· 296 296 u8 method, 297 297 struct ib_sa_mcmember_rec *rec, 298 298 ib_sa_comp_mask comp_mask, 299 - int timeout_ms, unsigned int __nocast gfp_mask, 299 + int timeout_ms, gfp_t gfp_mask, 300 300 void (*callback)(int status, 301 301 struct ib_sa_mcmember_rec *resp, 302 302 void *context), ··· 307 307 u8 method, 308 308 struct ib_sa_service_rec *rec, 309 309 ib_sa_comp_mask comp_mask, 310 - int timeout_ms, unsigned int __nocast gfp_mask, 310 + int timeout_ms, gfp_t gfp_mask, 311 311 void (*callback)(int status, 312 312 struct ib_sa_service_rec *resp, 313 313 void *context), ··· 342 342 ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, 343 343 struct ib_sa_mcmember_rec *rec, 344 344 ib_sa_comp_mask comp_mask, 345 - int timeout_ms, unsigned int __nocast gfp_mask, 345 + int timeout_ms, gfp_t gfp_mask, 346 346 void (*callback)(int status, 347 347 struct ib_sa_mcmember_rec *resp, 348 348 void *context), ··· 384 384 ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, 385 385 struct ib_sa_mcmember_rec *rec, 386 386 ib_sa_comp_mask comp_mask, 387 - int timeout_ms, unsigned int __nocast gfp_mask, 387 + int timeout_ms, gfp_t gfp_mask, 388 388 void (*callback)(int status, 389 389 struct ib_sa_mcmember_rec *resp, 390 390 void *context),
+1 -1
include/rxrpc/call.h
··· 203 203 size_t sioc, 204 204 struct kvec *siov, 205 205 uint8_t rxhdr_flags, 206 - unsigned int __nocast alloc_flags, 206 + gfp_t alloc_flags, 207 207 int dup_data, 208 208 size_t *size_sent); 209 209
+1 -1
include/rxrpc/message.h
··· 63 63 uint8_t type, 64 64 int count, 65 65 struct kvec *diov, 66 - unsigned int __nocast alloc_flags, 66 + gfp_t alloc_flags, 67 67 struct rxrpc_message **_msg); 68 68 69 69 extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
+4 -4
include/sound/core.h
··· 290 290 void snd_memory_done(void); 291 291 int snd_memory_info_init(void); 292 292 int snd_memory_info_done(void); 293 - void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags); 294 - void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags); 295 - void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags); 293 + void *snd_hidden_kmalloc(size_t size, gfp_t flags); 294 + void *snd_hidden_kzalloc(size_t size, gfp_t flags); 295 + void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags); 296 296 void snd_hidden_kfree(const void *obj); 297 297 void *snd_hidden_vmalloc(unsigned long size); 298 298 void snd_hidden_vfree(void *obj); 299 - char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags); 299 + char *snd_hidden_kstrdup(const char *s, gfp_t flags); 300 300 #define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) 301 301 #define kzalloc(size, flags) snd_hidden_kzalloc(size, flags) 302 302 #define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
+1 -1
include/sound/driver.h
··· 51 51 #ifdef CONFIG_SND_DEBUG_MEMORY 52 52 #include <linux/slab.h> 53 53 #include <linux/vmalloc.h> 54 - void *snd_wrapper_kmalloc(size_t, unsigned int __nocast); 54 + void *snd_wrapper_kmalloc(size_t, gfp_t); 55 55 #undef kmalloc 56 56 void snd_wrapper_kfree(const void *); 57 57 #undef kfree
+1 -1
kernel/audit.c
··· 560 560 } 561 561 562 562 static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, 563 - unsigned int __nocast gfp_mask, int type) 563 + gfp_t gfp_mask, int type) 564 564 { 565 565 unsigned long flags; 566 566 struct audit_buffer *ab = NULL;
+1 -1
kernel/cpuset.c
··· 1670 1670 * GFP_USER - only nodes in current tasks mems allowed ok. 1671 1671 **/ 1672 1672 1673 - int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask) 1673 + int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 1674 1674 { 1675 1675 int node; /* node that zone z is on */ 1676 1676 const struct cpuset *cs; /* current cpuset ancestors */
+2 -2
kernel/kfifo.c
··· 36 36 * struct kfifo with kfree(). 37 37 */ 38 38 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 39 - unsigned int __nocast gfp_mask, spinlock_t *lock) 39 + gfp_t gfp_mask, spinlock_t *lock) 40 40 { 41 41 struct kfifo *fifo; 42 42 ··· 64 64 * 65 65 * The size will be rounded-up to a power of 2. 66 66 */ 67 - struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock) 67 + struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) 68 68 { 69 69 unsigned char *buffer; 70 70 struct kfifo *ret;
+1 -1
kernel/signal.c
··· 262 262 return sig; 263 263 } 264 264 265 - static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags, 265 + static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 266 266 int override_rlimit) 267 267 { 268 268 struct sigqueue *q = NULL;
+1 -1
lib/radix-tree.c
··· 110 110 * success, return zero, with preemption disabled. On error, return -ENOMEM 111 111 * with preemption not disabled. 112 112 */ 113 - int radix_tree_preload(unsigned int __nocast gfp_mask) 113 + int radix_tree_preload(gfp_t gfp_mask) 114 114 { 115 115 struct radix_tree_preload *rtp; 116 116 struct radix_tree_node *node;
+1 -1
lib/ts_bm.c
··· 127 127 } 128 128 129 129 static struct ts_config *bm_init(const void *pattern, unsigned int len, 130 - unsigned int __nocast gfp_mask) 130 + gfp_t gfp_mask) 131 131 { 132 132 struct ts_config *conf; 133 133 struct ts_bm *bm;
+1 -1
lib/ts_fsm.c
··· 258 258 } 259 259 260 260 static struct ts_config *fsm_init(const void *pattern, unsigned int len, 261 - unsigned int __nocast gfp_mask) 261 + gfp_t gfp_mask) 262 262 { 263 263 int i, err = -EINVAL; 264 264 struct ts_config *conf;
+1 -1
lib/ts_kmp.c
··· 87 87 } 88 88 89 89 static struct ts_config *kmp_init(const void *pattern, unsigned int len, 90 - unsigned int __nocast gfp_mask) 90 + gfp_t gfp_mask) 91 91 { 92 92 struct ts_config *conf; 93 93 struct ts_kmp *kmp;
+1 -1
mm/highmem.c
··· 30 30 31 31 static mempool_t *page_pool, *isa_page_pool; 32 32 33 - static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data) 33 + static void *page_pool_alloc(gfp_t gfp_mask, void *data) 34 34 { 35 35 unsigned int gfp = gfp_mask | (unsigned int) (long) data; 36 36
+4 -4
mm/mempolicy.c
··· 687 687 } 688 688 689 689 /* Return a zonelist representing a mempolicy */ 690 - static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy) 690 + static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) 691 691 { 692 692 int nd; 693 693 ··· 751 751 752 752 /* Allocate a page in interleaved policy. 753 753 Own path because it needs to do special accounting. */ 754 - static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid) 754 + static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) 755 755 { 756 756 struct zonelist *zl; 757 757 struct page *page; ··· 789 789 * Should be called with the mm_sem of the vma hold. 790 790 */ 791 791 struct page * 792 - alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr) 792 + alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) 793 793 { 794 794 struct mempolicy *pol = get_vma_policy(current, vma, addr); 795 795 ··· 832 832 * 1) it's ok to take cpuset_sem (can WAIT), and 833 833 * 2) allocating for current task (not interrupt). 834 834 */ 835 - struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order) 835 + struct page *alloc_pages_current(gfp_t gfp, unsigned order) 836 836 { 837 837 struct mempolicy *pol = current->mempolicy; 838 838
+3 -3
mm/mempool.c
··· 112 112 * while this function is running. mempool_alloc() & mempool_free() 113 113 * might be called (eg. from IRQ contexts) while this function executes. 114 114 */ 115 - int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask) 115 + int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) 116 116 { 117 117 void *element; 118 118 void **new_elements; ··· 200 200 * *never* fails when called from process contexts. (it might 201 201 * fail if called from an IRQ context.) 202 202 */ 203 - void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask) 203 + void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) 204 204 { 205 205 void *element; 206 206 unsigned long flags; ··· 276 276 /* 277 277 * A commonly used alloc and free fn. 278 278 */ 279 - void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data) 279 + void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) 280 280 { 281 281 kmem_cache_t *mem = (kmem_cache_t *) pool_data; 282 282 return kmem_cache_alloc(mem, gfp_mask);
+1 -2
mm/nommu.c
··· 157 157 kfree(addr); 158 158 } 159 159 160 - void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, 161 - pgprot_t prot) 160 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 162 161 { 163 162 /* 164 163 * kmalloc doesn't like __GFP_HIGHMEM for some reason
+1 -1
mm/oom_kill.c
··· 263 263 * OR try to be smart about which process to kill. Note that we 264 264 * don't have to be perfect here, we just have to be good. 265 265 */ 266 - void out_of_memory(unsigned int __nocast gfp_mask, int order) 266 + void out_of_memory(gfp_t gfp_mask, int order) 267 267 { 268 268 struct mm_struct *mm = NULL; 269 269 task_t * p;
+6 -6
mm/page_alloc.c
··· 671 671 free_hot_cold_page(page, 1); 672 672 } 673 673 674 - static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags) 674 + static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 675 675 { 676 676 int i; 677 677 ··· 686 686 * or two. 687 687 */ 688 688 static struct page * 689 - buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags) 689 + buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) 690 690 { 691 691 unsigned long flags; 692 692 struct page *page = NULL; ··· 761 761 } 762 762 763 763 static inline int 764 - should_reclaim_zone(struct zone *z, unsigned int gfp_mask) 764 + should_reclaim_zone(struct zone *z, gfp_t gfp_mask) 765 765 { 766 766 if (!z->reclaim_pages) 767 767 return 0; ··· 774 774 * This is the 'heart' of the zoned buddy allocator. 775 775 */ 776 776 struct page * fastcall 777 - __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order, 777 + __alloc_pages(gfp_t gfp_mask, unsigned int order, 778 778 struct zonelist *zonelist) 779 779 { 780 780 const int wait = gfp_mask & __GFP_WAIT; ··· 977 977 /* 978 978 * Common helper functions. 979 979 */ 980 - fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order) 980 + fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 981 981 { 982 982 struct page * page; 983 983 page = alloc_pages(gfp_mask, order); ··· 988 988 989 989 EXPORT_SYMBOL(__get_free_pages); 990 990 991 - fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask) 991 + fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 992 992 { 993 993 struct page * page; 994 994
+1 -1
mm/page_io.c
··· 19 19 #include <linux/writeback.h> 20 20 #include <asm/pgtable.h> 21 21 22 - static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index, 22 + static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, 23 23 struct page *page, bio_end_io_t end_io) 24 24 { 25 25 struct bio *bio;
+1 -2
mm/shmem.c
··· 921 921 } 922 922 923 923 static inline struct page * 924 - shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info, 925 - unsigned long idx) 924 + shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) 926 925 { 927 926 return alloc_page(gfp | __GFP_ZERO); 928 927 }
+16 -18
mm/slab.c
··· 650 650 return cachep->array[smp_processor_id()]; 651 651 } 652 652 653 - static inline kmem_cache_t *__find_general_cachep(size_t size, 654 - unsigned int __nocast gfpflags) 653 + static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) 655 654 { 656 655 struct cache_sizes *csizep = malloc_sizes; 657 656 ··· 674 675 return csizep->cs_cachep; 675 676 } 676 677 677 - kmem_cache_t *kmem_find_general_cachep(size_t size, 678 - unsigned int __nocast gfpflags) 678 + kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 679 679 { 680 680 return __find_general_cachep(size, gfpflags); 681 681 } ··· 1183 1185 * did not request dmaable memory, we might get it, but that 1184 1186 * would be relatively rare and ignorable. 1185 1187 */ 1186 - static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 1188 + static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) 1187 1189 { 1188 1190 struct page *page; 1189 1191 void *addr; ··· 2046 2048 2047 2049 /* Get the memory for a slab management obj. */ 2048 2050 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, 2049 - int colour_off, unsigned int __nocast local_flags) 2051 + int colour_off, gfp_t local_flags) 2050 2052 { 2051 2053 struct slab *slabp; 2052 2054 ··· 2147 2149 * Grow (by 1) the number of slabs within a cache. This is called by 2148 2150 * kmem_cache_alloc() when there are no active objs left in a cache. 2149 2151 */ 2150 - static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 2152 + static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2151 2153 { 2152 2154 struct slab *slabp; 2153 2155 void *objp; ··· 2354 2356 #define check_slabp(x,y) do { } while(0) 2355 2357 #endif 2356 2358 2357 - static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags) 2359 + static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) 2358 2360 { 2359 2361 int batchcount; 2360 2362 struct kmem_list3 *l3; ··· 2454 2456 } 2455 2457 2456 2458 static inline void 2457 - cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) 2459 + cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) 2458 2460 { 2459 2461 might_sleep_if(flags & __GFP_WAIT); 2460 2462 #if DEBUG ··· 2465 2467 #if DEBUG 2466 2468 static void * 2467 2469 cache_alloc_debugcheck_after(kmem_cache_t *cachep, 2468 - unsigned int __nocast flags, void *objp, void *caller) 2470 + gfp_t flags, void *objp, void *caller) 2469 2471 { 2470 2472 if (!objp) 2471 2473 return objp; ··· 2508 2510 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2509 2511 #endif 2510 2512 2511 - static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2513 + static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2512 2514 { 2513 2515 void* objp; 2514 2516 struct array_cache *ac; ··· 2526 2528 return objp; 2527 2529 } 2528 2530 2529 - static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2531 + static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2530 2532 { 2531 2533 unsigned long save_flags; 2532 2534 void* objp; ··· 2785 2787 * Allocate an object from this cache. The flags are only relevant 2786 2788 * if the cache has no available objects. 
2787 2789 */ 2788 - void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2790 + void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2789 2791 { 2790 2792 return __cache_alloc(cachep, flags); 2791 2793 } ··· 2846 2848 * New and improved: it will now make sure that the object gets 2847 2849 * put on the correct node list so that there is no false sharing. 2848 2850 */ 2849 - void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 2851 + void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2850 2852 { 2851 2853 unsigned long save_flags; 2852 2854 void *ptr; ··· 2873 2875 } 2874 2876 EXPORT_SYMBOL(kmem_cache_alloc_node); 2875 2877 2876 - void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) 2878 + void *kmalloc_node(size_t size, gfp_t flags, int node) 2877 2879 { 2878 2880 kmem_cache_t *cachep; 2879 2881 ··· 2906 2908 * platforms. For example, on i386, it means that the memory must come 2907 2909 * from the first 16MB. 2908 2910 */ 2909 - void *__kmalloc(size_t size, unsigned int __nocast flags) 2911 + void *__kmalloc(size_t size, gfp_t flags) 2910 2912 { 2911 2913 kmem_cache_t *cachep; 2912 2914 ··· 2995 2997 * @size: how many bytes of memory are required. 2996 2998 * @flags: the type of memory to allocate. 2997 2999 */ 2998 - void *kzalloc(size_t size, unsigned int __nocast flags) 3000 + void *kzalloc(size_t size, gfp_t flags) 2999 3001 { 3000 3002 void *ret = kmalloc(size, flags); 3001 3003 if (ret) ··· 3601 3603 * @s: the string to duplicate 3602 3604 * @gfp: the GFP mask used in the kmalloc() call when allocating memory 3603 3605 */ 3604 - char *kstrdup(const char *s, unsigned int __nocast gfp) 3606 + char *kstrdup(const char *s, gfp_t gfp) 3605 3607 { 3606 3608 size_t len; 3607 3609 char *buf;
+1 -1
mm/swap_state.c
··· 68 68 * but sets SwapCache flag and private instead of mapping and index. 69 69 */ 70 70 static int __add_to_swap_cache(struct page *page, swp_entry_t entry, 71 - unsigned int __nocast gfp_mask) 71 + gfp_t gfp_mask) 72 72 { 73 73 int error; 74 74
+2 -2
mm/vmalloc.c
··· 395 395 396 396 EXPORT_SYMBOL(vmap); 397 397 398 - void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot) 398 + void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 399 399 { 400 400 struct page **pages; 401 401 unsigned int nr_pages, array_size, i; ··· 446 446 * allocator with @gfp_mask flags. Map them into contiguous 447 447 * kernel virtual space, using a pagetable protection of @prot. 448 448 */ 449 - void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot) 449 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 450 450 { 451 451 struct vm_struct *area; 452 452
+1 -1
net/atm/atm_misc.c
··· 25 25 26 26 27 27 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 28 - unsigned int __nocast gfp_flags) 28 + gfp_t gfp_flags) 29 29 { 30 30 struct sock *sk = sk_atm(vcc); 31 31 int guess = atm_guess_pdu2truesize(pdu_size);
+1 -1
net/bluetooth/l2cap.c
··· 372 372 .obj_size = sizeof(struct l2cap_pinfo) 373 373 }; 374 374 375 - static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 375 + static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) 376 376 { 377 377 struct sock *sk; 378 378
+1 -1
net/bluetooth/rfcomm/core.c
··· 229 229 d->rx_credits = RFCOMM_DEFAULT_CREDITS; 230 230 } 231 231 232 - struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio) 232 + struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) 233 233 { 234 234 struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); 235 235 if (!d)
+1 -1
net/bluetooth/rfcomm/sock.c
··· 284 284 .obj_size = sizeof(struct rfcomm_pinfo) 285 285 }; 286 286 287 - static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 287 + static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio) 288 288 { 289 289 struct rfcomm_dlc *d; 290 290 struct sock *sk;
+1 -1
net/bluetooth/rfcomm/tty.c
··· 286 286 skb->destructor = rfcomm_wfree; 287 287 } 288 288 289 - static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority) 289 + static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) 290 290 { 291 291 if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { 292 292 struct sk_buff *skb = alloc_skb(size, priority);
+1 -1
net/bluetooth/sco.c
··· 418 418 .obj_size = sizeof(struct sco_pinfo) 419 419 }; 420 420 421 - static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 421 + static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) 422 422 { 423 423 struct sock *sk; 424 424
+1 -1
net/core/dev.c
··· 1132 1132 #endif 1133 1133 1134 1134 /* Keep head the same: replace data */ 1135 - int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) 1135 + int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask) 1136 1136 { 1137 1137 unsigned int size; 1138 1138 u8 *data;
+7 -7
net/core/skbuff.c
··· 130 130 * Buffers may only be allocated from interrupts using a @gfp_mask of 131 131 * %GFP_ATOMIC. 132 132 */ 133 - struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask, 133 + struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 134 134 int fclone) 135 135 { 136 136 struct sk_buff *skb; ··· 198 198 */ 199 199 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 200 200 unsigned int size, 201 - unsigned int __nocast gfp_mask) 201 + gfp_t gfp_mask) 202 202 { 203 203 struct sk_buff *skb; 204 204 u8 *data; ··· 361 361 * %GFP_ATOMIC. 362 362 */ 363 363 364 - struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) 364 + struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 365 365 { 366 366 struct sk_buff *n; 367 367 ··· 500 500 * header is going to be modified. Use pskb_copy() instead. 501 501 */ 502 502 503 - struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask) 503 + struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 504 504 { 505 505 int headerlen = skb->data - skb->head; 506 506 /* ··· 539 539 * The returned buffer has a reference count of 1. 540 540 */ 541 541 542 - struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask) 542 + struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 543 543 { 544 544 /* 545 545 * Allocate the copy buffer ··· 598 598 */ 599 599 600 600 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 601 - unsigned int __nocast gfp_mask) 601 + gfp_t gfp_mask) 602 602 { 603 603 int i; 604 604 u8 *data; ··· 689 689 */ 690 690 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 691 691 int newheadroom, int newtailroom, 692 - unsigned int __nocast gfp_mask) 692 + gfp_t gfp_mask) 693 693 { 694 694 /* 695 695 * Allocate the copy buffer
+5 -5
net/core/sock.c
··· 637 637 * @prot: struct proto associated with this new sock instance 638 638 * @zero_it: if we should zero the newly allocated sock 639 639 */ 640 - struct sock *sk_alloc(int family, unsigned int __nocast priority, 640 + struct sock *sk_alloc(int family, gfp_t priority, 641 641 struct proto *prot, int zero_it) 642 642 { 643 643 struct sock *sk = NULL; ··· 704 704 module_put(owner); 705 705 } 706 706 707 - struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority) 707 + struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 708 708 { 709 709 struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); 710 710 ··· 845 845 * Allocate a skb from the socket's send buffer. 846 846 */ 847 847 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 848 - unsigned int __nocast priority) 848 + gfp_t priority) 849 849 { 850 850 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 851 851 struct sk_buff * skb = alloc_skb(size, priority); ··· 861 861 * Allocate a skb from the socket's receive buffer. 862 862 */ 863 863 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 864 - unsigned int __nocast priority) 864 + gfp_t priority) 865 865 { 866 866 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 867 867 struct sk_buff *skb = alloc_skb(size, priority); ··· 876 876 /* 877 877 * Allocate a memory block from the socket's option memory buffer. 878 878 */ 879 - void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority) 879 + void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 880 880 { 881 881 if ((unsigned)size <= sysctl_optmem_max && 882 882 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+1 -1
net/dccp/ackvec.c
··· 91 91 } 92 92 93 93 struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, 94 - const unsigned int __nocast priority) 94 + const gfp_t priority) 95 95 { 96 96 struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); 97 97
+2 -2
net/dccp/ackvec.h
··· 74 74 75 75 #ifdef CONFIG_IP_DCCP_ACKVEC 76 76 extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 77 - const unsigned int __nocast priority); 77 + const gfp_t priority); 78 78 extern void dccp_ackvec_free(struct dccp_ackvec *av); 79 79 80 80 extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, ··· 93 93 } 94 94 #else /* CONFIG_IP_DCCP_ACKVEC */ 95 95 static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 96 - const unsigned int __nocast priority) 96 + const gfp_t priority) 97 97 { 98 98 return NULL; 99 99 }
+1 -1
net/dccp/ccids/lib/loss_interval.h
··· 36 36 37 37 static inline struct dccp_li_hist_entry * 38 38 dccp_li_hist_entry_new(struct dccp_li_hist *hist, 39 - const unsigned int __nocast prio) 39 + const gfp_t prio) 40 40 { 41 41 return kmem_cache_alloc(hist->dccplih_slab, prio); 42 42 }
+2 -2
net/dccp/ccids/lib/packet_history.h
··· 86 86 87 87 static inline struct dccp_tx_hist_entry * 88 88 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, 89 - const unsigned int __nocast prio) 89 + const gfp_t prio) 90 90 { 91 91 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, 92 92 prio); ··· 137 137 const struct sock *sk, 138 138 const u32 ndp, 139 139 const struct sk_buff *skb, 140 - const unsigned int __nocast prio) 140 + const gfp_t prio) 141 141 { 142 142 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, 143 143 prio);
+2 -4
net/decnet/af_decnet.c
··· 452 452 .obj_size = sizeof(struct dn_sock), 453 453 }; 454 454 455 - static struct sock *dn_alloc_sock(struct socket *sock, 456 - unsigned int __nocast gfp) 455 + static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp) 457 456 { 458 457 struct dn_scp *scp; 459 458 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); ··· 804 805 return rv; 805 806 } 806 807 807 - static int dn_confirm_accept(struct sock *sk, long *timeo, 808 - unsigned int __nocast allocation) 808 + static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) 809 809 { 810 810 struct dn_scp *scp = DN_SK(sk); 811 811 DEFINE_WAIT(wait);
+9 -11
net/decnet/dn_nsp_out.c
··· 117 117 * The eventual aim is for each socket to have a cached header size 118 118 * for its outgoing packets, and to set hdr from this when sk != NULL. 119 119 */ 120 - struct sk_buff *dn_alloc_skb(struct sock *sk, int size, 121 - unsigned int __nocast pri) 120 + struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) 122 121 { 123 122 struct sk_buff *skb; 124 123 int hdr = 64; ··· 211 212 * Returns: The number of times the packet has been sent previously 212 213 */ 213 214 static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 214 - unsigned int __nocast gfp) 215 + gfp_t gfp) 215 216 { 216 217 struct dn_skb_cb *cb = DN_SKB_CB(skb); 217 218 struct sk_buff *skb2; ··· 352 353 } 353 354 354 355 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, 355 - unsigned int __nocast gfp, int oth) 356 + gfp_t gfp, int oth) 356 357 { 357 358 struct dn_scp *scp = DN_SK(sk); 358 359 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 519 520 return 0; 520 521 } 521 522 522 - void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp) 523 + void dn_send_conn_conf(struct sock *sk, gfp_t gfp) 523 524 { 524 525 struct dn_scp *scp = DN_SK(sk); 525 526 struct sk_buff *skb = NULL; ··· 551 552 552 553 553 554 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 554 - unsigned short reason, unsigned int __nocast gfp, 555 + unsigned short reason, gfp_t gfp, 555 556 struct dst_entry *dst, 556 557 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 557 558 { ··· 594 595 595 596 596 597 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 597 - unsigned short reason, unsigned int __nocast gfp) 598 + unsigned short reason, gfp_t gfp) 598 599 { 599 600 struct dn_scp *scp = DN_SK(sk); 600 601 int ddl = 0; ··· 615 616 { 616 617 struct dn_skb_cb *cb = DN_SKB_CB(skb); 617 618 int ddl = 0; 618 - unsigned int __nocast gfp = GFP_ATOMIC; 619 + gfp_t gfp = GFP_ATOMIC; 619 620 620 621 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 621 622 NULL, cb->src_port, cb->dst_port); ··· 627 628 struct dn_scp *scp = DN_SK(sk); 628 629 struct sk_buff *skb; 629 630 unsigned char *ptr; 630 - unsigned int __nocast gfp = GFP_ATOMIC; 631 + gfp_t gfp = GFP_ATOMIC; 631 632 632 633 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 633 634 return; ··· 662 663 unsigned char menuver; 663 664 struct dn_skb_cb *cb; 664 665 unsigned char type = 1; 665 - unsigned int __nocast allocation = 666 - (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 666 + gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 667 667 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 668 668 669 669 if (!skb)
+1 -1
net/ieee80211/ieee80211_tx.c
··· 207 207 } 208 208 209 209 static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, 210 - unsigned int __nocast gfp_mask) 210 + gfp_t gfp_mask) 211 211 { 212 212 struct ieee80211_txb *txb; 213 213 int i;
+1 -1
net/ipv4/inet_connection_sock.c
··· 494 494 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); 495 495 496 496 struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, 497 - const unsigned int __nocast priority) 497 + const gfp_t priority) 498 498 { 499 499 struct sock *newsk = sk_clone(sk, priority); 500 500
+1 -1
net/ipv4/ipvs/ip_vs_app.c
··· 604 604 /* 605 605 * Replace a segment of data with a new segment 606 606 */ 607 - int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 607 + int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, 608 608 char *o_buf, int o_len, char *n_buf, int n_len) 609 609 { 610 610 struct iphdr *iph;
+1 -1
net/ipv4/tcp_output.c
··· 1610 1610 * was unread data in the receive queue. This behavior is recommended 1611 1611 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM 1612 1612 */ 1613 - void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) 1613 + void tcp_send_active_reset(struct sock *sk, gfp_t priority) 1614 1614 { 1615 1615 struct tcp_sock *tp = tcp_sk(sk); 1616 1616 struct sk_buff *skb;
+3 -3
net/key/af_key.c
··· 185 185 } 186 186 187 187 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 188 - unsigned int __nocast allocation, struct sock *sk) 188 + gfp_t allocation, struct sock *sk) 189 189 { 190 190 int err = -ENOBUFS; 191 191 ··· 217 217 #define BROADCAST_ONE 1 218 218 #define BROADCAST_REGISTERED 2 219 219 #define BROADCAST_PROMISC_ONLY 4 220 - static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation, 220 + static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, 221 221 int broadcast_flags, struct sock *one_sk) 222 222 { 223 223 struct sock *sk; ··· 1417 1417 } 1418 1418 1419 1419 static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, 1420 - unsigned int __nocast allocation) 1420 + gfp_t allocation) 1421 1421 { 1422 1422 struct sk_buff *skb; 1423 1423 struct sadb_msg *hdr;
+1 -2
net/llc/llc_conn.c
··· 867 867 * Allocates a LLC sock and initializes it. Returns the new LLC sock 868 868 * or %NULL if there's no memory available for one 869 869 */ 870 - struct sock *llc_sk_alloc(int family, unsigned int __nocast priority, 871 - struct proto *prot) 870 + struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot) 872 871 { 873 872 struct sock *sk = sk_alloc(family, priority, prot, 1); 874 873
+1 -2
net/netfilter/nfnetlink.c
··· 195 195 196 196 int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 197 197 { 198 - unsigned int __nocast allocation = 199 - in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 198 + gfp_t allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 200 199 int err = 0; 201 200 202 201 NETLINK_CB(skb).dst_group = group;
+2 -2
net/netlink/af_netlink.c
··· 758 758 } 759 759 760 760 static inline struct sk_buff *netlink_trim(struct sk_buff *skb, 761 - unsigned int __nocast allocation) 761 + gfp_t allocation) 762 762 { 763 763 int delta; 764 764 ··· 880 880 } 881 881 882 882 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 883 - u32 group, unsigned int __nocast allocation) 883 + u32 group, gfp_t allocation) 884 884 { 885 885 struct netlink_broadcast_data info; 886 886 struct hlist_node *node;
+1 -1
net/rxrpc/call.c
··· 1923 1923 size_t sioc, 1924 1924 struct kvec *siov, 1925 1925 u8 rxhdr_flags, 1926 - unsigned int __nocast alloc_flags, 1926 + gfp_t alloc_flags, 1927 1927 int dup_data, 1928 1928 size_t *size_sent) 1929 1929 {
+1 -1
net/rxrpc/connection.c
··· 522 522 uint8_t type, 523 523 int dcount, 524 524 struct kvec diov[], 525 - unsigned int __nocast alloc_flags, 525 + gfp_t alloc_flags, 526 526 struct rxrpc_message **_msg) 527 527 { 528 528 struct rxrpc_message *msg;
+5 -5
net/sctp/associola.c
··· 71 71 const struct sctp_endpoint *ep, 72 72 const struct sock *sk, 73 73 sctp_scope_t scope, 74 - unsigned int __nocast gfp) 74 + gfp_t gfp) 75 75 { 76 76 struct sctp_sock *sp; 77 77 int i; ··· 273 273 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, 274 274 const struct sock *sk, 275 275 sctp_scope_t scope, 276 - unsigned int __nocast gfp) 276 + gfp_t gfp) 277 277 { 278 278 struct sctp_association *asoc; 279 279 ··· 479 479 /* Add a transport address to an association. */ 480 480 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, 481 481 const union sctp_addr *addr, 482 - const unsigned int __nocast gfp, 482 + const gfp_t gfp, 483 483 const int peer_state) 484 484 { 485 485 struct sctp_transport *peer; ··· 1231 1231 * local endpoint and the remote peer. 1232 1232 */ 1233 1233 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1234 - unsigned int __nocast gfp) 1234 + gfp_t gfp) 1235 1235 { 1236 1236 sctp_scope_t scope; 1237 1237 int flags; ··· 1254 1254 /* Build the association's bind address list from the cookie. */ 1255 1255 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, 1256 1256 struct sctp_cookie *cookie, 1257 - unsigned int __nocast gfp) 1257 + gfp_t gfp) 1258 1258 { 1259 1259 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); 1260 1260 int var_size3 = cookie->raw_addr_list_len;
+6 -6
net/sctp/bind_addr.c
··· 53 53 54 54 /* Forward declarations for internal helpers. */ 55 55 static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, 56 - sctp_scope_t scope, unsigned int __nocast gfp, 56 + sctp_scope_t scope, gfp_t gfp, 57 57 int flags); 58 58 static void sctp_bind_addr_clean(struct sctp_bind_addr *); 59 59 ··· 64 64 */ 65 65 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 66 66 const struct sctp_bind_addr *src, 67 - sctp_scope_t scope, unsigned int __nocast gfp, 67 + sctp_scope_t scope, gfp_t gfp, 68 68 int flags) 69 69 { 70 70 struct sctp_sockaddr_entry *addr; ··· 146 146 147 147 /* Add an address to the bind address list in the SCTP_bind_addr structure. */ 148 148 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 149 - unsigned int __nocast gfp) 149 + gfp_t gfp) 150 150 { 151 151 struct sctp_sockaddr_entry *addr; 152 152 ··· 200 200 */ 201 201 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 202 202 int *addrs_len, 203 - unsigned int __nocast gfp) 203 + gfp_t gfp) 204 204 { 205 205 union sctp_params addrparms; 206 206 union sctp_params retval; ··· 252 252 * address parameters). 253 253 */ 254 254 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, 255 - int addrs_len, __u16 port, unsigned int __nocast gfp) 255 + int addrs_len, __u16 port, gfp_t gfp) 256 256 { 257 257 union sctp_addr_param *rawaddr; 258 258 struct sctp_paramhdr *param; ··· 350 350 /* Copy out addresses from the global local address list. */ 351 351 static int sctp_copy_one_addr(struct sctp_bind_addr *dest, 352 352 union sctp_addr *addr, 353 - sctp_scope_t scope, unsigned int __nocast gfp, 353 + sctp_scope_t scope, gfp_t gfp, 354 354 int flags) 355 355 { 356 356 int error = 0;
+1 -1
net/sctp/chunk.c
··· 62 62 } 63 63 64 64 /* Allocate and initialize datamsg. */ 65 - SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(unsigned int __nocast gfp) 65 + SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) 66 66 { 67 67 struct sctp_datamsg *msg; 68 68 msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
+2 -3
net/sctp/endpointola.c
··· 68 68 */ 69 69 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, 70 70 struct sock *sk, 71 - unsigned int __nocast gfp) 71 + gfp_t gfp) 72 72 { 73 73 struct sctp_sock *sp = sctp_sk(sk); 74 74 memset(ep, 0, sizeof(struct sctp_endpoint)); ··· 138 138 /* Create a sctp_endpoint with all that boring stuff initialized. 139 139 * Returns NULL if there isn't enough memory. 140 140 */ 141 - struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, 142 - unsigned int __nocast gfp) 141 + struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) 143 142 { 144 143 struct sctp_endpoint *ep; 145 144
+1 -1
net/sctp/protocol.c
··· 219 219 220 220 /* Copy the local addresses which are valid for 'scope' into 'bp'. */ 221 221 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 222 - unsigned int __nocast gfp, int copy_flags) 222 + gfp_t gfp, int copy_flags) 223 223 { 224 224 struct sctp_sockaddr_entry *addr; 225 225 int error = 0;
+7 -7
net/sctp/sm_make_chunk.c
··· 78 78 static int sctp_process_param(struct sctp_association *asoc, 79 79 union sctp_params param, 80 80 const union sctp_addr *peer_addr, 81 - unsigned int __nocast gfp); 81 + gfp_t gfp); 82 82 83 83 /* What was the inbound interface for this chunk? */ 84 84 int sctp_chunk_iif(const struct sctp_chunk *chunk) ··· 174 174 */ 175 175 struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, 176 176 const struct sctp_bind_addr *bp, 177 - unsigned int __nocast gfp, int vparam_len) 177 + gfp_t gfp, int vparam_len) 178 178 { 179 179 sctp_inithdr_t init; 180 180 union sctp_params addrs; ··· 261 261 262 262 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, 263 263 const struct sctp_chunk *chunk, 264 - unsigned int __nocast gfp, int unkparam_len) 264 + gfp_t gfp, int unkparam_len) 265 265 { 266 266 sctp_inithdr_t initack; 267 267 struct sctp_chunk *retval; ··· 1234 1234 /* Create a CLOSED association to use with an incoming packet. */ 1235 1235 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, 1236 1236 struct sctp_chunk *chunk, 1237 - unsigned int __nocast gfp) 1237 + gfp_t gfp) 1238 1238 { 1239 1239 struct sctp_association *asoc; 1240 1240 struct sk_buff *skb; ··· 1349 1349 struct sctp_association *sctp_unpack_cookie( 1350 1350 const struct sctp_endpoint *ep, 1351 1351 const struct sctp_association *asoc, 1352 - struct sctp_chunk *chunk, unsigned int __nocast gfp, 1352 + struct sctp_chunk *chunk, gfp_t gfp, 1353 1353 int *error, struct sctp_chunk **errp) 1354 1354 { 1355 1355 struct sctp_association *retval = NULL; ··· 1814 1814 */ 1815 1815 int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 1816 1816 const union sctp_addr *peer_addr, 1817 - sctp_init_chunk_t *peer_init, unsigned int __nocast gfp) 1817 + sctp_init_chunk_t *peer_init, gfp_t gfp) 1818 1818 { 1819 1819 union sctp_params param; 1820 1820 struct sctp_transport *transport; ··· 1985 1985 static int sctp_process_param(struct sctp_association *asoc, 1986 1986 union sctp_params param, 1987 1987 const union sctp_addr *peer_addr, 1988 - unsigned int __nocast gfp) 1988 + gfp_t gfp) 1989 1989 { 1990 1990 union sctp_addr addr; 1991 1991 int i;
+6 -6
net/sctp/sm_sideeffect.c
··· 63 63 void *event_arg, 64 64 sctp_disposition_t status, 65 65 sctp_cmd_seq_t *commands, 66 - unsigned int __nocast gfp); 66 + gfp_t gfp); 67 67 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, 68 68 sctp_state_t state, 69 69 struct sctp_endpoint *ep, ··· 71 71 void *event_arg, 72 72 sctp_disposition_t status, 73 73 sctp_cmd_seq_t *commands, 74 - unsigned int __nocast gfp); 74 + gfp_t gfp); 75 75 76 76 /******************************************************************** 77 77 * Helper functions ··· 498 498 struct sctp_association *asoc, 499 499 struct sctp_chunk *chunk, 500 500 sctp_init_chunk_t *peer_init, 501 - unsigned int __nocast gfp) 501 + gfp_t gfp) 502 502 { 503 503 int error; 504 504 ··· 853 853 struct sctp_endpoint *ep, 854 854 struct sctp_association *asoc, 855 855 void *event_arg, 856 - unsigned int __nocast gfp) 856 + gfp_t gfp) 857 857 { 858 858 sctp_cmd_seq_t commands; 859 859 const sctp_sm_table_entry_t *state_fn; ··· 898 898 void *event_arg, 899 899 sctp_disposition_t status, 900 900 sctp_cmd_seq_t *commands, 901 - unsigned int __nocast gfp) 901 + gfp_t gfp) 902 902 { 903 903 int error; 904 904 ··· 986 986 void *event_arg, 987 987 sctp_disposition_t status, 988 988 sctp_cmd_seq_t *commands, 989 - unsigned int __nocast gfp) 989 + gfp_t gfp) 990 990 { 991 991 int error = 0; 992 992 int force;
+1 -1
net/sctp/ssnmap.c
··· 58 58 * Allocate room to store at least 'len' contiguous TSNs. 59 59 */ 60 60 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 61 - unsigned int __nocast gfp) 61 + gfp_t gfp) 62 62 { 63 63 struct sctp_ssnmap *retval; 64 64 int size;
+2 -2
net/sctp/transport.c
··· 57 57 /* Initialize a new transport from provided memory. */ 58 58 static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, 59 59 const union sctp_addr *addr, 60 - unsigned int __nocast gfp) 60 + gfp_t gfp) 61 61 { 62 62 /* Copy in the address. */ 63 63 peer->ipaddr = *addr; ··· 122 122 123 123 /* Allocate and initialize a new transport. */ 124 124 struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, 125 - unsigned int __nocast gfp) 125 + gfp_t gfp) 126 126 { 127 127 struct sctp_transport *transport; 128 128
+9 -9
net/sctp/ulpevent.c
··· 74 74 75 75 /* Create a new sctp_ulpevent. */ 76 76 SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, 77 - unsigned int __nocast gfp) 77 + gfp_t gfp) 78 78 { 79 79 struct sctp_ulpevent *event; 80 80 struct sk_buff *skb; ··· 136 136 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( 137 137 const struct sctp_association *asoc, 138 138 __u16 flags, __u16 state, __u16 error, __u16 outbound, 139 - __u16 inbound, unsigned int __nocast gfp) 139 + __u16 inbound, gfp_t gfp) 140 140 { 141 141 struct sctp_ulpevent *event; 142 142 struct sctp_assoc_change *sac; ··· 237 237 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 238 238 const struct sctp_association *asoc, 239 239 const struct sockaddr_storage *aaddr, 240 - int flags, int state, int error, unsigned int __nocast gfp) 240 + int flags, int state, int error, gfp_t gfp) 241 241 { 242 242 struct sctp_ulpevent *event; 243 243 struct sctp_paddr_change *spc; ··· 350 350 */ 351 351 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 352 352 const struct sctp_association *asoc, struct sctp_chunk *chunk, 353 - __u16 flags, unsigned int __nocast gfp) 353 + __u16 flags, gfp_t gfp) 354 354 { 355 355 struct sctp_ulpevent *event; 356 356 struct sctp_remote_error *sre; ··· 448 448 */ 449 449 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 450 450 const struct sctp_association *asoc, struct sctp_chunk *chunk, 451 - __u16 flags, __u32 error, unsigned int __nocast gfp) 451 + __u16 flags, __u32 error, gfp_t gfp) 452 452 { 453 453 struct sctp_ulpevent *event; 454 454 struct sctp_send_failed *ssf; ··· 557 557 */ 558 558 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 559 559 const struct sctp_association *asoc, 560 - __u16 flags, unsigned int __nocast gfp) 560 + __u16 flags, gfp_t gfp) 561 561 { 562 562 struct sctp_ulpevent *event; 563 563 struct sctp_shutdown_event *sse; ··· 620 620 * 5.3.1.6 SCTP_ADAPTION_INDICATION 621 621 */ 622 622 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 623 - const struct sctp_association *asoc, unsigned int __nocast gfp) 623 + const struct sctp_association *asoc, gfp_t gfp) 624 624 { 625 625 struct sctp_ulpevent *event; 626 626 struct sctp_adaption_event *sai; ··· 657 657 */ 658 658 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 659 659 struct sctp_chunk *chunk, 660 - unsigned int __nocast gfp) 660 + gfp_t gfp) 661 661 { 662 662 struct sctp_ulpevent *event = NULL; 663 663 struct sk_buff *skb; ··· 719 719 */ 720 720 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 721 721 const struct sctp_association *asoc, __u32 indication, 722 - unsigned int __nocast gfp) 722 + gfp_t gfp) 723 723 { 724 724 struct sctp_ulpevent *event; 725 725 struct sctp_pdapi_event *pd;
+4 -4
net/sctp/ulpqueue.c
··· 100 100 101 101 /* Process an incoming DATA chunk. */ 102 102 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 103 - unsigned int __nocast gfp) 103 + gfp_t gfp) 104 104 { 105 105 struct sk_buff_head temp; 106 106 sctp_data_chunk_t *hdr; ··· 792 792 /* Partial deliver the first message as there is pressure on rwnd. */ 793 793 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, 794 794 struct sctp_chunk *chunk, 795 - unsigned int __nocast gfp) 795 + gfp_t gfp) 796 796 { 797 797 struct sctp_ulpevent *event; 798 798 struct sctp_association *asoc; ··· 816 816 817 817 /* Renege some packets to make room for an incoming chunk. */ 818 818 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 819 - unsigned int __nocast gfp) 819 + gfp_t gfp) 820 820 { 821 821 struct sctp_association *asoc; 822 822 __u16 needed, freed; ··· 855 855 /* Notify the application if an association is aborted and in 856 856 * partial delivery mode. Send up any pending received messages. 857 857 */ 858 - void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, unsigned int __nocast gfp) 858 + void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) 859 859 { 860 860 struct sctp_ulpevent *ev = NULL; 861 861 struct sock *sk;
+1 -1
net/sunrpc/sched.c
··· 719 719 void * 720 720 rpc_malloc(struct rpc_task *task, size_t size) 721 721 { 722 - unsigned int __nocast gfp; 722 + gfp_t gfp; 723 723 724 724 if (task->tk_flags & RPC_TASK_SWAPPER) 725 725 gfp = GFP_ATOMIC;
+1 -1
net/xfrm/xfrm_policy.c
··· 225 225 * SPD calls. 226 226 */ 227 227 228 - struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp) 228 + struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp) 229 229 { 230 230 struct xfrm_policy *policy; 231 231
+1 -1
sound/core/memalloc.c
··· 106 106 107 107 static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, 108 108 dma_addr_t *dma_handle, 109 - unsigned int __nocast flags) 109 + gfp_t flags) 110 110 { 111 111 void *ret; 112 112 u64 dma_mask, coherent_dma_mask;
+5 -5
sound/core/memory.c
··· 89 89 } 90 90 } 91 91 92 - static void *__snd_kmalloc(size_t size, unsigned int __nocast flags, void *caller) 92 + static void *__snd_kmalloc(size_t size, gfp_t flags, void *caller) 93 93 { 94 94 unsigned long cpu_flags; 95 95 struct snd_alloc_track *t; ··· 111 111 } 112 112 113 113 #define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0)); 114 - void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags) 114 + void *snd_hidden_kmalloc(size_t size, gfp_t flags) 115 115 { 116 116 return _snd_kmalloc(size, flags); 117 117 } 118 118 119 - void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags) 119 + void *snd_hidden_kzalloc(size_t size, gfp_t flags) 120 120 { 121 121 void *ret = _snd_kmalloc(size, flags); 122 122 if (ret) ··· 125 125 } 126 126 EXPORT_SYMBOL(snd_hidden_kzalloc); 127 127 128 - void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags) 128 + void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags) 129 129 { 130 130 void *ret = NULL; 131 131 if (n != 0 && size > INT_MAX / n) ··· 190 190 snd_wrapper_vfree(obj); 191 191 } 192 192 193 - char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags) 193 + char *snd_hidden_kstrdup(const char *s, gfp_t flags) 194 194 { 195 195 int len; 196 196 char *buf;
+1 -1
sound/core/seq/instr/ainstr_iw.c
··· 58 58 iwffff_xenv_t *ex, 59 59 char __user **data, 60 60 long *len, 61 - unsigned int __nocast gfp_mask) 61 + gfp_t gfp_mask) 62 62 { 63 63 __u32 stype; 64 64 iwffff_env_record_t *rp, *rp_last;
+1 -1
sound/core/wrappers.c
··· 27 27 #include <linux/fs.h> 28 28 29 29 #ifdef CONFIG_SND_DEBUG_MEMORY 30 - void *snd_wrapper_kmalloc(size_t size, unsigned int __nocast flags) 30 + void *snd_wrapper_kmalloc(size_t size, gfp_t flags) 31 31 { 32 32 return kmalloc(size, flags); 33 33 }
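Illustrative aside, not part of the diff above: most of the converted callers pass their gfp_t parameter straight down to an allocator, and a few of the hunks (net/netfilter/nfnetlink.c, net/decnet/dn_nsp_out.c) pick the value at runtime depending on execution context. The sketch below shows that pattern on its own; example_record and example_record_alloc are hypothetical names used only for this example, not kernel symbols.

/*
 * Hypothetical example, not taken from the patch: choose a gfp_t based
 * on context, as the nfnetlink.c and dn_nsp_out.c hunks do.  Atomic
 * context must not sleep, so fall back to GFP_ATOMIC there and use
 * GFP_KERNEL from process context; kmalloc() now takes a gfp_t.
 */
#include <linux/gfp.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

struct example_record {
	int payload;
};

static struct example_record *example_record_alloc(void)
{
	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(sizeof(struct example_record), gfp);
}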