[PATCH] gfp flags annotations - part 1

- added typedef unsigned int __nocast gfp_t;

- replaced __nocast uses for gfp flags with gfp_t - it gives exactly
the same warnings as far as sparse is concerned, doesn't change the
generated code (from gcc's point of view we have merely replaced
unsigned int with a typedef) and documents what's going on far better.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Al Viro, committed by Linus Torvalds · dd0fc66f 3b0e77bd

+340 -360
+1 -1
arch/cris/arch-v32/drivers/pci/dma.c
··· 24 }; 25 26 void *dma_alloc_coherent(struct device *dev, size_t size, 27 - dma_addr_t *dma_handle, unsigned int __nocast gfp) 28 { 29 void *ret; 30 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
··· 24 }; 25 26 void *dma_alloc_coherent(struct device *dev, size_t size, 27 + dma_addr_t *dma_handle, gfp_t gfp) 28 { 29 void *ret; 30 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+1 -1
arch/i386/kernel/pci-dma.c
··· 23 }; 24 25 void *dma_alloc_coherent(struct device *dev, size_t size, 26 - dma_addr_t *dma_handle, unsigned int __nocast gfp) 27 { 28 void *ret; 29 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
··· 23 }; 24 25 void *dma_alloc_coherent(struct device *dev, size_t size, 26 + dma_addr_t *dma_handle, gfp_t gfp) 27 { 28 void *ret; 29 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+1 -1
arch/ppc64/kernel/bpa_iommu.c
··· 310 311 312 static void *bpa_alloc_coherent(struct device *hwdev, size_t size, 313 - dma_addr_t *dma_handle, unsigned int __nocast flag) 314 { 315 void *ret; 316
··· 310 311 312 static void *bpa_alloc_coherent(struct device *hwdev, size_t size, 313 + dma_addr_t *dma_handle, gfp_t flag) 314 { 315 void *ret; 316
+1 -1
arch/ppc64/kernel/dma.c
··· 53 EXPORT_SYMBOL(dma_set_mask); 54 55 void *dma_alloc_coherent(struct device *dev, size_t size, 56 - dma_addr_t *dma_handle, unsigned int __nocast flag) 57 { 58 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 59
··· 53 EXPORT_SYMBOL(dma_set_mask); 54 55 void *dma_alloc_coherent(struct device *dev, size_t size, 56 + dma_addr_t *dma_handle, gfp_t flag) 57 { 58 struct dma_mapping_ops *dma_ops = get_dma_ops(dev); 59
+1 -1
arch/ppc64/kernel/iommu.c
··· 519 * to the dma address (mapping) of the first page. 520 */ 521 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 522 - dma_addr_t *dma_handle, unsigned int __nocast flag) 523 { 524 void *ret = NULL; 525 dma_addr_t mapping;
··· 519 * to the dma address (mapping) of the first page. 520 */ 521 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 522 + dma_addr_t *dma_handle, gfp_t flag) 523 { 524 void *ret = NULL; 525 dma_addr_t mapping;
+1 -1
arch/ppc64/kernel/pci_direct_iommu.c
··· 31 #include "pci.h" 32 33 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 34 - dma_addr_t *dma_handle, unsigned int __nocast flag) 35 { 36 void *ret; 37
··· 31 #include "pci.h" 32 33 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 34 + dma_addr_t *dma_handle, gfp_t flag) 35 { 36 void *ret; 37
+1 -1
arch/ppc64/kernel/pci_iommu.c
··· 76 * to the dma address (mapping) of the first page. 77 */ 78 static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, 79 - dma_addr_t *dma_handle, unsigned int __nocast flag) 80 { 81 return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, 82 flag);
··· 76 * to the dma address (mapping) of the first page. 77 */ 78 static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, 79 + dma_addr_t *dma_handle, gfp_t flag) 80 { 81 return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, 82 flag);
+1 -1
arch/ppc64/kernel/vio.c
··· 218 } 219 220 static void *vio_alloc_coherent(struct device *dev, size_t size, 221 - dma_addr_t *dma_handle, unsigned int __nocast flag) 222 { 223 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, 224 dma_handle, flag);
··· 218 } 219 220 static void *vio_alloc_coherent(struct device *dev, size_t size, 221 + dma_addr_t *dma_handle, gfp_t flag) 222 { 223 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, 224 dma_handle, flag);
+1 -1
drivers/atm/ambassador.c
··· 795 } 796 797 static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, 798 - unsigned int __nocast priority) 799 { 800 rx_in rx; 801 amb_rxq * rxq;
··· 795 } 796 797 static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, 798 + gfp_t priority) 799 { 800 rx_in rx; 801 amb_rxq * rxq;
+2 -3
drivers/atm/firestream.c
··· 1374 } 1375 } 1376 1377 - static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags, 1378 - int alignment) 1379 { 1380 void *t; 1381 ··· 1465 working again after that... -- REW */ 1466 1467 static void top_off_fp (struct fs_dev *dev, struct freepool *fp, 1468 - unsigned int __nocast gfp_flags) 1469 { 1470 struct FS_BPENTRY *qe, *ne; 1471 struct sk_buff *skb;
··· 1374 } 1375 } 1376 1377 + static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment) 1378 { 1379 void *t; 1380 ··· 1466 working again after that... -- REW */ 1467 1468 static void top_off_fp (struct fs_dev *dev, struct freepool *fp, 1469 + gfp_t gfp_flags) 1470 { 1471 struct FS_BPENTRY *qe, *ne; 1472 struct sk_buff *skb;
+1 -1
drivers/atm/fore200e.c
··· 178 179 180 static void* 181 - fore200e_kmalloc(int size, unsigned int __nocast flags) 182 { 183 void *chunk = kzalloc(size, flags); 184
··· 178 179 180 static void* 181 + fore200e_kmalloc(int size, gfp_t flags) 182 { 183 void *chunk = kzalloc(size, flags); 184
+2 -3
drivers/base/dmapool.c
··· 156 157 158 static struct dma_page * 159 - pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags) 160 { 161 struct dma_page *page; 162 int mapsize; ··· 262 * If such a memory block can't be allocated, null is returned. 263 */ 264 void * 265 - dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags, 266 - dma_addr_t *handle) 267 { 268 unsigned long flags; 269 struct dma_page *page;
··· 156 157 158 static struct dma_page * 159 + pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) 160 { 161 struct dma_page *page; 162 int mapsize; ··· 262 * If such a memory block can't be allocated, null is returned. 263 */ 264 void * 265 + dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) 266 { 267 unsigned long flags; 268 struct dma_page *page;
+2 -2
drivers/block/pktcdvd.c
··· 229 return 1; 230 } 231 232 - static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data) 233 { 234 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); 235 } ··· 2082 } 2083 2084 2085 - static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data) 2086 { 2087 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); 2088 }
··· 229 return 1; 230 } 231 232 + static void *pkt_rb_alloc(gfp_t gfp_mask, void *data) 233 { 234 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); 235 } ··· 2082 } 2083 2084 2085 + static void *psd_pool_alloc(gfp_t gfp_mask, void *data) 2086 { 2087 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); 2088 }
+1 -1
drivers/bluetooth/bpa10x.c
··· 308 } 309 310 static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, 311 - size_t size, unsigned int __nocast flags, void *data) 312 { 313 struct urb *urb; 314 struct usb_ctrlrequest *cr;
··· 308 } 309 310 static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, 311 + size_t size, gfp_t flags, void *data) 312 { 313 struct urb *urb; 314 struct usb_ctrlrequest *cr;
+1 -1
drivers/bluetooth/hci_usb.c
··· 132 { } /* Terminating entry */ 133 }; 134 135 - static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp) 136 { 137 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 138 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
··· 132 { } /* Terminating entry */ 133 }; 134 135 + static struct _urb *_urb_alloc(int isoc, gfp_t gfp) 136 { 137 struct _urb *_urb = kmalloc(sizeof(struct _urb) + 138 sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
+1 -2
drivers/connector/connector.c
··· 69 * a new message. 70 * 71 */ 72 - int cn_netlink_send(struct cn_msg *msg, u32 __group, 73 - unsigned int __nocast gfp_mask) 74 { 75 struct cn_callback_entry *__cbq; 76 unsigned int size;
··· 69 * a new message. 70 * 71 */ 72 + int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) 73 { 74 struct cn_callback_entry *__cbq; 75 unsigned int size;
+1 -1
drivers/ieee1394/raw1394.c
··· 98 99 static void queue_complete_cb(struct pending_request *req); 100 101 - static struct pending_request *__alloc_pending_request(unsigned int __nocast flags) 102 { 103 struct pending_request *req; 104
··· 98 99 static void queue_complete_cb(struct pending_request *req); 100 101 + static struct pending_request *__alloc_pending_request(gfp_t flags) 102 { 103 struct pending_request *req; 104
+1 -1
drivers/infiniband/core/mad.c
··· 783 u32 remote_qpn, u16 pkey_index, 784 struct ib_ah *ah, int rmpp_active, 785 int hdr_len, int data_len, 786 - unsigned int __nocast gfp_mask) 787 { 788 struct ib_mad_agent_private *mad_agent_priv; 789 struct ib_mad_send_buf *send_buf;
··· 783 u32 remote_qpn, u16 pkey_index, 784 struct ib_ah *ah, int rmpp_active, 785 int hdr_len, int data_len, 786 + gfp_t gfp_mask) 787 { 788 struct ib_mad_agent_private *mad_agent_priv; 789 struct ib_mad_send_buf *send_buf;
+3 -3
drivers/infiniband/core/sa_query.c
··· 574 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 575 struct ib_sa_path_rec *rec, 576 ib_sa_comp_mask comp_mask, 577 - int timeout_ms, unsigned int __nocast gfp_mask, 578 void (*callback)(int status, 579 struct ib_sa_path_rec *resp, 580 void *context), ··· 676 int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, 677 struct ib_sa_service_rec *rec, 678 ib_sa_comp_mask comp_mask, 679 - int timeout_ms, unsigned int __nocast gfp_mask, 680 void (*callback)(int status, 681 struct ib_sa_service_rec *resp, 682 void *context), ··· 759 u8 method, 760 struct ib_sa_mcmember_rec *rec, 761 ib_sa_comp_mask comp_mask, 762 - int timeout_ms, unsigned int __nocast gfp_mask, 763 void (*callback)(int status, 764 struct ib_sa_mcmember_rec *resp, 765 void *context),
··· 574 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 575 struct ib_sa_path_rec *rec, 576 ib_sa_comp_mask comp_mask, 577 + int timeout_ms, gfp_t gfp_mask, 578 void (*callback)(int status, 579 struct ib_sa_path_rec *resp, 580 void *context), ··· 676 int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, 677 struct ib_sa_service_rec *rec, 678 ib_sa_comp_mask comp_mask, 679 + int timeout_ms, gfp_t gfp_mask, 680 void (*callback)(int status, 681 struct ib_sa_service_rec *resp, 682 void *context), ··· 759 u8 method, 760 struct ib_sa_mcmember_rec *rec, 761 ib_sa_comp_mask comp_mask, 762 + int timeout_ms, gfp_t gfp_mask, 763 void (*callback)(int status, 764 struct ib_sa_mcmember_rec *resp, 765 void *context),
+1 -1
drivers/md/dm-crypt.c
··· 96 /* 97 * Mempool alloc and free functions for the page 98 */ 99 - static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data) 100 { 101 return alloc_page(gfp_mask); 102 }
··· 96 /* 97 * Mempool alloc and free functions for the page 98 */ 99 + static void *mempool_alloc_page(gfp_t gfp_mask, void *data) 100 { 101 return alloc_page(gfp_mask); 102 }
+1 -1
drivers/md/dm-io.c
··· 32 static unsigned _num_ios; 33 static mempool_t *_io_pool; 34 35 - static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data) 36 { 37 return kmalloc(sizeof(struct io), gfp_mask); 38 }
··· 32 static unsigned _num_ios; 33 static mempool_t *_io_pool; 34 35 + static void *alloc_io(gfp_t gfp_mask, void *pool_data) 36 { 37 return kmalloc(sizeof(struct io), gfp_mask); 38 }
+1 -1
drivers/md/dm-raid1.c
··· 122 /* FIXME move this */ 123 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 124 125 - static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data) 126 { 127 return kmalloc(sizeof(struct region), gfp_mask); 128 }
··· 122 /* FIXME move this */ 123 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 124 125 + static void *region_alloc(gfp_t gfp_mask, void *pool_data) 126 { 127 return kmalloc(sizeof(struct region), gfp_mask); 128 }
+1 -1
drivers/md/multipath.c
··· 38 static mdk_personality_t multipath_personality; 39 40 41 - static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data) 42 { 43 struct multipath_bh *mpb; 44 mpb = kmalloc(sizeof(*mpb), gfp_flags);
··· 38 static mdk_personality_t multipath_personality; 39 40 41 + static void *mp_pool_alloc(gfp_t gfp_flags, void *data) 42 { 43 struct multipath_bh *mpb; 44 mpb = kmalloc(sizeof(*mpb), gfp_flags);
+2 -2
drivers/md/raid1.c
··· 52 static void unplug_slaves(mddev_t *mddev); 53 54 55 - static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) 56 { 57 struct pool_info *pi = data; 58 r1bio_t *r1_bio; ··· 79 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 80 #define RESYNC_WINDOW (2048*1024) 81 82 - static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) 83 { 84 struct pool_info *pi = data; 85 struct page *page;
··· 52 static void unplug_slaves(mddev_t *mddev); 53 54 55 + static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 56 { 57 struct pool_info *pi = data; 58 r1bio_t *r1_bio; ··· 79 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 80 #define RESYNC_WINDOW (2048*1024) 81 82 + static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 83 { 84 struct pool_info *pi = data; 85 struct page *page;
+2 -2
drivers/md/raid10.c
··· 47 48 static void unplug_slaves(mddev_t *mddev); 49 50 - static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) 51 { 52 conf_t *conf = data; 53 r10bio_t *r10_bio; ··· 81 * one for write (we recover only one drive per r10buf) 82 * 83 */ 84 - static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) 85 { 86 conf_t *conf = data; 87 struct page *page;
··· 47 48 static void unplug_slaves(mddev_t *mddev); 49 50 + static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) 51 { 52 conf_t *conf = data; 53 r10bio_t *r10_bio; ··· 81 * one for write (we recover only one drive per r10buf) 82 * 83 */ 84 + static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) 85 { 86 conf_t *conf = data; 87 struct page *page;
+1 -1
drivers/net/bonding/bond_main.c
··· 1290 * Copy all the Multicast addresses from src to the bonding device dst 1291 */ 1292 static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, 1293 - unsigned int __nocast gfp_flag) 1294 { 1295 struct dev_mc_list *dmi, *new_dmi; 1296
··· 1290 * Copy all the Multicast addresses from src to the bonding device dst 1291 */ 1292 static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, 1293 + gfp_t gfp_flag) 1294 { 1295 struct dev_mc_list *dmi, *new_dmi; 1296
+1 -1
drivers/net/ns83820.c
··· 584 return 0; 585 } 586 587 - static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp) 588 { 589 struct ns83820 *dev = PRIV(ndev); 590 unsigned i;
··· 584 return 0; 585 } 586 587 + static inline int rx_refill(struct net_device *ndev, gfp_t gfp) 588 { 589 struct ns83820 *dev = PRIV(ndev); 590 unsigned i;
+1 -1
drivers/net/sungem.h
··· 1036 #define ALIGNED_RX_SKB_ADDR(addr) \ 1037 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 1038 static __inline__ struct sk_buff *gem_alloc_skb(int size, 1039 - unsigned int __nocast gfp_flags) 1040 { 1041 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 1042
··· 1036 #define ALIGNED_RX_SKB_ADDR(addr) \ 1037 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 1038 static __inline__ struct sk_buff *gem_alloc_skb(int size, 1039 + gfp_t gfp_flags) 1040 { 1041 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 1042
+1 -1
drivers/s390/scsi/zfcp_aux.c
··· 833 } 834 835 static void * 836 - zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size) 837 { 838 return kmalloc((size_t) size, gfp_mask); 839 }
··· 833 } 834 835 static void * 836 + zfcp_mempool_alloc(gfp_t gfp_mask, void *size) 837 { 838 return kmalloc((size_t) size, gfp_mask); 839 }
+5 -5
fs/bio.c
··· 75 */ 76 static struct bio_set *fs_bio_set; 77 78 - static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) 79 { 80 struct bio_vec *bvl; 81 struct biovec_slab *bp; ··· 155 * allocate bio and iovecs from the memory pools specified by the 156 * bio_set structure. 157 **/ 158 - struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs) 159 { 160 struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); 161 ··· 181 return bio; 182 } 183 184 - struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) 185 { 186 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 187 ··· 277 * 278 * Like __bio_clone, only also allocates the returned bio 279 */ 280 - struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask) 281 { 282 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); 283 ··· 1078 return bp; 1079 } 1080 1081 - static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data) 1082 { 1083 return kmalloc(sizeof(struct bio_pair), gfp_flags); 1084 }
··· 75 */ 76 static struct bio_set *fs_bio_set; 77 78 + static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) 79 { 80 struct bio_vec *bvl; 81 struct biovec_slab *bp; ··· 155 * allocate bio and iovecs from the memory pools specified by the 156 * bio_set structure. 157 **/ 158 + struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 159 { 160 struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); 161 ··· 181 return bio; 182 } 183 184 + struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) 185 { 186 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 187 ··· 277 * 278 * Like __bio_clone, only also allocates the returned bio 279 */ 280 + struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) 281 { 282 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); 283 ··· 1078 return bp; 1079 } 1080 1081 + static void *bio_pair_alloc(gfp_t gfp_flags, void *data) 1082 { 1083 return kmalloc(sizeof(struct bio_pair), gfp_flags); 1084 }
+1 -1
fs/buffer.c
··· 3045 buffer_heads_over_limit = (tot > max_buffer_heads); 3046 } 3047 3048 - struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags) 3049 { 3050 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3051 if (ret) {
··· 3045 buffer_heads_over_limit = (tot > max_buffer_heads); 3046 } 3047 3048 + struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) 3049 { 3050 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3051 if (ret) {
+1 -1
fs/mpage.c
··· 102 static struct bio * 103 mpage_alloc(struct block_device *bdev, 104 sector_t first_sector, int nr_vecs, 105 - unsigned int __nocast gfp_flags) 106 { 107 struct bio *bio; 108
··· 102 static struct bio * 103 mpage_alloc(struct block_device *bdev, 104 sector_t first_sector, int nr_vecs, 105 + gfp_t gfp_flags) 106 { 107 struct bio *bio; 108
+1 -1
fs/ntfs/malloc.h
··· 40 * Depending on @gfp_mask the allocation may be guaranteed to succeed. 41 */ 42 static inline void *__ntfs_malloc(unsigned long size, 43 - unsigned int __nocast gfp_mask) 44 { 45 if (likely(size <= PAGE_SIZE)) { 46 BUG_ON(!size);
··· 40 * Depending on @gfp_mask the allocation may be guaranteed to succeed. 41 */ 42 static inline void *__ntfs_malloc(unsigned long size, 43 + gfp_t gfp_mask) 44 { 45 if (likely(size <= PAGE_SIZE)) { 46 BUG_ON(!size);
+3 -3
fs/posix_acl.c
··· 35 * Allocate a new ACL with the specified number of entries. 36 */ 37 struct posix_acl * 38 - posix_acl_alloc(int count, unsigned int __nocast flags) 39 { 40 const size_t size = sizeof(struct posix_acl) + 41 count * sizeof(struct posix_acl_entry); ··· 51 * Clone an ACL. 52 */ 53 struct posix_acl * 54 - posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags) 55 { 56 struct posix_acl *clone = NULL; 57 ··· 185 * Create an ACL representing the file mode permission bits of an inode. 186 */ 187 struct posix_acl * 188 - posix_acl_from_mode(mode_t mode, unsigned int __nocast flags) 189 { 190 struct posix_acl *acl = posix_acl_alloc(3, flags); 191 if (!acl)
··· 35 * Allocate a new ACL with the specified number of entries. 36 */ 37 struct posix_acl * 38 + posix_acl_alloc(int count, gfp_t flags) 39 { 40 const size_t size = sizeof(struct posix_acl) + 41 count * sizeof(struct posix_acl_entry); ··· 51 * Clone an ACL. 52 */ 53 struct posix_acl * 54 + posix_acl_clone(const struct posix_acl *acl, gfp_t flags) 55 { 56 struct posix_acl *clone = NULL; 57 ··· 185 * Create an ACL representing the file mode permission bits of an inode. 186 */ 187 struct posix_acl * 188 + posix_acl_from_mode(mode_t mode, gfp_t flags) 189 { 190 struct posix_acl *acl = posix_acl_alloc(3, flags); 191 if (!acl)
+5 -5
fs/xfs/linux-2.6/kmem.c
··· 45 46 47 void * 48 - kmem_alloc(size_t size, unsigned int __nocast flags) 49 { 50 int retries = 0; 51 unsigned int lflags = kmem_flags_convert(flags); ··· 67 } 68 69 void * 70 - kmem_zalloc(size_t size, unsigned int __nocast flags) 71 { 72 void *ptr; 73 ··· 90 91 void * 92 kmem_realloc(void *ptr, size_t newsize, size_t oldsize, 93 - unsigned int __nocast flags) 94 { 95 void *new; 96 ··· 105 } 106 107 void * 108 - kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) 109 { 110 int retries = 0; 111 unsigned int lflags = kmem_flags_convert(flags); ··· 124 } 125 126 void * 127 - kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) 128 { 129 void *ptr; 130
··· 45 46 47 void * 48 + kmem_alloc(size_t size, gfp_t flags) 49 { 50 int retries = 0; 51 unsigned int lflags = kmem_flags_convert(flags); ··· 67 } 68 69 void * 70 + kmem_zalloc(size_t size, gfp_t flags) 71 { 72 void *ptr; 73 ··· 90 91 void * 92 kmem_realloc(void *ptr, size_t newsize, size_t oldsize, 93 + gfp_t flags) 94 { 95 void *new; 96 ··· 105 } 106 107 void * 108 + kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags) 109 { 110 int retries = 0; 111 unsigned int lflags = kmem_flags_convert(flags); ··· 124 } 125 126 void * 127 + kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags) 128 { 129 void *ptr; 130
+6 -7
fs/xfs/linux-2.6/kmem.h
··· 81 *(NSTATEP) = *(OSTATEP); \ 82 } while (0) 83 84 - static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags) 85 { 86 unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 87 ··· 125 BUG(); 126 } 127 128 - extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); 129 - extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 130 131 - extern void *kmem_alloc(size_t, unsigned int __nocast); 132 - extern void *kmem_realloc(void *, size_t, size_t, 133 - unsigned int __nocast); 134 - extern void *kmem_zalloc(size_t, unsigned int __nocast); 135 extern void kmem_free(void *, size_t); 136 137 typedef struct shrinker *kmem_shaker_t;
··· 81 *(NSTATEP) = *(OSTATEP); \ 82 } while (0) 83 84 + static __inline unsigned int kmem_flags_convert(gfp_t flags) 85 { 86 unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 87 ··· 125 BUG(); 126 } 127 128 + extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t); 129 + extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t); 130 131 + extern void *kmem_alloc(size_t, gfp_t); 132 + extern void *kmem_realloc(void *, size_t, size_t, gfp_t); 133 + extern void *kmem_zalloc(size_t, gfp_t); 134 extern void kmem_free(void *, size_t); 135 136 typedef struct shrinker *kmem_shaker_t;
+2 -2
include/asm-generic/dma-mapping.h
··· 35 36 static inline void * 37 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 38 - unsigned int __nocast flag) 39 { 40 BUG_ON(dev->bus != &pci_bus_type); 41 ··· 168 169 static inline void * 170 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 171 - unsigned int __nocast flag) 172 { 173 BUG(); 174 return NULL;
··· 35 36 static inline void * 37 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 38 + gfp_t flag) 39 { 40 BUG_ON(dev->bus != &pci_bus_type); 41 ··· 168 169 static inline void * 170 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 171 + gfp_t flag) 172 { 173 BUG(); 174 return NULL;
+1 -1
include/asm-i386/dma-mapping.h
··· 11 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 12 13 void *dma_alloc_coherent(struct device *dev, size_t size, 14 - dma_addr_t *dma_handle, unsigned int __nocast flag); 15 16 void dma_free_coherent(struct device *dev, size_t size, 17 void *vaddr, dma_addr_t dma_handle);
··· 11 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 12 13 void *dma_alloc_coherent(struct device *dev, size_t size, 14 + dma_addr_t *dma_handle, gfp_t flag); 15 16 void dma_free_coherent(struct device *dev, size_t size, 17 void *vaddr, dma_addr_t dma_handle);
+1 -1
include/asm-ppc/dma-mapping.h
··· 61 62 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 63 dma_addr_t * dma_handle, 64 - unsigned int __nocast gfp) 65 { 66 #ifdef CONFIG_NOT_COHERENT_CACHE 67 return __dma_alloc_coherent(size, dma_handle, gfp);
··· 61 62 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 63 dma_addr_t * dma_handle, 64 + gfp_t gfp) 65 { 66 #ifdef CONFIG_NOT_COHERENT_CACHE 67 return __dma_alloc_coherent(size, dma_handle, gfp);
+2 -2
include/asm-ppc64/dma-mapping.h
··· 19 extern int dma_supported(struct device *dev, u64 mask); 20 extern int dma_set_mask(struct device *dev, u64 dma_mask); 21 extern void *dma_alloc_coherent(struct device *dev, size_t size, 22 - dma_addr_t *dma_handle, unsigned int __nocast flag); 23 extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 24 dma_addr_t dma_handle); 25 extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, ··· 118 */ 119 struct dma_mapping_ops { 120 void * (*alloc_coherent)(struct device *dev, size_t size, 121 - dma_addr_t *dma_handle, unsigned int __nocast flag); 122 void (*free_coherent)(struct device *dev, size_t size, 123 void *vaddr, dma_addr_t dma_handle); 124 dma_addr_t (*map_single)(struct device *dev, void *ptr,
··· 19 extern int dma_supported(struct device *dev, u64 mask); 20 extern int dma_set_mask(struct device *dev, u64 dma_mask); 21 extern void *dma_alloc_coherent(struct device *dev, size_t size, 22 + dma_addr_t *dma_handle, gfp_t flag); 23 extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 24 dma_addr_t dma_handle); 25 extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, ··· 118 */ 119 struct dma_mapping_ops { 120 void * (*alloc_coherent)(struct device *dev, size_t size, 121 + dma_addr_t *dma_handle, gfp_t flag); 122 void (*free_coherent)(struct device *dev, size_t size, 123 void *vaddr, dma_addr_t dma_handle); 124 dma_addr_t (*map_single)(struct device *dev, void *ptr,
+1 -1
include/asm-ppc64/iommu.h
··· 122 int nelems, enum dma_data_direction direction); 123 124 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 125 - dma_addr_t *dma_handle, unsigned int __nocast flag); 126 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, 127 void *vaddr, dma_addr_t dma_handle); 128 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
··· 122 int nelems, enum dma_data_direction direction); 123 124 extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, 125 + dma_addr_t *dma_handle, gfp_t flag); 126 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, 127 void *vaddr, dma_addr_t dma_handle); 128 extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+1 -1
include/linux/atmdev.h
··· 467 468 int atm_charge(struct atm_vcc *vcc,int truesize); 469 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 470 - unsigned int __nocast gfp_flags); 471 int atm_pcr_goal(struct atm_trafprm *tp); 472 473 void vcc_release_async(struct atm_vcc *vcc, int reply);
··· 467 468 int atm_charge(struct atm_vcc *vcc,int truesize); 469 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 470 + gfp_t gfp_flags); 471 int atm_pcr_goal(struct atm_trafprm *tp); 472 473 void vcc_release_async(struct atm_vcc *vcc, int reply);
+3 -3
include/linux/bio.h
··· 276 extern struct bio_set *bioset_create(int, int, int); 277 extern void bioset_free(struct bio_set *); 278 279 - extern struct bio *bio_alloc(unsigned int __nocast, int); 280 - extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *); 281 extern void bio_put(struct bio *); 282 extern void bio_free(struct bio *, struct bio_set *); 283 ··· 287 extern int bio_hw_segments(struct request_queue *, struct bio *); 288 289 extern void __bio_clone(struct bio *, struct bio *); 290 - extern struct bio *bio_clone(struct bio *, unsigned int __nocast); 291 292 extern void bio_init(struct bio *); 293
··· 276 extern struct bio_set *bioset_create(int, int, int); 277 extern void bioset_free(struct bio_set *); 278 279 + extern struct bio *bio_alloc(gfp_t, int); 280 + extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 281 extern void bio_put(struct bio *); 282 extern void bio_free(struct bio *, struct bio_set *); 283 ··· 287 extern int bio_hw_segments(struct request_queue *, struct bio *); 288 289 extern void __bio_clone(struct bio *, struct bio *); 290 + extern struct bio *bio_clone(struct bio *, gfp_t); 291 292 extern void bio_init(struct bio *); 293
+1 -1
include/linux/buffer_head.h
··· 172 void __bforget(struct buffer_head *); 173 void __breadahead(struct block_device *, sector_t block, int size); 174 struct buffer_head *__bread(struct block_device *, sector_t block, int size); 175 - struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags); 176 void free_buffer_head(struct buffer_head * bh); 177 void FASTCALL(unlock_buffer(struct buffer_head *bh)); 178 void FASTCALL(__lock_buffer(struct buffer_head *bh));
··· 172 void __bforget(struct buffer_head *); 173 void __breadahead(struct block_device *, sector_t block, int size); 174 struct buffer_head *__bread(struct block_device *, sector_t block, int size); 175 + struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 176 void free_buffer_head(struct buffer_head * bh); 177 void FASTCALL(unlock_buffer(struct buffer_head *bh)); 178 void FASTCALL(__lock_buffer(struct buffer_head *bh));
+1 -1
include/linux/connector.h
··· 149 150 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 151 void cn_del_callback(struct cb_id *); 152 - int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast); 153 154 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 155 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
··· 149 150 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 151 void cn_del_callback(struct cb_id *); 152 + int cn_netlink_send(struct cn_msg *, u32, gfp_t); 153 154 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 155 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+2 -3
include/linux/cpuset.h
··· 23 void cpuset_update_current_mems_allowed(void); 24 void cpuset_restrict_to_mems_allowed(unsigned long *nodes); 25 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 26 - extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask); 27 extern int cpuset_excl_nodes_overlap(const struct task_struct *p); 28 extern struct file_operations proc_cpuset_operations; 29 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); ··· 49 return 1; 50 } 51 52 - static inline int cpuset_zone_allowed(struct zone *z, 53 - unsigned int __nocast gfp_mask) 54 { 55 return 1; 56 }
··· 23 void cpuset_update_current_mems_allowed(void); 24 void cpuset_restrict_to_mems_allowed(unsigned long *nodes); 25 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 26 + extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask); 27 extern int cpuset_excl_nodes_overlap(const struct task_struct *p); 28 extern struct file_operations proc_cpuset_operations; 29 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); ··· 49 return 1; 50 } 51 52 + static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 53 { 54 return 1; 55 }
+1 -1
include/linux/dmapool.h
··· 19 20 void dma_pool_destroy(struct dma_pool *pool); 21 22 - void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, 23 dma_addr_t *handle); 24 25 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
··· 19 20 void dma_pool_destroy(struct dma_pool *pool); 21 22 + void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 23 dma_addr_t *handle); 24 25 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+7 -7
include/linux/gfp.h
··· 85 #endif 86 87 extern struct page * 88 - FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *)); 89 90 - static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask, 91 unsigned int order) 92 { 93 if (unlikely(order >= MAX_ORDER)) ··· 98 } 99 100 #ifdef CONFIG_NUMA 101 - extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order); 102 103 static inline struct page * 104 - alloc_pages(unsigned int __nocast gfp_mask, unsigned int order) 105 { 106 if (unlikely(order >= MAX_ORDER)) 107 return NULL; 108 109 return alloc_pages_current(gfp_mask, order); 110 } 111 - extern struct page *alloc_page_vma(unsigned __nocast gfp_mask, 112 struct vm_area_struct *vma, unsigned long addr); 113 #else 114 #define alloc_pages(gfp_mask, order) \ ··· 117 #endif 118 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 119 120 - extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)); 121 - extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask)); 122 123 #define __get_free_page(gfp_mask) \ 124 __get_free_pages((gfp_mask),0)
··· 85 #endif 86 87 extern struct page * 88 + FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); 89 90 + static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, 91 unsigned int order) 92 { 93 if (unlikely(order >= MAX_ORDER)) ··· 98 } 99 100 #ifdef CONFIG_NUMA 101 + extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); 102 103 static inline struct page * 104 + alloc_pages(gfp_t gfp_mask, unsigned int order) 105 { 106 if (unlikely(order >= MAX_ORDER)) 107 return NULL; 108 109 return alloc_pages_current(gfp_mask, order); 110 } 111 + extern struct page *alloc_page_vma(gfp_t gfp_mask, 112 struct vm_area_struct *vma, unsigned long addr); 113 #else 114 #define alloc_pages(gfp_mask, order) \ ··· 117 #endif 118 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 119 120 + extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order)); 121 + extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask)); 122 123 #define __get_free_page(gfp_mask) \ 124 __get_free_pages((gfp_mask),0)
+1 -1
include/linux/jbd.h
··· 935 */ 936 extern kmem_cache_t *jbd_handle_cache; 937 938 - static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags) 939 { 940 return kmem_cache_alloc(jbd_handle_cache, gfp_flags); 941 }
··· 935 */ 936 extern kmem_cache_t *jbd_handle_cache; 937 938 + static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) 939 { 940 return kmem_cache_alloc(jbd_handle_cache, gfp_flags); 941 }
+2 -2
include/linux/kfifo.h
··· 35 }; 36 37 extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 38 - unsigned int __nocast gfp_mask, spinlock_t *lock); 39 - extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, 40 spinlock_t *lock); 41 extern void kfifo_free(struct kfifo *fifo); 42 extern unsigned int __kfifo_put(struct kfifo *fifo,
··· 35 }; 36 37 extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 38 + gfp_t gfp_mask, spinlock_t *lock); 39 + extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, 40 spinlock_t *lock); 41 extern void kfifo_free(struct kfifo *fifo); 42 extern unsigned int __kfifo_put(struct kfifo *fifo,
+4 -5
include/linux/mempool.h
··· 6 7 #include <linux/wait.h> 8 9 - typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data); 10 typedef void (mempool_free_t)(void *element, void *pool_data); 11 12 typedef struct mempool_s { ··· 26 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, 27 mempool_free_t *free_fn, void *pool_data, int nid); 28 29 - extern int mempool_resize(mempool_t *pool, int new_min_nr, 30 - unsigned int __nocast gfp_mask); 31 extern void mempool_destroy(mempool_t *pool); 32 - extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask); 33 extern void mempool_free(void *element, mempool_t *pool); 34 35 /* 36 * A mempool_alloc_t and mempool_free_t that get the memory from 37 * a slab that is passed in through pool_data. 38 */ 39 - void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data); 40 void mempool_free_slab(void *element, void *pool_data); 41 42 #endif /* _LINUX_MEMPOOL_H */
··· 6 7 #include <linux/wait.h> 8 9 + typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); 10 typedef void (mempool_free_t)(void *element, void *pool_data); 11 12 typedef struct mempool_s { ··· 26 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, 27 mempool_free_t *free_fn, void *pool_data, int nid); 28 29 + extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); 30 extern void mempool_destroy(mempool_t *pool); 31 + extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); 32 extern void mempool_free(void *element, mempool_t *pool); 33 34 /* 35 * A mempool_alloc_t and mempool_free_t that get the memory from 36 * a slab that is passed in through pool_data. 37 */ 38 + void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 39 void mempool_free_slab(void *element, void *pool_data); 40 41 #endif /* _LINUX_MEMPOOL_H */
+1 -1
include/linux/netlink.h
··· 131 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 132 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 133 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 134 - __u32 group, unsigned int __nocast allocation); 135 extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); 136 extern int netlink_register_notifier(struct notifier_block *nb); 137 extern int netlink_unregister_notifier(struct notifier_block *nb);
··· 131 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); 132 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); 133 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, 134 + __u32 group, gfp_t allocation); 135 extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); 136 extern int netlink_register_notifier(struct notifier_block *nb); 137 extern int netlink_unregister_notifier(struct notifier_block *nb);
+1 -1
include/linux/pagemap.h
··· 19 #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 20 #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 21 22 - static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping) 23 { 24 return mapping->flags & __GFP_BITS_MASK; 25 }
··· 19 #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 20 #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 21 22 + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) 23 { 24 return mapping->flags & __GFP_BITS_MASK; 25 }
+3 -3
include/linux/posix_acl.h
··· 71 72 /* posix_acl.c */ 73 74 - extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast); 75 - extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast); 76 extern int posix_acl_valid(const struct posix_acl *); 77 extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); 78 - extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast); 79 extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *); 80 extern int posix_acl_create_masq(struct posix_acl *, mode_t *); 81 extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
··· 71 72 /* posix_acl.c */ 73 74 + extern struct posix_acl *posix_acl_alloc(int, gfp_t); 75 + extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t); 76 extern int posix_acl_valid(const struct posix_acl *); 77 extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); 78 + extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t); 79 extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *); 80 extern int posix_acl_create_masq(struct posix_acl *, mode_t *); 81 extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
+1 -1
include/linux/radix-tree.h
··· 50 unsigned int 51 radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 52 unsigned long first_index, unsigned int max_items); 53 - int radix_tree_preload(unsigned int __nocast gfp_mask); 54 void radix_tree_init(void); 55 void *radix_tree_tag_set(struct radix_tree_root *root, 56 unsigned long index, int tag);
··· 50 unsigned int 51 radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 52 unsigned long first_index, unsigned int max_items); 53 + int radix_tree_preload(gfp_t gfp_mask); 54 void radix_tree_init(void); 55 void *radix_tree_tag_set(struct radix_tree_root *root, 56 unsigned long index, int tag);
+2 -4
include/linux/security.h
··· 2634 return security_ops->socket_getpeersec(sock, optval, optlen, len); 2635 } 2636 2637 - static inline int security_sk_alloc(struct sock *sk, int family, 2638 - unsigned int __nocast priority) 2639 { 2640 return security_ops->sk_alloc_security(sk, family, priority); 2641 } ··· 2751 return -ENOPROTOOPT; 2752 } 2753 2754 - static inline int security_sk_alloc(struct sock *sk, int family, 2755 - unsigned int __nocast priority) 2756 { 2757 return 0; 2758 }
··· 2634 return security_ops->socket_getpeersec(sock, optval, optlen, len); 2635 } 2636 2637 + static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) 2638 { 2639 return security_ops->sk_alloc_security(sk, family, priority); 2640 } ··· 2752 return -ENOPROTOOPT; 2753 } 2754 2755 + static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) 2756 { 2757 return 0; 2758 }
+14 -14
include/linux/skbuff.h
··· 302 303 extern void __kfree_skb(struct sk_buff *skb); 304 extern struct sk_buff *__alloc_skb(unsigned int size, 305 - unsigned int __nocast priority, int fclone); 306 static inline struct sk_buff *alloc_skb(unsigned int size, 307 - unsigned int __nocast priority) 308 { 309 return __alloc_skb(size, priority, 0); 310 } 311 312 static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 313 - unsigned int __nocast priority) 314 { 315 return __alloc_skb(size, priority, 1); 316 } 317 318 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 319 unsigned int size, 320 - unsigned int __nocast priority); 321 extern void kfree_skbmem(struct sk_buff *skb); 322 extern struct sk_buff *skb_clone(struct sk_buff *skb, 323 - unsigned int __nocast priority); 324 extern struct sk_buff *skb_copy(const struct sk_buff *skb, 325 - unsigned int __nocast priority); 326 extern struct sk_buff *pskb_copy(struct sk_buff *skb, 327 - unsigned int __nocast gfp_mask); 328 extern int pskb_expand_head(struct sk_buff *skb, 329 int nhead, int ntail, 330 - unsigned int __nocast gfp_mask); 331 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 332 unsigned int headroom); 333 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 334 int newheadroom, int newtailroom, 335 - unsigned int __nocast priority); 336 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); 337 #define dev_kfree_skb(a) kfree_skb(a) 338 extern void skb_over_panic(struct sk_buff *skb, int len, ··· 484 * NULL is returned on a memory allocation failure. 485 */ 486 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 487 - unsigned int __nocast pri) 488 { 489 might_sleep_if(pri & __GFP_WAIT); 490 if (skb_shared(skb)) { ··· 516 * %NULL is returned on a memory allocation failure. 517 */ 518 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 519 - unsigned int __nocast pri) 520 { 521 might_sleep_if(pri & __GFP_WAIT); 522 if (skb_cloned(skb)) { ··· 1017 * %NULL is returned in there is no free memory. 1018 */ 1019 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1020 - unsigned int __nocast gfp_mask) 1021 { 1022 struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); 1023 if (likely(skb)) ··· 1130 * If there is no free memory -ENOMEM is returned, otherwise zero 1131 * is returned and the old skb data released. 1132 */ 1133 - extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp); 1134 - static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp) 1135 { 1136 return __skb_linearize(skb, gfp); 1137 }
··· 302 303 extern void __kfree_skb(struct sk_buff *skb); 304 extern struct sk_buff *__alloc_skb(unsigned int size, 305 + gfp_t priority, int fclone); 306 static inline struct sk_buff *alloc_skb(unsigned int size, 307 + gfp_t priority) 308 { 309 return __alloc_skb(size, priority, 0); 310 } 311 312 static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 313 + gfp_t priority) 314 { 315 return __alloc_skb(size, priority, 1); 316 } 317 318 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 319 unsigned int size, 320 + gfp_t priority); 321 extern void kfree_skbmem(struct sk_buff *skb); 322 extern struct sk_buff *skb_clone(struct sk_buff *skb, 323 + gfp_t priority); 324 extern struct sk_buff *skb_copy(const struct sk_buff *skb, 325 + gfp_t priority); 326 extern struct sk_buff *pskb_copy(struct sk_buff *skb, 327 + gfp_t gfp_mask); 328 extern int pskb_expand_head(struct sk_buff *skb, 329 int nhead, int ntail, 330 + gfp_t gfp_mask); 331 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, 332 unsigned int headroom); 333 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 334 int newheadroom, int newtailroom, 335 + gfp_t priority); 336 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); 337 #define dev_kfree_skb(a) kfree_skb(a) 338 extern void skb_over_panic(struct sk_buff *skb, int len, ··· 484 * NULL is returned on a memory allocation failure. 485 */ 486 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, 487 + gfp_t pri) 488 { 489 might_sleep_if(pri & __GFP_WAIT); 490 if (skb_shared(skb)) { ··· 516 * %NULL is returned on a memory allocation failure. 517 */ 518 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, 519 + gfp_t pri) 520 { 521 might_sleep_if(pri & __GFP_WAIT); 522 if (skb_cloned(skb)) { ··· 1017 * %NULL is returned in there is no free memory. 1018 */ 1019 static inline struct sk_buff *__dev_alloc_skb(unsigned int length, 1020 + gfp_t gfp_mask) 1021 { 1022 struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); 1023 if (likely(skb)) ··· 1130 * If there is no free memory -ENOMEM is returned, otherwise zero 1131 * is returned and the old skb data released. 1132 */ 1133 + extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); 1134 + static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp) 1135 { 1136 return __skb_linearize(skb, gfp); 1137 }
+9 -10
include/linux/slab.h
··· 61 void (*)(void *, kmem_cache_t *, unsigned long)); 62 extern int kmem_cache_destroy(kmem_cache_t *); 63 extern int kmem_cache_shrink(kmem_cache_t *); 64 - extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); 65 extern void kmem_cache_free(kmem_cache_t *, void *); 66 extern unsigned int kmem_cache_size(kmem_cache_t *); 67 extern const char *kmem_cache_name(kmem_cache_t *); 68 - extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags); 69 70 /* Size description struct for general caches. */ 71 struct cache_sizes { ··· 74 kmem_cache_t *cs_dmacachep; 75 }; 76 extern struct cache_sizes malloc_sizes[]; 77 - extern void *__kmalloc(size_t, unsigned int __nocast); 78 79 - static inline void *kmalloc(size_t size, unsigned int __nocast flags) 80 { 81 if (__builtin_constant_p(size)) { 82 int i = 0; ··· 99 return __kmalloc(size, flags); 100 } 101 102 - extern void *kzalloc(size_t, unsigned int __nocast); 103 104 /** 105 * kcalloc - allocate memory for an array. The memory is set to zero. ··· 107 * @size: element size. 108 * @flags: the type of memory to allocate. 109 */ 110 - static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags) 111 { 112 if (n != 0 && size > INT_MAX / n) 113 return NULL; ··· 118 extern unsigned int ksize(const void *); 119 120 #ifdef CONFIG_NUMA 121 - extern void *kmem_cache_alloc_node(kmem_cache_t *, 122 - unsigned int __nocast flags, int node); 123 - extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); 124 #else 125 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) 126 { 127 return kmem_cache_alloc(cachep, flags); 128 } 129 - static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) 130 { 131 return kmalloc(size, flags); 132 }
··· 61 void (*)(void *, kmem_cache_t *, unsigned long)); 62 extern int kmem_cache_destroy(kmem_cache_t *); 63 extern int kmem_cache_shrink(kmem_cache_t *); 64 + extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t); 65 extern void kmem_cache_free(kmem_cache_t *, void *); 66 extern unsigned int kmem_cache_size(kmem_cache_t *); 67 extern const char *kmem_cache_name(kmem_cache_t *); 68 + extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags); 69 70 /* Size description struct for general caches. */ 71 struct cache_sizes { ··· 74 kmem_cache_t *cs_dmacachep; 75 }; 76 extern struct cache_sizes malloc_sizes[]; 77 + extern void *__kmalloc(size_t, gfp_t); 78 79 + static inline void *kmalloc(size_t size, gfp_t flags) 80 { 81 if (__builtin_constant_p(size)) { 82 int i = 0; ··· 99 return __kmalloc(size, flags); 100 } 101 102 + extern void *kzalloc(size_t, gfp_t); 103 104 /** 105 * kcalloc - allocate memory for an array. The memory is set to zero. ··· 107 * @size: element size. 108 * @flags: the type of memory to allocate. 109 */ 110 + static inline void *kcalloc(size_t n, size_t size, gfp_t flags) 111 { 112 if (n != 0 && size > INT_MAX / n) 113 return NULL; ··· 118 extern unsigned int ksize(const void *); 119 120 #ifdef CONFIG_NUMA 121 + extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); 122 + extern void *kmalloc_node(size_t size, gfp_t flags, int node); 123 #else 124 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) 125 { 126 return kmem_cache_alloc(cachep, flags); 127 } 128 + static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 129 { 130 return kmalloc(size, flags); 131 }
+1 -1
include/linux/string.h
··· 88 extern void * memchr(const void *,int,__kernel_size_t); 89 #endif 90 91 - extern char *kstrdup(const char *s, unsigned int __nocast gfp); 92 93 #ifdef __cplusplus 94 }
··· 88 extern void * memchr(const void *,int,__kernel_size_t); 89 #endif 90 91 + extern char *kstrdup(const char *s, gfp_t gfp); 92 93 #ifdef __cplusplus 94 }
+1 -1
include/linux/swap.h
··· 147 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) 148 149 /* linux/mm/oom_kill.c */ 150 - extern void out_of_memory(unsigned int __nocast gfp_mask, int order); 151 152 /* linux/mm/memory.c */ 153 extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
··· 147 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) 148 149 /* linux/mm/oom_kill.c */ 150 + extern void out_of_memory(gfp_t gfp_mask, int order); 151 152 /* linux/mm/memory.c */ 153 extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
+1 -1
include/linux/textsearch.h
··· 159 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 160 161 static inline struct ts_config *alloc_ts_config(size_t payload, 162 - unsigned int __nocast gfp_mask) 163 { 164 struct ts_config *conf; 165
··· 159 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 160 161 static inline struct ts_config *alloc_ts_config(size_t payload, 162 + gfp_t gfp_mask) 163 { 164 struct ts_config *conf; 165
+4
include/linux/types.h
··· 165 typedef __u64 __bitwise __be64; 166 #endif 167 168 struct ustat { 169 __kernel_daddr_t f_tfree; 170 __kernel_ino_t f_tinode;
··· 165 typedef __u64 __bitwise __be64; 166 #endif 167 168 + #ifdef __KERNEL__ 169 + typedef unsigned __nocast gfp_t; 170 + #endif 171 + 172 struct ustat { 173 __kernel_daddr_t f_tfree; 174 __kernel_ino_t f_tinode;
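To underline the commit message's claim that this is purely an annotation change, a hedged sketch (not from the patch): outside sparse, gfp_t is layout-identical to the unsigned int it replaces, so the mass rename of parameters in the hunks above and below keeps the same ABI. The _Static_assert form is modern C used only for illustration here.

```c
/* Standalone sketch: __nocast is defined empty, as it would be for a
 * non-sparse build (in the kernel this is conditional on __CHECKER__). */
#define __nocast
typedef unsigned __nocast gfp_t;   /* as added to include/linux/types.h */

/* Same size and signedness as the "unsigned int __nocast" it replaces,
 * so calling conventions and generated code are unchanged. */
_Static_assert(sizeof(gfp_t) == sizeof(unsigned int),
               "gfp_t is still an unsigned int");
_Static_assert((gfp_t)-1 > 0, "gfp_t is unsigned");
```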
+2 -2
include/linux/vmalloc.h
··· 34 extern void *vmalloc(unsigned long size); 35 extern void *vmalloc_exec(unsigned long size); 36 extern void *vmalloc_32(unsigned long size); 37 - extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot); 38 - extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot); 39 extern void vfree(void *addr); 40 41 extern void *vmap(struct page **pages, unsigned int count,
··· 34 extern void *vmalloc(unsigned long size); 35 extern void *vmalloc_exec(unsigned long size); 36 extern void *vmalloc_32(unsigned long size); 37 + extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 38 + extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot); 39 extern void vfree(void *addr); 40 41 extern void *vmap(struct page **pages, unsigned int count,
+1 -1
include/net/bluetooth/bluetooth.h
··· 136 }; 137 #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) 138 139 - static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how) 140 { 141 struct sk_buff *skb; 142
··· 136 }; 137 #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) 138 139 + static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) 140 { 141 struct sk_buff *skb; 142
+1 -1
include/net/bluetooth/rfcomm.h
··· 230 u8 xon_char, u8 xoff_char, u16 param_mask); 231 232 /* ---- RFCOMM DLCs (channels) ---- */ 233 - struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio); 234 void rfcomm_dlc_free(struct rfcomm_dlc *d); 235 int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); 236 int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
··· 230 u8 xon_char, u8 xoff_char, u16 param_mask); 231 232 /* ---- RFCOMM DLCs (channels) ---- */ 233 + struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); 234 void rfcomm_dlc_free(struct rfcomm_dlc *d); 235 int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); 236 int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
+4 -4
include/net/dn_nsp.h
··· 19 extern void dn_nsp_send_oth_ack(struct sock *sk); 20 extern void dn_nsp_delayed_ack(struct sock *sk); 21 extern void dn_send_conn_ack(struct sock *sk); 22 - extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp); 23 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 24 - unsigned short reason, unsigned int __nocast gfp); 25 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 26 unsigned short reason); 27 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); ··· 29 30 extern void dn_nsp_output(struct sock *sk); 31 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 32 - extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob); 33 extern unsigned long dn_nsp_persist(struct sock *sk); 34 extern int dn_nsp_xmit_timeout(struct sock *sk); 35 36 extern int dn_nsp_rx(struct sk_buff *); 37 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 38 39 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 40 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 41 42 #define NSP_REASON_OK 0 /* No error */
··· 19 extern void dn_nsp_send_oth_ack(struct sock *sk); 20 extern void dn_nsp_delayed_ack(struct sock *sk); 21 extern void dn_send_conn_ack(struct sock *sk); 22 + extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp); 23 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 24 + unsigned short reason, gfp_t gfp); 25 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 26 unsigned short reason); 27 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); ··· 29 30 extern void dn_nsp_output(struct sock *sk); 31 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 32 + extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob); 33 extern unsigned long dn_nsp_persist(struct sock *sk); 34 extern int dn_nsp_xmit_timeout(struct sock *sk); 35 36 extern int dn_nsp_rx(struct sk_buff *); 37 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 38 39 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); 40 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 41 42 #define NSP_REASON_OK 0 /* No error */
+1 -1
include/net/dn_route.h
··· 15 GNU General Public License for more details. 16 *******************************************************************************/ 17 18 - extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri); 19 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 20 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
··· 15 GNU General Public License for more details. 16 *******************************************************************************/ 17 18 + extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); 19 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 20 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 21 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+1 -1
include/net/inet_connection_sock.h
··· 94 95 extern struct sock *inet_csk_clone(struct sock *sk, 96 const struct request_sock *req, 97 - const unsigned int __nocast priority); 98 99 enum inet_csk_ack_state_t { 100 ICSK_ACK_SCHED = 1,
··· 94 95 extern struct sock *inet_csk_clone(struct sock *sk, 96 const struct request_sock *req, 97 + const gfp_t priority); 98 99 enum inet_csk_ack_state_t { 100 ICSK_ACK_SCHED = 1,
+1 -1
include/net/ip_vs.h
··· 832 833 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 834 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 835 - extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 836 char *o_buf, int o_len, char *n_buf, int n_len); 837 extern int ip_vs_app_init(void); 838 extern void ip_vs_app_cleanup(void);
··· 832 833 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 834 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 835 + extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, 836 char *o_buf, int o_len, char *n_buf, int n_len); 837 extern int ip_vs_app_init(void); 838 extern void ip_vs_app_cleanup(void);
+1 -1
include/net/llc_conn.h
··· 93 return skb->cb[sizeof(skb->cb) - 1]; 94 } 95 96 - extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority, 97 struct proto *prot); 98 extern void llc_sk_free(struct sock *sk); 99
··· 93 return skb->cb[sizeof(skb->cb) - 1]; 94 } 95 96 + extern struct sock *llc_sk_alloc(int family, gfp_t priority, 97 struct proto *prot); 98 extern void llc_sk_free(struct sock *sk); 99
+1 -1
include/net/sctp/sctp.h
··· 125 */ 126 extern struct sock *sctp_get_ctl_sock(void); 127 extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, 128 - sctp_scope_t, unsigned int __nocast gfp, 129 int flags); 130 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); 131 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
··· 125 */ 126 extern struct sock *sctp_get_ctl_sock(void); 127 extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, 128 + sctp_scope_t, gfp_t gfp, 129 int flags); 130 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); 131 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
+5 -5
include/net/sctp/sm.h
··· 181 int sctp_chunk_iif(const struct sctp_chunk *); 182 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, 183 struct sctp_chunk *, 184 - unsigned int __nocast gfp); 185 __u32 sctp_generate_verification_tag(void); 186 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); 187 188 /* Prototypes for chunk-building functions. */ 189 struct sctp_chunk *sctp_make_init(const struct sctp_association *, 190 const struct sctp_bind_addr *, 191 - unsigned int __nocast gfp, int vparam_len); 192 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, 193 const struct sctp_chunk *, 194 - const unsigned int __nocast gfp, 195 const int unkparam_len); 196 struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, 197 const struct sctp_chunk *); ··· 265 struct sctp_endpoint *, 266 struct sctp_association *asoc, 267 void *event_arg, 268 - unsigned int __nocast gfp); 269 270 /* 2nd level prototypes */ 271 void sctp_generate_t3_rtx_event(unsigned long peer); ··· 276 struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, 277 const struct sctp_association *, 278 struct sctp_chunk *, 279 - unsigned int __nocast gfp, int *err, 280 struct sctp_chunk **err_chk_p); 281 int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, 282 struct sockaddr_storage*, int);
··· 181 int sctp_chunk_iif(const struct sctp_chunk *); 182 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, 183 struct sctp_chunk *, 184 + gfp_t gfp); 185 __u32 sctp_generate_verification_tag(void); 186 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); 187 188 /* Prototypes for chunk-building functions. */ 189 struct sctp_chunk *sctp_make_init(const struct sctp_association *, 190 const struct sctp_bind_addr *, 191 + gfp_t gfp, int vparam_len); 192 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, 193 const struct sctp_chunk *, 194 + const gfp_t gfp, 195 const int unkparam_len); 196 struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, 197 const struct sctp_chunk *); ··· 265 struct sctp_endpoint *, 266 struct sctp_association *asoc, 267 void *event_arg, 268 + gfp_t gfp); 269 270 /* 2nd level prototypes */ 271 void sctp_generate_t3_rtx_event(unsigned long peer); ··· 276 struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, 277 const struct sctp_association *, 278 struct sctp_chunk *, 279 + gfp_t gfp, int *err, 280 struct sctp_chunk **err_chk_p); 281 int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, 282 struct sockaddr_storage*, int);
+12 -12
include/net/sctp/structs.h
··· 446 }; 447 448 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 449 - unsigned int __nocast gfp); 450 void sctp_ssnmap_free(struct sctp_ssnmap *map); 451 void sctp_ssnmap_clear(struct sctp_ssnmap *map); 452 ··· 947 }; 948 949 struct sctp_transport *sctp_transport_new(const union sctp_addr *, 950 - unsigned int __nocast); 951 void sctp_transport_set_owner(struct sctp_transport *, 952 struct sctp_association *); 953 void sctp_transport_route(struct sctp_transport *, union sctp_addr *, ··· 1095 void sctp_bind_addr_free(struct sctp_bind_addr *); 1096 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 1097 const struct sctp_bind_addr *src, 1098 - sctp_scope_t scope, unsigned int __nocast gfp, 1099 int flags); 1100 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1101 - unsigned int __nocast gfp); 1102 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1103 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1104 struct sctp_sock *); ··· 1108 struct sctp_sock *opt); 1109 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 1110 int *addrs_len, 1111 - unsigned int __nocast gfp); 1112 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, 1113 - __u16 port, unsigned int __nocast gfp); 1114 1115 sctp_scope_t sctp_scope(const union sctp_addr *); 1116 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); ··· 1239 } 1240 1241 /* These are function signatures for manipulating endpoints. */ 1242 - struct sctp_endpoint *sctp_endpoint_new(struct sock *, unsigned int __nocast); 1243 void sctp_endpoint_free(struct sctp_endpoint *); 1244 void sctp_endpoint_put(struct sctp_endpoint *); 1245 void sctp_endpoint_hold(struct sctp_endpoint *); ··· 1260 struct sctp_chunk **err_chunk); 1261 int sctp_process_init(struct sctp_association *, sctp_cid_t cid, 1262 const union sctp_addr *peer, 1263 - sctp_init_chunk_t *init, unsigned int __nocast gfp); 1264 __u32 sctp_generate_tag(const struct sctp_endpoint *); 1265 __u32 sctp_generate_tsn(const struct sctp_endpoint *); 1266 ··· 1723 1724 struct sctp_association * 1725 sctp_association_new(const struct sctp_endpoint *, const struct sock *, 1726 - sctp_scope_t scope, unsigned int __nocast gfp); 1727 void sctp_association_free(struct sctp_association *); 1728 void sctp_association_put(struct sctp_association *); 1729 void sctp_association_hold(struct sctp_association *); ··· 1739 const union sctp_addr *laddr); 1740 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *, 1741 const union sctp_addr *address, 1742 - const unsigned int __nocast gfp, 1743 const int peer_state); 1744 void sctp_assoc_del_peer(struct sctp_association *asoc, 1745 const union sctp_addr *addr); ··· 1764 void sctp_assoc_set_primary(struct sctp_association *, 1765 struct sctp_transport *); 1766 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, 1767 - unsigned int __nocast); 1768 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, 1769 struct sctp_cookie*, 1770 - unsigned int __nocast gfp); 1771 1772 int sctp_cmp_addr_exact(const union sctp_addr *ss1, 1773 const union sctp_addr *ss2);
··· 446 }; 447 448 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 449 + gfp_t gfp); 450 void sctp_ssnmap_free(struct sctp_ssnmap *map); 451 void sctp_ssnmap_clear(struct sctp_ssnmap *map); 452 ··· 947 }; 948 949 struct sctp_transport *sctp_transport_new(const union sctp_addr *, 950 + gfp_t); 951 void sctp_transport_set_owner(struct sctp_transport *, 952 struct sctp_association *); 953 void sctp_transport_route(struct sctp_transport *, union sctp_addr *, ··· 1095 void sctp_bind_addr_free(struct sctp_bind_addr *); 1096 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 1097 const struct sctp_bind_addr *src, 1098 + sctp_scope_t scope, gfp_t gfp, 1099 int flags); 1100 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, 1101 + gfp_t gfp); 1102 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); 1103 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, 1104 struct sctp_sock *); ··· 1108 struct sctp_sock *opt); 1109 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 1110 int *addrs_len, 1111 + gfp_t gfp); 1112 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, 1113 + __u16 port, gfp_t gfp); 1114 1115 sctp_scope_t sctp_scope(const union sctp_addr *); 1116 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); ··· 1239 } 1240 1241 /* These are function signatures for manipulating endpoints. */ 1242 + struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t); 1243 void sctp_endpoint_free(struct sctp_endpoint *); 1244 void sctp_endpoint_put(struct sctp_endpoint *); 1245 void sctp_endpoint_hold(struct sctp_endpoint *); ··· 1260 struct sctp_chunk **err_chunk); 1261 int sctp_process_init(struct sctp_association *, sctp_cid_t cid, 1262 const union sctp_addr *peer, 1263 + sctp_init_chunk_t *init, gfp_t gfp); 1264 __u32 sctp_generate_tag(const struct sctp_endpoint *); 1265 __u32 sctp_generate_tsn(const struct sctp_endpoint *); 1266 ··· 1723 1724 struct sctp_association * 1725 sctp_association_new(const struct sctp_endpoint *, const struct sock *, 1726 + sctp_scope_t scope, gfp_t gfp); 1727 void sctp_association_free(struct sctp_association *); 1728 void sctp_association_put(struct sctp_association *); 1729 void sctp_association_hold(struct sctp_association *); ··· 1739 const union sctp_addr *laddr); 1740 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *, 1741 const union sctp_addr *address, 1742 + const gfp_t gfp, 1743 const int peer_state); 1744 void sctp_assoc_del_peer(struct sctp_association *asoc, 1745 const union sctp_addr *addr); ··· 1764 void sctp_assoc_set_primary(struct sctp_association *, 1765 struct sctp_transport *); 1766 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, 1767 + gfp_t); 1768 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, 1769 struct sctp_cookie*, 1770 + gfp_t gfp); 1771 1772 int sctp_cmp_addr_exact(const union sctp_addr *ss1, 1773 const union sctp_addr *ss2);
+8 -8
include/net/sctp/ulpevent.h
··· 88 __u16 error, 89 __u16 outbound, 90 __u16 inbound, 91 - unsigned int __nocast gfp); 92 93 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 94 const struct sctp_association *asoc, ··· 96 int flags, 97 int state, 98 int error, 99 - unsigned int __nocast gfp); 100 101 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 102 const struct sctp_association *asoc, 103 struct sctp_chunk *chunk, 104 __u16 flags, 105 - unsigned int __nocast gfp); 106 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 107 const struct sctp_association *asoc, 108 struct sctp_chunk *chunk, 109 __u16 flags, 110 __u32 error, 111 - unsigned int __nocast gfp); 112 113 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 114 const struct sctp_association *asoc, 115 __u16 flags, 116 - unsigned int __nocast gfp); 117 118 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 119 const struct sctp_association *asoc, 120 - __u32 indication, unsigned int __nocast gfp); 121 122 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 123 - const struct sctp_association *asoc, unsigned int __nocast gfp); 124 125 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 126 struct sctp_chunk *chunk, 127 - unsigned int __nocast gfp); 128 129 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 130 struct msghdr *);
··· 88 __u16 error, 89 __u16 outbound, 90 __u16 inbound, 91 + gfp_t gfp); 92 93 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 94 const struct sctp_association *asoc, ··· 96 int flags, 97 int state, 98 int error, 99 + gfp_t gfp); 100 101 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 102 const struct sctp_association *asoc, 103 struct sctp_chunk *chunk, 104 __u16 flags, 105 + gfp_t gfp); 106 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 107 const struct sctp_association *asoc, 108 struct sctp_chunk *chunk, 109 __u16 flags, 110 __u32 error, 111 + gfp_t gfp); 112 113 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 114 const struct sctp_association *asoc, 115 __u16 flags, 116 + gfp_t gfp); 117 118 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 119 const struct sctp_association *asoc, 120 + __u32 indication, gfp_t gfp); 121 122 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 123 + const struct sctp_association *asoc, gfp_t gfp); 124 125 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 126 struct sctp_chunk *chunk, 127 + gfp_t gfp); 128 129 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 130 struct msghdr *);
+4 -7
include/net/sctp/ulpqueue.h
··· 62 void sctp_ulpq_free(struct sctp_ulpq *); 63 64 /* Add a new DATA chunk for processing. */ 65 - int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, 66 - unsigned int __nocast); 67 68 /* Add a new event for propagation to the ULP. */ 69 int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); 70 71 /* Renege previously received chunks. */ 72 - void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, 73 - unsigned int __nocast); 74 75 /* Perform partial delivery. */ 76 - void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, 77 - unsigned int __nocast); 78 79 /* Abort the partial delivery. */ 80 - void sctp_ulpq_abort_pd(struct sctp_ulpq *, unsigned int __nocast); 81 82 /* Clear the partial data delivery condition on this socket. */ 83 int sctp_clear_pd(struct sock *sk);
··· 62 void sctp_ulpq_free(struct sctp_ulpq *); 63 64 /* Add a new DATA chunk for processing. */ 65 + int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 66 67 /* Add a new event for propagation to the ULP. */ 68 int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); 69 70 /* Renege previously received chunks. */ 71 + void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 72 73 /* Perform partial delivery. */ 74 + void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); 75 76 /* Abort the partial delivery. */ 77 + void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t); 78 79 /* Clear the partial data delivery condition on this socket. */ 80 int sctp_clear_pd(struct sock *sk);
+8 -8
include/net/sock.h
··· 739 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 740 741 extern struct sock *sk_alloc(int family, 742 - unsigned int __nocast priority, 743 struct proto *prot, int zero_it); 744 extern void sk_free(struct sock *sk); 745 extern struct sock *sk_clone(const struct sock *sk, 746 - const unsigned int __nocast priority); 747 748 extern struct sk_buff *sock_wmalloc(struct sock *sk, 749 unsigned long size, int force, 750 - unsigned int __nocast priority); 751 extern struct sk_buff *sock_rmalloc(struct sock *sk, 752 unsigned long size, int force, 753 - unsigned int __nocast priority); 754 extern void sock_wfree(struct sk_buff *skb); 755 extern void sock_rfree(struct sk_buff *skb); 756 ··· 766 int noblock, 767 int *errcode); 768 extern void *sock_kmalloc(struct sock *sk, int size, 769 - unsigned int __nocast priority); 770 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 771 extern void sk_send_sigurg(struct sock *sk); 772 ··· 1201 1202 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, 1203 int size, int mem, 1204 - unsigned int __nocast gfp) 1205 { 1206 struct sk_buff *skb; 1207 int hdr_len; ··· 1224 1225 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, 1226 int size, 1227 - unsigned int __nocast gfp) 1228 { 1229 return sk_stream_alloc_pskb(sk, size, 0, gfp); 1230 } ··· 1255 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); 1256 } 1257 1258 - static inline unsigned int __nocast gfp_any(void) 1259 { 1260 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; 1261 }
··· 739 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 740 741 extern struct sock *sk_alloc(int family, 742 + gfp_t priority, 743 struct proto *prot, int zero_it); 744 extern void sk_free(struct sock *sk); 745 extern struct sock *sk_clone(const struct sock *sk, 746 + const gfp_t priority); 747 748 extern struct sk_buff *sock_wmalloc(struct sock *sk, 749 unsigned long size, int force, 750 + gfp_t priority); 751 extern struct sk_buff *sock_rmalloc(struct sock *sk, 752 unsigned long size, int force, 753 + gfp_t priority); 754 extern void sock_wfree(struct sk_buff *skb); 755 extern void sock_rfree(struct sk_buff *skb); 756 ··· 766 int noblock, 767 int *errcode); 768 extern void *sock_kmalloc(struct sock *sk, int size, 769 + gfp_t priority); 770 extern void sock_kfree_s(struct sock *sk, void *mem, int size); 771 extern void sk_send_sigurg(struct sock *sk); 772 ··· 1201 1202 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, 1203 int size, int mem, 1204 + gfp_t gfp) 1205 { 1206 struct sk_buff *skb; 1207 int hdr_len; ··· 1224 1225 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, 1226 int size, 1227 + gfp_t gfp) 1228 { 1229 return sk_stream_alloc_pskb(sk, size, 0, gfp); 1230 } ··· 1255 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); 1256 } 1257 1258 + static inline gfp_t gfp_any(void) 1259 { 1260 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; 1261 }
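Illustration only, not part of the patch: a minimal caller written against the converted include/net/sock.h declarations above. It uses only sock_wmalloc() and gfp_any() as they appear in this hunk; the helper name is hypothetical and a kernel build environment of this era is assumed.

/* Hypothetical sketch -- not part of the patch. */
#include <net/sock.h>

static struct sk_buff *example_ctl_skb(struct sock *sk, unsigned int len)
{
	/* gfp_any() (see the hunk above) evaluates to GFP_ATOMIC in
	 * softirq context and to GFP_KERNEL otherwise; either value is a
	 * gfp_t and so matches sock_wmalloc()'s "priority" parameter. */
	return sock_wmalloc(sk, len, 0, gfp_any());
}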
+1 -2
include/net/tcp.h
··· 460 extern void tcp_send_partial(struct sock *); 461 extern int tcp_write_wakeup(struct sock *); 462 extern void tcp_send_fin(struct sock *sk); 463 - extern void tcp_send_active_reset(struct sock *sk, 464 - unsigned int __nocast priority); 465 extern int tcp_send_synack(struct sock *); 466 extern void tcp_push_one(struct sock *, unsigned int mss_now); 467 extern void tcp_send_ack(struct sock *sk);
··· 460 extern void tcp_send_partial(struct sock *); 461 extern int tcp_write_wakeup(struct sock *); 462 extern void tcp_send_fin(struct sock *sk); 463 + extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); 464 extern int tcp_send_synack(struct sock *); 465 extern void tcp_push_one(struct sock *, unsigned int mss_now); 466 extern void tcp_send_ack(struct sock *sk);
+1 -1
include/net/xfrm.h
··· 875 } 876 #endif 877 878 - struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp); 879 extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 880 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 881 struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
··· 875 } 876 #endif 877 878 + struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); 879 extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 880 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 881 struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
+1 -1
include/rdma/ib_mad.h
··· 596 u32 remote_qpn, u16 pkey_index, 597 struct ib_ah *ah, int rmpp_active, 598 int hdr_len, int data_len, 599 - unsigned int __nocast gfp_mask); 600 601 /** 602 * ib_free_send_mad - Returns data buffers used to send a MAD.
··· 596 u32 remote_qpn, u16 pkey_index, 597 struct ib_ah *ah, int rmpp_active, 598 int hdr_len, int data_len, 599 + gfp_t gfp_mask); 600 601 /** 602 * ib_free_send_mad - Returns data buffers used to send a MAD.
+5 -5
include/rdma/ib_sa.h
··· 285 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 286 struct ib_sa_path_rec *rec, 287 ib_sa_comp_mask comp_mask, 288 - int timeout_ms, unsigned int __nocast gfp_mask, 289 void (*callback)(int status, 290 struct ib_sa_path_rec *resp, 291 void *context), ··· 296 u8 method, 297 struct ib_sa_mcmember_rec *rec, 298 ib_sa_comp_mask comp_mask, 299 - int timeout_ms, unsigned int __nocast gfp_mask, 300 void (*callback)(int status, 301 struct ib_sa_mcmember_rec *resp, 302 void *context), ··· 307 u8 method, 308 struct ib_sa_service_rec *rec, 309 ib_sa_comp_mask comp_mask, 310 - int timeout_ms, unsigned int __nocast gfp_mask, 311 void (*callback)(int status, 312 struct ib_sa_service_rec *resp, 313 void *context), ··· 342 ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, 343 struct ib_sa_mcmember_rec *rec, 344 ib_sa_comp_mask comp_mask, 345 - int timeout_ms, unsigned int __nocast gfp_mask, 346 void (*callback)(int status, 347 struct ib_sa_mcmember_rec *resp, 348 void *context), ··· 384 ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, 385 struct ib_sa_mcmember_rec *rec, 386 ib_sa_comp_mask comp_mask, 387 - int timeout_ms, unsigned int __nocast gfp_mask, 388 void (*callback)(int status, 389 struct ib_sa_mcmember_rec *resp, 390 void *context),
··· 285 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, 286 struct ib_sa_path_rec *rec, 287 ib_sa_comp_mask comp_mask, 288 + int timeout_ms, gfp_t gfp_mask, 289 void (*callback)(int status, 290 struct ib_sa_path_rec *resp, 291 void *context), ··· 296 u8 method, 297 struct ib_sa_mcmember_rec *rec, 298 ib_sa_comp_mask comp_mask, 299 + int timeout_ms, gfp_t gfp_mask, 300 void (*callback)(int status, 301 struct ib_sa_mcmember_rec *resp, 302 void *context), ··· 307 u8 method, 308 struct ib_sa_service_rec *rec, 309 ib_sa_comp_mask comp_mask, 310 + int timeout_ms, gfp_t gfp_mask, 311 void (*callback)(int status, 312 struct ib_sa_service_rec *resp, 313 void *context), ··· 342 ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, 343 struct ib_sa_mcmember_rec *rec, 344 ib_sa_comp_mask comp_mask, 345 + int timeout_ms, gfp_t gfp_mask, 346 void (*callback)(int status, 347 struct ib_sa_mcmember_rec *resp, 348 void *context), ··· 384 ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, 385 struct ib_sa_mcmember_rec *rec, 386 ib_sa_comp_mask comp_mask, 387 + int timeout_ms, gfp_t gfp_mask, 388 void (*callback)(int status, 389 struct ib_sa_mcmember_rec *resp, 390 void *context),
+1 -1
include/rxrpc/call.h
··· 203 size_t sioc, 204 struct kvec *siov, 205 uint8_t rxhdr_flags, 206 - unsigned int __nocast alloc_flags, 207 int dup_data, 208 size_t *size_sent); 209
··· 203 size_t sioc, 204 struct kvec *siov, 205 uint8_t rxhdr_flags, 206 + gfp_t alloc_flags, 207 int dup_data, 208 size_t *size_sent); 209
+1 -1
include/rxrpc/message.h
··· 63 uint8_t type, 64 int count, 65 struct kvec *diov, 66 - unsigned int __nocast alloc_flags, 67 struct rxrpc_message **_msg); 68 69 extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
··· 63 uint8_t type, 64 int count, 65 struct kvec *diov, 66 + gfp_t alloc_flags, 67 struct rxrpc_message **_msg); 68 69 extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
+4 -4
include/sound/core.h
··· 290 void snd_memory_done(void); 291 int snd_memory_info_init(void); 292 int snd_memory_info_done(void); 293 - void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags); 294 - void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags); 295 - void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags); 296 void snd_hidden_kfree(const void *obj); 297 void *snd_hidden_vmalloc(unsigned long size); 298 void snd_hidden_vfree(void *obj); 299 - char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags); 300 #define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) 301 #define kzalloc(size, flags) snd_hidden_kzalloc(size, flags) 302 #define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
··· 290 void snd_memory_done(void); 291 int snd_memory_info_init(void); 292 int snd_memory_info_done(void); 293 + void *snd_hidden_kmalloc(size_t size, gfp_t flags); 294 + void *snd_hidden_kzalloc(size_t size, gfp_t flags); 295 + void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags); 296 void snd_hidden_kfree(const void *obj); 297 void *snd_hidden_vmalloc(unsigned long size); 298 void snd_hidden_vfree(void *obj); 299 + char *snd_hidden_kstrdup(const char *s, gfp_t flags); 300 #define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) 301 #define kzalloc(size, flags) snd_hidden_kzalloc(size, flags) 302 #define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
+1 -1
include/sound/driver.h
··· 51 #ifdef CONFIG_SND_DEBUG_MEMORY 52 #include <linux/slab.h> 53 #include <linux/vmalloc.h> 54 - void *snd_wrapper_kmalloc(size_t, unsigned int __nocast); 55 #undef kmalloc 56 void snd_wrapper_kfree(const void *); 57 #undef kfree
··· 51 #ifdef CONFIG_SND_DEBUG_MEMORY 52 #include <linux/slab.h> 53 #include <linux/vmalloc.h> 54 + void *snd_wrapper_kmalloc(size_t, gfp_t); 55 #undef kmalloc 56 void snd_wrapper_kfree(const void *); 57 #undef kfree
+1 -1
kernel/audit.c
··· 560 } 561 562 static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, 563 - unsigned int __nocast gfp_mask, int type) 564 { 565 unsigned long flags; 566 struct audit_buffer *ab = NULL;
··· 560 } 561 562 static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, 563 + gfp_t gfp_mask, int type) 564 { 565 unsigned long flags; 566 struct audit_buffer *ab = NULL;
+1 -1
kernel/cpuset.c
··· 1670 * GFP_USER - only nodes in current tasks mems allowed ok. 1671 **/ 1672 1673 - int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask) 1674 { 1675 int node; /* node that zone z is on */ 1676 const struct cpuset *cs; /* current cpuset ancestors */
··· 1670 * GFP_USER - only nodes in current tasks mems allowed ok. 1671 **/ 1672 1673 + int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) 1674 { 1675 int node; /* node that zone z is on */ 1676 const struct cpuset *cs; /* current cpuset ancestors */
+2 -2
kernel/kfifo.c
··· 36 * struct kfifo with kfree(). 37 */ 38 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 39 - unsigned int __nocast gfp_mask, spinlock_t *lock) 40 { 41 struct kfifo *fifo; 42 ··· 64 * 65 * The size will be rounded-up to a power of 2. 66 */ 67 - struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock) 68 { 69 unsigned char *buffer; 70 struct kfifo *ret;
··· 36 * struct kfifo with kfree(). 37 */ 38 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 39 + gfp_t gfp_mask, spinlock_t *lock) 40 { 41 struct kfifo *fifo; 42 ··· 64 * 65 * The size will be rounded-up to a power of 2. 66 */ 67 + struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) 68 { 69 unsigned char *buffer; 70 struct kfifo *ret;
+1 -1
kernel/signal.c
··· 262 return sig; 263 } 264 265 - static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags, 266 int override_rlimit) 267 { 268 struct sigqueue *q = NULL;
··· 262 return sig; 263 } 264 265 + static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 266 int override_rlimit) 267 { 268 struct sigqueue *q = NULL;
+1 -1
lib/radix-tree.c
··· 110 * success, return zero, with preemption disabled. On error, return -ENOMEM 111 * with preemption not disabled. 112 */ 113 - int radix_tree_preload(unsigned int __nocast gfp_mask) 114 { 115 struct radix_tree_preload *rtp; 116 struct radix_tree_node *node;
··· 110 * success, return zero, with preemption disabled. On error, return -ENOMEM 111 * with preemption not disabled. 112 */ 113 + int radix_tree_preload(gfp_t gfp_mask) 114 { 115 struct radix_tree_preload *rtp; 116 struct radix_tree_node *node;
+1 -1
lib/ts_bm.c
··· 127 } 128 129 static struct ts_config *bm_init(const void *pattern, unsigned int len, 130 - unsigned int __nocast gfp_mask) 131 { 132 struct ts_config *conf; 133 struct ts_bm *bm;
··· 127 } 128 129 static struct ts_config *bm_init(const void *pattern, unsigned int len, 130 + gfp_t gfp_mask) 131 { 132 struct ts_config *conf; 133 struct ts_bm *bm;
+1 -1
lib/ts_fsm.c
··· 258 } 259 260 static struct ts_config *fsm_init(const void *pattern, unsigned int len, 261 - unsigned int __nocast gfp_mask) 262 { 263 int i, err = -EINVAL; 264 struct ts_config *conf;
··· 258 } 259 260 static struct ts_config *fsm_init(const void *pattern, unsigned int len, 261 + gfp_t gfp_mask) 262 { 263 int i, err = -EINVAL; 264 struct ts_config *conf;
+1 -1
lib/ts_kmp.c
··· 87 } 88 89 static struct ts_config *kmp_init(const void *pattern, unsigned int len, 90 - unsigned int __nocast gfp_mask) 91 { 92 struct ts_config *conf; 93 struct ts_kmp *kmp;
··· 87 } 88 89 static struct ts_config *kmp_init(const void *pattern, unsigned int len, 90 + gfp_t gfp_mask) 91 { 92 struct ts_config *conf; 93 struct ts_kmp *kmp;
+1 -1
mm/highmem.c
··· 30 31 static mempool_t *page_pool, *isa_page_pool; 32 33 - static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data) 34 { 35 unsigned int gfp = gfp_mask | (unsigned int) (long) data; 36
··· 30 31 static mempool_t *page_pool, *isa_page_pool; 32 33 + static void *page_pool_alloc(gfp_t gfp_mask, void *data) 34 { 35 unsigned int gfp = gfp_mask | (unsigned int) (long) data; 36
+4 -4
mm/mempolicy.c
··· 687 } 688 689 /* Return a zonelist representing a mempolicy */ 690 - static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy) 691 { 692 int nd; 693 ··· 751 752 /* Allocate a page in interleaved policy. 753 Own path because it needs to do special accounting. */ 754 - static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid) 755 { 756 struct zonelist *zl; 757 struct page *page; ··· 789 * Should be called with the mm_sem of the vma hold. 790 */ 791 struct page * 792 - alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr) 793 { 794 struct mempolicy *pol = get_vma_policy(current, vma, addr); 795 ··· 832 * 1) it's ok to take cpuset_sem (can WAIT), and 833 * 2) allocating for current task (not interrupt). 834 */ 835 - struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order) 836 { 837 struct mempolicy *pol = current->mempolicy; 838
··· 687 } 688 689 /* Return a zonelist representing a mempolicy */ 690 + static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) 691 { 692 int nd; 693 ··· 751 752 /* Allocate a page in interleaved policy. 753 Own path because it needs to do special accounting. */ 754 + static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) 755 { 756 struct zonelist *zl; 757 struct page *page; ··· 789 * Should be called with the mm_sem of the vma hold. 790 */ 791 struct page * 792 + alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) 793 { 794 struct mempolicy *pol = get_vma_policy(current, vma, addr); 795 ··· 832 * 1) it's ok to take cpuset_sem (can WAIT), and 833 * 2) allocating for current task (not interrupt). 834 */ 835 + struct page *alloc_pages_current(gfp_t gfp, unsigned order) 836 { 837 struct mempolicy *pol = current->mempolicy; 838
+3 -3
mm/mempool.c
··· 112 * while this function is running. mempool_alloc() & mempool_free() 113 * might be called (eg. from IRQ contexts) while this function executes. 114 */ 115 - int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask) 116 { 117 void *element; 118 void **new_elements; ··· 200 * *never* fails when called from process contexts. (it might 201 * fail if called from an IRQ context.) 202 */ 203 - void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask) 204 { 205 void *element; 206 unsigned long flags; ··· 276 /* 277 * A commonly used alloc and free fn. 278 */ 279 - void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data) 280 { 281 kmem_cache_t *mem = (kmem_cache_t *) pool_data; 282 return kmem_cache_alloc(mem, gfp_mask);
··· 112 * while this function is running. mempool_alloc() & mempool_free() 113 * might be called (eg. from IRQ contexts) while this function executes. 114 */ 115 + int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) 116 { 117 void *element; 118 void **new_elements; ··· 200 * *never* fails when called from process contexts. (it might 201 * fail if called from an IRQ context.) 202 */ 203 + void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) 204 { 205 void *element; 206 unsigned long flags; ··· 276 /* 277 * A commonly used alloc and free fn. 278 */ 279 + void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) 280 { 281 kmem_cache_t *mem = (kmem_cache_t *) pool_data; 282 return kmem_cache_alloc(mem, gfp_mask);
+1 -2
mm/nommu.c
··· 157 kfree(addr); 158 } 159 160 - void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, 161 - pgprot_t prot) 162 { 163 /* 164 * kmalloc doesn't like __GFP_HIGHMEM for some reason
··· 157 kfree(addr); 158 } 159 160 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 161 { 162 /* 163 * kmalloc doesn't like __GFP_HIGHMEM for some reason
+1 -1
mm/oom_kill.c
··· 263 * OR try to be smart about which process to kill. Note that we 264 * don't have to be perfect here, we just have to be good. 265 */ 266 - void out_of_memory(unsigned int __nocast gfp_mask, int order) 267 { 268 struct mm_struct *mm = NULL; 269 task_t * p;
··· 263 * OR try to be smart about which process to kill. Note that we 264 * don't have to be perfect here, we just have to be good. 265 */ 266 + void out_of_memory(gfp_t gfp_mask, int order) 267 { 268 struct mm_struct *mm = NULL; 269 task_t * p;
+6 -6
mm/page_alloc.c
··· 671 free_hot_cold_page(page, 1); 672 } 673 674 - static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags) 675 { 676 int i; 677 ··· 686 * or two. 687 */ 688 static struct page * 689 - buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags) 690 { 691 unsigned long flags; 692 struct page *page = NULL; ··· 761 } 762 763 static inline int 764 - should_reclaim_zone(struct zone *z, unsigned int gfp_mask) 765 { 766 if (!z->reclaim_pages) 767 return 0; ··· 774 * This is the 'heart' of the zoned buddy allocator. 775 */ 776 struct page * fastcall 777 - __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order, 778 struct zonelist *zonelist) 779 { 780 const int wait = gfp_mask & __GFP_WAIT; ··· 977 /* 978 * Common helper functions. 979 */ 980 - fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order) 981 { 982 struct page * page; 983 page = alloc_pages(gfp_mask, order); ··· 988 989 EXPORT_SYMBOL(__get_free_pages); 990 991 - fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask) 992 { 993 struct page * page; 994
··· 671 free_hot_cold_page(page, 1); 672 } 673 674 + static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 675 { 676 int i; 677 ··· 686 * or two. 687 */ 688 static struct page * 689 + buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) 690 { 691 unsigned long flags; 692 struct page *page = NULL; ··· 761 } 762 763 static inline int 764 + should_reclaim_zone(struct zone *z, gfp_t gfp_mask) 765 { 766 if (!z->reclaim_pages) 767 return 0; ··· 774 * This is the 'heart' of the zoned buddy allocator. 775 */ 776 struct page * fastcall 777 + __alloc_pages(gfp_t gfp_mask, unsigned int order, 778 struct zonelist *zonelist) 779 { 780 const int wait = gfp_mask & __GFP_WAIT; ··· 977 /* 978 * Common helper functions. 979 */ 980 + fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 981 { 982 struct page * page; 983 page = alloc_pages(gfp_mask, order); ··· 988 989 EXPORT_SYMBOL(__get_free_pages); 990 991 + fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 992 { 993 struct page * page; 994
+1 -1
mm/page_io.c
··· 19 #include <linux/writeback.h> 20 #include <asm/pgtable.h> 21 22 - static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index, 23 struct page *page, bio_end_io_t end_io) 24 { 25 struct bio *bio;
··· 19 #include <linux/writeback.h> 20 #include <asm/pgtable.h> 21 22 + static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, 23 struct page *page, bio_end_io_t end_io) 24 { 25 struct bio *bio;
+1 -2
mm/shmem.c
··· 921 } 922 923 static inline struct page * 924 - shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info, 925 - unsigned long idx) 926 { 927 return alloc_page(gfp | __GFP_ZERO); 928 }
··· 921 } 922 923 static inline struct page * 924 + shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) 925 { 926 return alloc_page(gfp | __GFP_ZERO); 927 }
+16 -18
mm/slab.c
··· 650 return cachep->array[smp_processor_id()]; 651 } 652 653 - static inline kmem_cache_t *__find_general_cachep(size_t size, 654 - unsigned int __nocast gfpflags) 655 { 656 struct cache_sizes *csizep = malloc_sizes; 657 ··· 674 return csizep->cs_cachep; 675 } 676 677 - kmem_cache_t *kmem_find_general_cachep(size_t size, 678 - unsigned int __nocast gfpflags) 679 { 680 return __find_general_cachep(size, gfpflags); 681 } ··· 1183 * did not request dmaable memory, we might get it, but that 1184 * would be relatively rare and ignorable. 1185 */ 1186 - static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 1187 { 1188 struct page *page; 1189 void *addr; ··· 2046 2047 /* Get the memory for a slab management obj. */ 2048 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, 2049 - int colour_off, unsigned int __nocast local_flags) 2050 { 2051 struct slab *slabp; 2052 ··· 2147 * Grow (by 1) the number of slabs within a cache. This is called by 2148 * kmem_cache_alloc() when there are no active objs left in a cache. 2149 */ 2150 - static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 2151 { 2152 struct slab *slabp; 2153 void *objp; ··· 2354 #define check_slabp(x,y) do { } while(0) 2355 #endif 2356 2357 - static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags) 2358 { 2359 int batchcount; 2360 struct kmem_list3 *l3; ··· 2454 } 2455 2456 static inline void 2457 - cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) 2458 { 2459 might_sleep_if(flags & __GFP_WAIT); 2460 #if DEBUG ··· 2465 #if DEBUG 2466 static void * 2467 cache_alloc_debugcheck_after(kmem_cache_t *cachep, 2468 - unsigned int __nocast flags, void *objp, void *caller) 2469 { 2470 if (!objp) 2471 return objp; ··· 2508 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2509 #endif 2510 2511 - static inline void *____cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2512 { 2513 void* objp; 2514 struct array_cache *ac; ··· 2526 return objp; 2527 } 2528 2529 - static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2530 { 2531 unsigned long save_flags; 2532 void* objp; ··· 2785 * Allocate an object from this cache. The flags are only relevant 2786 * if the cache has no available objects. 2787 */ 2788 - void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) 2789 { 2790 return __cache_alloc(cachep, flags); 2791 } ··· 2846 * New and improved: it will now make sure that the object gets 2847 * put on the correct node list so that there is no false sharing. 2848 */ 2849 - void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) 2850 { 2851 unsigned long save_flags; 2852 void *ptr; ··· 2873 } 2874 EXPORT_SYMBOL(kmem_cache_alloc_node); 2875 2876 - void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) 2877 { 2878 kmem_cache_t *cachep; 2879 ··· 2906 * platforms. For example, on i386, it means that the memory must come 2907 * from the first 16MB. 2908 */ 2909 - void *__kmalloc(size_t size, unsigned int __nocast flags) 2910 { 2911 kmem_cache_t *cachep; 2912 ··· 2995 * @size: how many bytes of memory are required. 2996 * @flags: the type of memory to allocate. 2997 */ 2998 - void *kzalloc(size_t size, unsigned int __nocast flags) 2999 { 3000 void *ret = kmalloc(size, flags); 3001 if (ret) ··· 3601 * @s: the string to duplicate 3602 * @gfp: the GFP mask used in the kmalloc() call when allocating memory 3603 */ 3604 - char *kstrdup(const char *s, unsigned int __nocast gfp) 3605 { 3606 size_t len; 3607 char *buf;
··· 650 return cachep->array[smp_processor_id()]; 651 } 652 653 + static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) 654 { 655 struct cache_sizes *csizep = malloc_sizes; 656 ··· 675 return csizep->cs_cachep; 676 } 677 678 + kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) 679 { 680 return __find_general_cachep(size, gfpflags); 681 } ··· 1185 * did not request dmaable memory, we might get it, but that 1186 * would be relatively rare and ignorable. 1187 */ 1188 + static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) 1189 { 1190 struct page *page; 1191 void *addr; ··· 2048 2049 /* Get the memory for a slab management obj. */ 2050 static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, 2051 + int colour_off, gfp_t local_flags) 2052 { 2053 struct slab *slabp; 2054 ··· 2149 * Grow (by 1) the number of slabs within a cache. This is called by 2150 * kmem_cache_alloc() when there are no active objs left in a cache. 2151 */ 2152 + static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2153 { 2154 struct slab *slabp; 2155 void *objp; ··· 2356 #define check_slabp(x,y) do { } while(0) 2357 #endif 2358 2359 + static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) 2360 { 2361 int batchcount; 2362 struct kmem_list3 *l3; ··· 2456 } 2457 2458 static inline void 2459 + cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) 2460 { 2461 might_sleep_if(flags & __GFP_WAIT); 2462 #if DEBUG ··· 2467 #if DEBUG 2468 static void * 2469 cache_alloc_debugcheck_after(kmem_cache_t *cachep, 2470 + gfp_t flags, void *objp, void *caller) 2471 { 2472 if (!objp) 2473 return objp; ··· 2510 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) 2511 #endif 2512 2513 + static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2514 { 2515 void* objp; 2516 struct array_cache *ac; ··· 2528 return objp; 2529 } 2530 2531 + static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2532 { 2533 unsigned long save_flags; 2534 void* objp; ··· 2787 * Allocate an object from this cache. The flags are only relevant 2788 * if the cache has no available objects. 2789 */ 2790 + void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) 2791 { 2792 return __cache_alloc(cachep, flags); 2793 } ··· 2848 * New and improved: it will now make sure that the object gets 2849 * put on the correct node list so that there is no false sharing. 2850 */ 2851 + void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) 2852 { 2853 unsigned long save_flags; 2854 void *ptr; ··· 2875 } 2876 EXPORT_SYMBOL(kmem_cache_alloc_node); 2877 2878 + void *kmalloc_node(size_t size, gfp_t flags, int node) 2879 { 2880 kmem_cache_t *cachep; 2881 ··· 2908 * platforms. For example, on i386, it means that the memory must come 2909 * from the first 16MB. 2910 */ 2911 + void *__kmalloc(size_t size, gfp_t flags) 2912 { 2913 kmem_cache_t *cachep; 2914 ··· 2997 * @size: how many bytes of memory are required. 2998 * @flags: the type of memory to allocate. 2999 */ 3000 + void *kzalloc(size_t size, gfp_t flags) 3001 { 3002 void *ret = kmalloc(size, flags); 3003 if (ret) ··· 3603 * @s: the string to duplicate 3604 * @gfp: the GFP mask used in the kmalloc() call when allocating memory 3605 */ 3606 + char *kstrdup(const char *s, gfp_t gfp) 3607 { 3608 size_t len; 3609 char *buf;
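Illustration only, not part of the patch: a small hypothetical helper written against the converted mm/slab.c interfaces above (kzalloc(), kstrdup(), kfree()). The struct and function names are invented for the example, and GFP_KERNEL is assumed, so the helper may only be used from process context.

/* Hypothetical sketch -- not part of the patch. */
#include <linux/slab.h>
#include <linux/string.h>

struct example_label {
	char *name;
};

static struct example_label *example_label_new(const char *name)
{
	/* Both allocations pass a gfp_t mask, matching the prototypes
	 * converted in the hunk above; GFP_KERNEL may sleep. */
	struct example_label *l = kzalloc(sizeof(*l), GFP_KERNEL);

	if (!l)
		return NULL;

	l->name = kstrdup(name, GFP_KERNEL);
	if (!l->name) {
		kfree(l);
		return NULL;
	}
	return l;
}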
+1 -1
mm/swap_state.c
··· 68 * but sets SwapCache flag and private instead of mapping and index. 69 */ 70 static int __add_to_swap_cache(struct page *page, swp_entry_t entry, 71 - unsigned int __nocast gfp_mask) 72 { 73 int error; 74
··· 68 * but sets SwapCache flag and private instead of mapping and index. 69 */ 70 static int __add_to_swap_cache(struct page *page, swp_entry_t entry, 71 + gfp_t gfp_mask) 72 { 73 int error; 74
+2 -2
mm/vmalloc.c
··· 395 396 EXPORT_SYMBOL(vmap); 397 398 - void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot) 399 { 400 struct page **pages; 401 unsigned int nr_pages, array_size, i; ··· 446 * allocator with @gfp_mask flags. Map them into contiguous 447 * kernel virtual space, using a pagetable protection of @prot. 448 */ 449 - void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot) 450 { 451 struct vm_struct *area; 452
··· 395 396 EXPORT_SYMBOL(vmap); 397 398 + void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) 399 { 400 struct page **pages; 401 unsigned int nr_pages, array_size, i; ··· 446 * allocator with @gfp_mask flags. Map them into contiguous 447 * kernel virtual space, using a pagetable protection of @prot. 448 */ 449 + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 450 { 451 struct vm_struct *area; 452
+1 -1
net/atm/atm_misc.c
··· 25 26 27 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 28 - unsigned int __nocast gfp_flags) 29 { 30 struct sock *sk = sk_atm(vcc); 31 int guess = atm_guess_pdu2truesize(pdu_size);
··· 25 26 27 struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 28 + gfp_t gfp_flags) 29 { 30 struct sock *sk = sk_atm(vcc); 31 int guess = atm_guess_pdu2truesize(pdu_size);
+1 -1
net/bluetooth/l2cap.c
··· 372 .obj_size = sizeof(struct l2cap_pinfo) 373 }; 374 375 - static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 376 { 377 struct sock *sk; 378
··· 372 .obj_size = sizeof(struct l2cap_pinfo) 373 }; 374 375 + static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) 376 { 377 struct sock *sk; 378
+1 -1
net/bluetooth/rfcomm/core.c
··· 229 d->rx_credits = RFCOMM_DEFAULT_CREDITS; 230 } 231 232 - struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio) 233 { 234 struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); 235 if (!d)
··· 229 d->rx_credits = RFCOMM_DEFAULT_CREDITS; 230 } 231 232 + struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) 233 { 234 struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); 235 if (!d)
+1 -1
net/bluetooth/rfcomm/sock.c
··· 284 .obj_size = sizeof(struct rfcomm_pinfo) 285 }; 286 287 - static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 288 { 289 struct rfcomm_dlc *d; 290 struct sock *sk;
··· 284 .obj_size = sizeof(struct rfcomm_pinfo) 285 }; 286 287 + static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio) 288 { 289 struct rfcomm_dlc *d; 290 struct sock *sk;
+1 -1
net/bluetooth/rfcomm/tty.c
··· 286 skb->destructor = rfcomm_wfree; 287 } 288 289 - static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority) 290 { 291 if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { 292 struct sk_buff *skb = alloc_skb(size, priority);
··· 286 skb->destructor = rfcomm_wfree; 287 } 288 289 + static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) 290 { 291 if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { 292 struct sk_buff *skb = alloc_skb(size, priority);
+1 -1
net/bluetooth/sco.c
··· 418 .obj_size = sizeof(struct sco_pinfo) 419 }; 420 421 - static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) 422 { 423 struct sock *sk; 424
··· 418 .obj_size = sizeof(struct sco_pinfo) 419 }; 420 421 + static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) 422 { 423 struct sock *sk; 424
+1 -1
net/core/dev.c
··· 1132 #endif 1133 1134 /* Keep head the same: replace data */ 1135 - int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) 1136 { 1137 unsigned int size; 1138 u8 *data;
··· 1132 #endif 1133 1134 /* Keep head the same: replace data */ 1135 + int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask) 1136 { 1137 unsigned int size; 1138 u8 *data;
+7 -7
net/core/skbuff.c
··· 130 * Buffers may only be allocated from interrupts using a @gfp_mask of 131 * %GFP_ATOMIC. 132 */ 133 - struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask, 134 int fclone) 135 { 136 struct sk_buff *skb; ··· 198 */ 199 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 200 unsigned int size, 201 - unsigned int __nocast gfp_mask) 202 { 203 struct sk_buff *skb; 204 u8 *data; ··· 361 * %GFP_ATOMIC. 362 */ 363 364 - struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) 365 { 366 struct sk_buff *n; 367 ··· 500 * header is going to be modified. Use pskb_copy() instead. 501 */ 502 503 - struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask) 504 { 505 int headerlen = skb->data - skb->head; 506 /* ··· 539 * The returned buffer has a reference count of 1. 540 */ 541 542 - struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask) 543 { 544 /* 545 * Allocate the copy buffer ··· 598 */ 599 600 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 601 - unsigned int __nocast gfp_mask) 602 { 603 int i; 604 u8 *data; ··· 689 */ 690 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 691 int newheadroom, int newtailroom, 692 - unsigned int __nocast gfp_mask) 693 { 694 /* 695 * Allocate the copy buffer
··· 130 * Buffers may only be allocated from interrupts using a @gfp_mask of 131 * %GFP_ATOMIC. 132 */ 133 + struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 134 int fclone) 135 { 136 struct sk_buff *skb; ··· 198 */ 199 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 200 unsigned int size, 201 + gfp_t gfp_mask) 202 { 203 struct sk_buff *skb; 204 u8 *data; ··· 361 * %GFP_ATOMIC. 362 */ 363 364 + struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 365 { 366 struct sk_buff *n; 367 ··· 500 * header is going to be modified. Use pskb_copy() instead. 501 */ 502 503 + struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 504 { 505 int headerlen = skb->data - skb->head; 506 /* ··· 539 * The returned buffer has a reference count of 1. 540 */ 541 542 + struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 543 { 544 /* 545 * Allocate the copy buffer ··· 598 */ 599 600 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 601 + gfp_t gfp_mask) 602 { 603 int i; 604 u8 *data; ··· 689 */ 690 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 691 int newheadroom, int newtailroom, 692 + gfp_t gfp_mask) 693 { 694 /* 695 * Allocate the copy buffer
+5 -5
net/core/sock.c
··· 637 * @prot: struct proto associated with this new sock instance 638 * @zero_it: if we should zero the newly allocated sock 639 */ 640 - struct sock *sk_alloc(int family, unsigned int __nocast priority, 641 struct proto *prot, int zero_it) 642 { 643 struct sock *sk = NULL; ··· 704 module_put(owner); 705 } 706 707 - struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority) 708 { 709 struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); 710 ··· 845 * Allocate a skb from the socket's send buffer. 846 */ 847 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 848 - unsigned int __nocast priority) 849 { 850 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 851 struct sk_buff * skb = alloc_skb(size, priority); ··· 861 * Allocate a skb from the socket's receive buffer. 862 */ 863 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 864 - unsigned int __nocast priority) 865 { 866 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 867 struct sk_buff *skb = alloc_skb(size, priority); ··· 876 /* 877 * Allocate a memory block from the socket's option memory buffer. 878 */ 879 - void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority) 880 { 881 if ((unsigned)size <= sysctl_optmem_max && 882 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
··· 637 * @prot: struct proto associated with this new sock instance 638 * @zero_it: if we should zero the newly allocated sock 639 */ 640 + struct sock *sk_alloc(int family, gfp_t priority, 641 struct proto *prot, int zero_it) 642 { 643 struct sock *sk = NULL; ··· 704 module_put(owner); 705 } 706 707 + struct sock *sk_clone(const struct sock *sk, const gfp_t priority) 708 { 709 struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); 710 ··· 845 * Allocate a skb from the socket's send buffer. 846 */ 847 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 848 + gfp_t priority) 849 { 850 if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 851 struct sk_buff * skb = alloc_skb(size, priority); ··· 861 * Allocate a skb from the socket's receive buffer. 862 */ 863 struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, 864 + gfp_t priority) 865 { 866 if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { 867 struct sk_buff *skb = alloc_skb(size, priority); ··· 876 /* 877 * Allocate a memory block from the socket's option memory buffer. 878 */ 879 + void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 880 { 881 if ((unsigned)size <= sysctl_optmem_max && 882 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+1 -1
net/dccp/ackvec.c
··· 91 } 92 93 struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, 94 - const unsigned int __nocast priority) 95 { 96 struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); 97
··· 91 } 92 93 struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, 94 + const gfp_t priority) 95 { 96 struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); 97
+2 -2
net/dccp/ackvec.h
··· 74 75 #ifdef CONFIG_IP_DCCP_ACKVEC 76 extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 77 - const unsigned int __nocast priority); 78 extern void dccp_ackvec_free(struct dccp_ackvec *av); 79 80 extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, ··· 93 } 94 #else /* CONFIG_IP_DCCP_ACKVEC */ 95 static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 96 - const unsigned int __nocast priority) 97 { 98 return NULL; 99 }
··· 74 75 #ifdef CONFIG_IP_DCCP_ACKVEC 76 extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 77 + const gfp_t priority); 78 extern void dccp_ackvec_free(struct dccp_ackvec *av); 79 80 extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, ··· 93 } 94 #else /* CONFIG_IP_DCCP_ACKVEC */ 95 static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, 96 + const gfp_t priority) 97 { 98 return NULL; 99 }
+1 -1
net/dccp/ccids/lib/loss_interval.h
··· 36 37 static inline struct dccp_li_hist_entry * 38 dccp_li_hist_entry_new(struct dccp_li_hist *hist, 39 - const unsigned int __nocast prio) 40 { 41 return kmem_cache_alloc(hist->dccplih_slab, prio); 42 }
··· 36 37 static inline struct dccp_li_hist_entry * 38 dccp_li_hist_entry_new(struct dccp_li_hist *hist, 39 + const gfp_t prio) 40 { 41 return kmem_cache_alloc(hist->dccplih_slab, prio); 42 }
+2 -2
net/dccp/ccids/lib/packet_history.h
··· 86 87 static inline struct dccp_tx_hist_entry * 88 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, 89 - const unsigned int __nocast prio) 90 { 91 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, 92 prio); ··· 137 const struct sock *sk, 138 const u32 ndp, 139 const struct sk_buff *skb, 140 - const unsigned int __nocast prio) 141 { 142 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, 143 prio);
··· 86 87 static inline struct dccp_tx_hist_entry * 88 dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, 89 + const gfp_t prio) 90 { 91 struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, 92 prio); ··· 137 const struct sock *sk, 138 const u32 ndp, 139 const struct sk_buff *skb, 140 + const gfp_t prio) 141 { 142 struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, 143 prio);
+2 -4
net/decnet/af_decnet.c
··· 452 .obj_size = sizeof(struct dn_sock), 453 }; 454 455 - static struct sock *dn_alloc_sock(struct socket *sock, 456 - unsigned int __nocast gfp) 457 { 458 struct dn_scp *scp; 459 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); ··· 804 return rv; 805 } 806 807 - static int dn_confirm_accept(struct sock *sk, long *timeo, 808 - unsigned int __nocast allocation) 809 { 810 struct dn_scp *scp = DN_SK(sk); 811 DEFINE_WAIT(wait);
··· 452 .obj_size = sizeof(struct dn_sock), 453 }; 454 455 + static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp) 456 { 457 struct dn_scp *scp; 458 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); ··· 805 return rv; 806 } 807 808 + static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) 809 { 810 struct dn_scp *scp = DN_SK(sk); 811 DEFINE_WAIT(wait);
+9 -11
net/decnet/dn_nsp_out.c
··· 117 * The eventual aim is for each socket to have a cached header size 118 * for its outgoing packets, and to set hdr from this when sk != NULL. 119 */ 120 - struct sk_buff *dn_alloc_skb(struct sock *sk, int size, 121 - unsigned int __nocast pri) 122 { 123 struct sk_buff *skb; 124 int hdr = 64; ··· 211 * Returns: The number of times the packet has been sent previously 212 */ 213 static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 214 - unsigned int __nocast gfp) 215 { 216 struct dn_skb_cb *cb = DN_SKB_CB(skb); 217 struct sk_buff *skb2; ··· 352 } 353 354 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, 355 - unsigned int __nocast gfp, int oth) 356 { 357 struct dn_scp *scp = DN_SK(sk); 358 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 519 return 0; 520 } 521 522 - void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp) 523 { 524 struct dn_scp *scp = DN_SK(sk); 525 struct sk_buff *skb = NULL; ··· 551 552 553 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 554 - unsigned short reason, unsigned int __nocast gfp, 555 struct dst_entry *dst, 556 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 557 { ··· 594 595 596 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 597 - unsigned short reason, unsigned int __nocast gfp) 598 { 599 struct dn_scp *scp = DN_SK(sk); 600 int ddl = 0; ··· 615 { 616 struct dn_skb_cb *cb = DN_SKB_CB(skb); 617 int ddl = 0; 618 - unsigned int __nocast gfp = GFP_ATOMIC; 619 620 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 621 NULL, cb->src_port, cb->dst_port); ··· 627 struct dn_scp *scp = DN_SK(sk); 628 struct sk_buff *skb; 629 unsigned char *ptr; 630 - unsigned int __nocast gfp = GFP_ATOMIC; 631 632 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 633 return; ··· 662 unsigned char menuver; 663 struct dn_skb_cb *cb; 664 unsigned char type = 1; 665 - unsigned int __nocast allocation = 666 - (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 667 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 668 669 if (!skb)
··· 117 * The eventual aim is for each socket to have a cached header size 118 * for its outgoing packets, and to set hdr from this when sk != NULL. 119 */ 120 + struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) 121 { 122 struct sk_buff *skb; 123 int hdr = 64; ··· 212 * Returns: The number of times the packet has been sent previously 213 */ 214 static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 215 + gfp_t gfp) 216 { 217 struct dn_skb_cb *cb = DN_SKB_CB(skb); 218 struct sk_buff *skb2; ··· 353 } 354 355 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, 356 + gfp_t gfp, int oth) 357 { 358 struct dn_scp *scp = DN_SK(sk); 359 struct dn_skb_cb *cb = DN_SKB_CB(skb); ··· 520 return 0; 521 } 522 523 + void dn_send_conn_conf(struct sock *sk, gfp_t gfp) 524 { 525 struct dn_scp *scp = DN_SK(sk); 526 struct sk_buff *skb = NULL; ··· 552 553 554 static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 555 + unsigned short reason, gfp_t gfp, 556 struct dst_entry *dst, 557 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 558 { ··· 595 596 597 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 598 + unsigned short reason, gfp_t gfp) 599 { 600 struct dn_scp *scp = DN_SK(sk); 601 int ddl = 0; ··· 616 { 617 struct dn_skb_cb *cb = DN_SKB_CB(skb); 618 int ddl = 0; 619 + gfp_t gfp = GFP_ATOMIC; 620 621 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 622 NULL, cb->src_port, cb->dst_port); ··· 628 struct dn_scp *scp = DN_SK(sk); 629 struct sk_buff *skb; 630 unsigned char *ptr; 631 + gfp_t gfp = GFP_ATOMIC; 632 633 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 634 return; ··· 663 unsigned char menuver; 664 struct dn_skb_cb *cb; 665 unsigned char type = 1; 666 + gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 667 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 668 669 if (!skb)
+1 -1
net/ieee80211/ieee80211_tx.c
··· 207 } 208 209 static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, 210 - unsigned int __nocast gfp_mask) 211 { 212 struct ieee80211_txb *txb; 213 int i;
··· 207 } 208 209 static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, 210 + gfp_t gfp_mask) 211 { 212 struct ieee80211_txb *txb; 213 int i;
+1 -1
net/ipv4/inet_connection_sock.c
··· 494 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); 495 496 struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, 497 - const unsigned int __nocast priority) 498 { 499 struct sock *newsk = sk_clone(sk, priority); 500
··· 494 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); 495 496 struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, 497 + const gfp_t priority) 498 { 499 struct sock *newsk = sk_clone(sk, priority); 500
+1 -1
net/ipv4/ipvs/ip_vs_app.c
··· 604 /* 605 * Replace a segment of data with a new segment 606 */ 607 - int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri, 608 char *o_buf, int o_len, char *n_buf, int n_len) 609 { 610 struct iphdr *iph;
··· 604 /* 605 * Replace a segment of data with a new segment 606 */ 607 + int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, 608 char *o_buf, int o_len, char *n_buf, int n_len) 609 { 610 struct iphdr *iph;
+1 -1
net/ipv4/tcp_output.c
··· 1610 * was unread data in the receive queue. This behavior is recommended 1611 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM 1612 */ 1613 - void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) 1614 { 1615 struct tcp_sock *tp = tcp_sk(sk); 1616 struct sk_buff *skb;
··· 1610 * was unread data in the receive queue. This behavior is recommended 1611 * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM 1612 */ 1613 + void tcp_send_active_reset(struct sock *sk, gfp_t priority) 1614 { 1615 struct tcp_sock *tp = tcp_sk(sk); 1616 struct sk_buff *skb;
+3 -3
net/key/af_key.c
··· 185 } 186 187 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 188 - unsigned int __nocast allocation, struct sock *sk) 189 { 190 int err = -ENOBUFS; 191 ··· 217 #define BROADCAST_ONE 1 218 #define BROADCAST_REGISTERED 2 219 #define BROADCAST_PROMISC_ONLY 4 220 - static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation, 221 int broadcast_flags, struct sock *one_sk) 222 { 223 struct sock *sk; ··· 1417 } 1418 1419 static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, 1420 - unsigned int __nocast allocation) 1421 { 1422 struct sk_buff *skb; 1423 struct sadb_msg *hdr;
··· 185 } 186 187 static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 188 + gfp_t allocation, struct sock *sk) 189 { 190 int err = -ENOBUFS; 191 ··· 217 #define BROADCAST_ONE 1 218 #define BROADCAST_REGISTERED 2 219 #define BROADCAST_PROMISC_ONLY 4 220 + static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, 221 int broadcast_flags, struct sock *one_sk) 222 { 223 struct sock *sk; ··· 1417 } 1418 1419 static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, 1420 + gfp_t allocation) 1421 { 1422 struct sk_buff *skb; 1423 struct sadb_msg *hdr;
+1 -2
net/llc/llc_conn.c
··· 867 * Allocates a LLC sock and initializes it. Returns the new LLC sock 868 * or %NULL if there's no memory available for one 869 */ 870 - struct sock *llc_sk_alloc(int family, unsigned int __nocast priority, 871 - struct proto *prot) 872 { 873 struct sock *sk = sk_alloc(family, priority, prot, 1); 874
··· 867 * Allocates a LLC sock and initializes it. Returns the new LLC sock 868 * or %NULL if there's no memory available for one 869 */ 870 + struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot) 871 { 872 struct sock *sk = sk_alloc(family, priority, prot, 1); 873
+1 -2
net/netfilter/nfnetlink.c
··· 195 196 int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 197 { 198 - unsigned int __nocast allocation = 199 - in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 200 int err = 0; 201 202 NETLINK_CB(skb).dst_group = group;
··· 195 196 int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 197 { 198 + gfp_t allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 199 int err = 0; 200 201 NETLINK_CB(skb).dst_group = group;
+2 -2
net/netlink/af_netlink.c
··· 758 } 759 760 static inline struct sk_buff *netlink_trim(struct sk_buff *skb, 761 - unsigned int __nocast allocation) 762 { 763 int delta; 764 ··· 880 } 881 882 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 883 - u32 group, unsigned int __nocast allocation) 884 { 885 struct netlink_broadcast_data info; 886 struct hlist_node *node;
··· 758 } 759 760 static inline struct sk_buff *netlink_trim(struct sk_buff *skb, 761 + gfp_t allocation) 762 { 763 int delta; 764 ··· 880 } 881 882 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, 883 + u32 group, gfp_t allocation) 884 { 885 struct netlink_broadcast_data info; 886 struct hlist_node *node;
+1 -1
net/rxrpc/call.c
··· 1923 size_t sioc, 1924 struct kvec *siov, 1925 u8 rxhdr_flags, 1926 - unsigned int __nocast alloc_flags, 1927 int dup_data, 1928 size_t *size_sent) 1929 {
··· 1923 size_t sioc, 1924 struct kvec *siov, 1925 u8 rxhdr_flags, 1926 + gfp_t alloc_flags, 1927 int dup_data, 1928 size_t *size_sent) 1929 {
+1 -1
net/rxrpc/connection.c
··· 522 uint8_t type, 523 int dcount, 524 struct kvec diov[], 525 - unsigned int __nocast alloc_flags, 526 struct rxrpc_message **_msg) 527 { 528 struct rxrpc_message *msg;
··· 522 uint8_t type, 523 int dcount, 524 struct kvec diov[], 525 + gfp_t alloc_flags, 526 struct rxrpc_message **_msg) 527 { 528 struct rxrpc_message *msg;
+5 -5
net/sctp/associola.c
··· 71 const struct sctp_endpoint *ep, 72 const struct sock *sk, 73 sctp_scope_t scope, 74 - unsigned int __nocast gfp) 75 { 76 struct sctp_sock *sp; 77 int i; ··· 273 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, 274 const struct sock *sk, 275 sctp_scope_t scope, 276 - unsigned int __nocast gfp) 277 { 278 struct sctp_association *asoc; 279 ··· 479 /* Add a transport address to an association. */ 480 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, 481 const union sctp_addr *addr, 482 - const unsigned int __nocast gfp, 483 const int peer_state) 484 { 485 struct sctp_transport *peer; ··· 1231 * local endpoint and the remote peer. 1232 */ 1233 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1234 - unsigned int __nocast gfp) 1235 { 1236 sctp_scope_t scope; 1237 int flags; ··· 1254 /* Build the association's bind address list from the cookie. */ 1255 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, 1256 struct sctp_cookie *cookie, 1257 - unsigned int __nocast gfp) 1258 { 1259 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); 1260 int var_size3 = cookie->raw_addr_list_len;
··· 71 const struct sctp_endpoint *ep, 72 const struct sock *sk, 73 sctp_scope_t scope, 74 + gfp_t gfp) 75 { 76 struct sctp_sock *sp; 77 int i; ··· 273 struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, 274 const struct sock *sk, 275 sctp_scope_t scope, 276 + gfp_t gfp) 277 { 278 struct sctp_association *asoc; 279 ··· 479 /* Add a transport address to an association. */ 480 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, 481 const union sctp_addr *addr, 482 + const gfp_t gfp, 483 const int peer_state) 484 { 485 struct sctp_transport *peer; ··· 1231 * local endpoint and the remote peer. 1232 */ 1233 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1234 + gfp_t gfp) 1235 { 1236 sctp_scope_t scope; 1237 int flags; ··· 1254 /* Build the association's bind address list from the cookie. */ 1255 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, 1256 struct sctp_cookie *cookie, 1257 + gfp_t gfp) 1258 { 1259 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); 1260 int var_size3 = cookie->raw_addr_list_len;
+6 -6
net/sctp/bind_addr.c
··· 53 54 /* Forward declarations for internal helpers. */ 55 static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, 56 - sctp_scope_t scope, unsigned int __nocast gfp, 57 int flags); 58 static void sctp_bind_addr_clean(struct sctp_bind_addr *); 59 ··· 64 */ 65 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 66 const struct sctp_bind_addr *src, 67 - sctp_scope_t scope, unsigned int __nocast gfp, 68 int flags) 69 { 70 struct sctp_sockaddr_entry *addr; ··· 146 147 /* Add an address to the bind address list in the SCTP_bind_addr structure. */ 148 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 149 - unsigned int __nocast gfp) 150 { 151 struct sctp_sockaddr_entry *addr; 152 ··· 200 */ 201 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 202 int *addrs_len, 203 - unsigned int __nocast gfp) 204 { 205 union sctp_params addrparms; 206 union sctp_params retval; ··· 252 * address parameters). 253 */ 254 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, 255 - int addrs_len, __u16 port, unsigned int __nocast gfp) 256 { 257 union sctp_addr_param *rawaddr; 258 struct sctp_paramhdr *param; ··· 350 /* Copy out addresses from the global local address list. */ 351 static int sctp_copy_one_addr(struct sctp_bind_addr *dest, 352 union sctp_addr *addr, 353 - sctp_scope_t scope, unsigned int __nocast gfp, 354 int flags) 355 { 356 int error = 0;
··· 53 54 /* Forward declarations for internal helpers. */ 55 static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, 56 + sctp_scope_t scope, gfp_t gfp, 57 int flags); 58 static void sctp_bind_addr_clean(struct sctp_bind_addr *); 59 ··· 64 */ 65 int sctp_bind_addr_copy(struct sctp_bind_addr *dest, 66 const struct sctp_bind_addr *src, 67 + sctp_scope_t scope, gfp_t gfp, 68 int flags) 69 { 70 struct sctp_sockaddr_entry *addr; ··· 146 147 /* Add an address to the bind address list in the SCTP_bind_addr structure. */ 148 int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, 149 + gfp_t gfp) 150 { 151 struct sctp_sockaddr_entry *addr; 152 ··· 200 */ 201 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, 202 int *addrs_len, 203 + gfp_t gfp) 204 { 205 union sctp_params addrparms; 206 union sctp_params retval; ··· 252 * address parameters). 253 */ 254 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, 255 + int addrs_len, __u16 port, gfp_t gfp) 256 { 257 union sctp_addr_param *rawaddr; 258 struct sctp_paramhdr *param; ··· 350 /* Copy out addresses from the global local address list. */ 351 static int sctp_copy_one_addr(struct sctp_bind_addr *dest, 352 union sctp_addr *addr, 353 + sctp_scope_t scope, gfp_t gfp, 354 int flags) 355 { 356 int error = 0;
+1 -1
net/sctp/chunk.c
··· 62 } 63 64 /* Allocate and initialize datamsg. */ 65 - SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(unsigned int __nocast gfp) 66 { 67 struct sctp_datamsg *msg; 68 msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
··· 62 } 63 64 /* Allocate and initialize datamsg. */ 65 + SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) 66 { 67 struct sctp_datamsg *msg; 68 msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
+2 -3
net/sctp/endpointola.c
··· 68 */ 69 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, 70 struct sock *sk, 71 - unsigned int __nocast gfp) 72 { 73 struct sctp_sock *sp = sctp_sk(sk); 74 memset(ep, 0, sizeof(struct sctp_endpoint)); ··· 138 /* Create a sctp_endpoint with all that boring stuff initialized. 139 * Returns NULL if there isn't enough memory. 140 */ 141 - struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, 142 - unsigned int __nocast gfp) 143 { 144 struct sctp_endpoint *ep; 145
··· 68 */ 69 static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, 70 struct sock *sk, 71 + gfp_t gfp) 72 { 73 struct sctp_sock *sp = sctp_sk(sk); 74 memset(ep, 0, sizeof(struct sctp_endpoint)); ··· 138 /* Create a sctp_endpoint with all that boring stuff initialized. 139 * Returns NULL if there isn't enough memory. 140 */ 141 + struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) 142 { 143 struct sctp_endpoint *ep; 144
+1 -1
net/sctp/protocol.c
··· 219 220 /* Copy the local addresses which are valid for 'scope' into 'bp'. */ 221 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 222 - unsigned int __nocast gfp, int copy_flags) 223 { 224 struct sctp_sockaddr_entry *addr; 225 int error = 0;
··· 219 220 /* Copy the local addresses which are valid for 'scope' into 'bp'. */ 221 int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 222 + gfp_t gfp, int copy_flags) 223 { 224 struct sctp_sockaddr_entry *addr; 225 int error = 0;
+7 -7
net/sctp/sm_make_chunk.c
··· 78 static int sctp_process_param(struct sctp_association *asoc, 79 union sctp_params param, 80 const union sctp_addr *peer_addr, 81 - unsigned int __nocast gfp); 82 83 /* What was the inbound interface for this chunk? */ 84 int sctp_chunk_iif(const struct sctp_chunk *chunk) ··· 174 */ 175 struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, 176 const struct sctp_bind_addr *bp, 177 - unsigned int __nocast gfp, int vparam_len) 178 { 179 sctp_inithdr_t init; 180 union sctp_params addrs; ··· 261 262 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, 263 const struct sctp_chunk *chunk, 264 - unsigned int __nocast gfp, int unkparam_len) 265 { 266 sctp_inithdr_t initack; 267 struct sctp_chunk *retval; ··· 1234 /* Create a CLOSED association to use with an incoming packet. */ 1235 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, 1236 struct sctp_chunk *chunk, 1237 - unsigned int __nocast gfp) 1238 { 1239 struct sctp_association *asoc; 1240 struct sk_buff *skb; ··· 1349 struct sctp_association *sctp_unpack_cookie( 1350 const struct sctp_endpoint *ep, 1351 const struct sctp_association *asoc, 1352 - struct sctp_chunk *chunk, unsigned int __nocast gfp, 1353 int *error, struct sctp_chunk **errp) 1354 { 1355 struct sctp_association *retval = NULL; ··· 1814 */ 1815 int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 1816 const union sctp_addr *peer_addr, 1817 - sctp_init_chunk_t *peer_init, unsigned int __nocast gfp) 1818 { 1819 union sctp_params param; 1820 struct sctp_transport *transport; ··· 1985 static int sctp_process_param(struct sctp_association *asoc, 1986 union sctp_params param, 1987 const union sctp_addr *peer_addr, 1988 - unsigned int __nocast gfp) 1989 { 1990 union sctp_addr addr; 1991 int i;
··· 78 static int sctp_process_param(struct sctp_association *asoc, 79 union sctp_params param, 80 const union sctp_addr *peer_addr, 81 + gfp_t gfp); 82 83 /* What was the inbound interface for this chunk? */ 84 int sctp_chunk_iif(const struct sctp_chunk *chunk) ··· 174 */ 175 struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, 176 const struct sctp_bind_addr *bp, 177 + gfp_t gfp, int vparam_len) 178 { 179 sctp_inithdr_t init; 180 union sctp_params addrs; ··· 261 262 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, 263 const struct sctp_chunk *chunk, 264 + gfp_t gfp, int unkparam_len) 265 { 266 sctp_inithdr_t initack; 267 struct sctp_chunk *retval; ··· 1234 /* Create a CLOSED association to use with an incoming packet. */ 1235 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, 1236 struct sctp_chunk *chunk, 1237 + gfp_t gfp) 1238 { 1239 struct sctp_association *asoc; 1240 struct sk_buff *skb; ··· 1349 struct sctp_association *sctp_unpack_cookie( 1350 const struct sctp_endpoint *ep, 1351 const struct sctp_association *asoc, 1352 + struct sctp_chunk *chunk, gfp_t gfp, 1353 int *error, struct sctp_chunk **errp) 1354 { 1355 struct sctp_association *retval = NULL; ··· 1814 */ 1815 int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 1816 const union sctp_addr *peer_addr, 1817 + sctp_init_chunk_t *peer_init, gfp_t gfp) 1818 { 1819 union sctp_params param; 1820 struct sctp_transport *transport; ··· 1985 static int sctp_process_param(struct sctp_association *asoc, 1986 union sctp_params param, 1987 const union sctp_addr *peer_addr, 1988 + gfp_t gfp) 1989 { 1990 union sctp_addr addr; 1991 int i;
+6 -6
net/sctp/sm_sideeffect.c
··· 63 void *event_arg, 64 sctp_disposition_t status, 65 sctp_cmd_seq_t *commands, 66 - unsigned int __nocast gfp); 67 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, 68 sctp_state_t state, 69 struct sctp_endpoint *ep, ··· 71 void *event_arg, 72 sctp_disposition_t status, 73 sctp_cmd_seq_t *commands, 74 - unsigned int __nocast gfp); 75 76 /******************************************************************** 77 * Helper functions ··· 498 struct sctp_association *asoc, 499 struct sctp_chunk *chunk, 500 sctp_init_chunk_t *peer_init, 501 - unsigned int __nocast gfp) 502 { 503 int error; 504 ··· 853 struct sctp_endpoint *ep, 854 struct sctp_association *asoc, 855 void *event_arg, 856 - unsigned int __nocast gfp) 857 { 858 sctp_cmd_seq_t commands; 859 const sctp_sm_table_entry_t *state_fn; ··· 898 void *event_arg, 899 sctp_disposition_t status, 900 sctp_cmd_seq_t *commands, 901 - unsigned int __nocast gfp) 902 { 903 int error; 904 ··· 986 void *event_arg, 987 sctp_disposition_t status, 988 sctp_cmd_seq_t *commands, 989 - unsigned int __nocast gfp) 990 { 991 int error = 0; 992 int force;
··· 63 void *event_arg, 64 sctp_disposition_t status, 65 sctp_cmd_seq_t *commands, 66 + gfp_t gfp); 67 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, 68 sctp_state_t state, 69 struct sctp_endpoint *ep, ··· 71 void *event_arg, 72 sctp_disposition_t status, 73 sctp_cmd_seq_t *commands, 74 + gfp_t gfp); 75 76 /******************************************************************** 77 * Helper functions ··· 498 struct sctp_association *asoc, 499 struct sctp_chunk *chunk, 500 sctp_init_chunk_t *peer_init, 501 + gfp_t gfp) 502 { 503 int error; 504 ··· 853 struct sctp_endpoint *ep, 854 struct sctp_association *asoc, 855 void *event_arg, 856 + gfp_t gfp) 857 { 858 sctp_cmd_seq_t commands; 859 const sctp_sm_table_entry_t *state_fn; ··· 898 void *event_arg, 899 sctp_disposition_t status, 900 sctp_cmd_seq_t *commands, 901 + gfp_t gfp) 902 { 903 int error; 904 ··· 986 void *event_arg, 987 sctp_disposition_t status, 988 sctp_cmd_seq_t *commands, 989 + gfp_t gfp) 990 { 991 int error = 0; 992 int force;
+1 -1
net/sctp/ssnmap.c
··· 58 * Allocate room to store at least 'len' contiguous TSNs. 59 */ 60 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 61 - unsigned int __nocast gfp) 62 { 63 struct sctp_ssnmap *retval; 64 int size;
··· 58 * Allocate room to store at least 'len' contiguous TSNs. 59 */ 60 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, 61 + gfp_t gfp) 62 { 63 struct sctp_ssnmap *retval; 64 int size;
+2 -2
net/sctp/transport.c
··· 57 /* Initialize a new transport from provided memory. */ 58 static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, 59 const union sctp_addr *addr, 60 - unsigned int __nocast gfp) 61 { 62 /* Copy in the address. */ 63 peer->ipaddr = *addr; ··· 122 123 /* Allocate and initialize a new transport. */ 124 struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, 125 - unsigned int __nocast gfp) 126 { 127 struct sctp_transport *transport; 128
··· 57 /* Initialize a new transport from provided memory. */ 58 static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, 59 const union sctp_addr *addr, 60 + gfp_t gfp) 61 { 62 /* Copy in the address. */ 63 peer->ipaddr = *addr; ··· 122 123 /* Allocate and initialize a new transport. */ 124 struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, 125 + gfp_t gfp) 126 { 127 struct sctp_transport *transport; 128
+9 -9
net/sctp/ulpevent.c
··· 74 75 /* Create a new sctp_ulpevent. */ 76 SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, 77 - unsigned int __nocast gfp) 78 { 79 struct sctp_ulpevent *event; 80 struct sk_buff *skb; ··· 136 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( 137 const struct sctp_association *asoc, 138 __u16 flags, __u16 state, __u16 error, __u16 outbound, 139 - __u16 inbound, unsigned int __nocast gfp) 140 { 141 struct sctp_ulpevent *event; 142 struct sctp_assoc_change *sac; ··· 237 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 238 const struct sctp_association *asoc, 239 const struct sockaddr_storage *aaddr, 240 - int flags, int state, int error, unsigned int __nocast gfp) 241 { 242 struct sctp_ulpevent *event; 243 struct sctp_paddr_change *spc; ··· 350 */ 351 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 352 const struct sctp_association *asoc, struct sctp_chunk *chunk, 353 - __u16 flags, unsigned int __nocast gfp) 354 { 355 struct sctp_ulpevent *event; 356 struct sctp_remote_error *sre; ··· 448 */ 449 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 450 const struct sctp_association *asoc, struct sctp_chunk *chunk, 451 - __u16 flags, __u32 error, unsigned int __nocast gfp) 452 { 453 struct sctp_ulpevent *event; 454 struct sctp_send_failed *ssf; ··· 557 */ 558 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 559 const struct sctp_association *asoc, 560 - __u16 flags, unsigned int __nocast gfp) 561 { 562 struct sctp_ulpevent *event; 563 struct sctp_shutdown_event *sse; ··· 620 * 5.3.1.6 SCTP_ADAPTION_INDICATION 621 */ 622 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 623 - const struct sctp_association *asoc, unsigned int __nocast gfp) 624 { 625 struct sctp_ulpevent *event; 626 struct sctp_adaption_event *sai; ··· 657 */ 658 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 659 struct sctp_chunk *chunk, 660 - unsigned int __nocast gfp) 661 { 662 struct sctp_ulpevent *event = NULL; 663 struct sk_buff *skb; ··· 719 */ 720 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 721 const struct sctp_association *asoc, __u32 indication, 722 - unsigned int __nocast gfp) 723 { 724 struct sctp_ulpevent *event; 725 struct sctp_pdapi_event *pd;
··· 74 75 /* Create a new sctp_ulpevent. */ 76 SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, 77 + gfp_t gfp) 78 { 79 struct sctp_ulpevent *event; 80 struct sk_buff *skb; ··· 136 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( 137 const struct sctp_association *asoc, 138 __u16 flags, __u16 state, __u16 error, __u16 outbound, 139 + __u16 inbound, gfp_t gfp) 140 { 141 struct sctp_ulpevent *event; 142 struct sctp_assoc_change *sac; ··· 237 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( 238 const struct sctp_association *asoc, 239 const struct sockaddr_storage *aaddr, 240 + int flags, int state, int error, gfp_t gfp) 241 { 242 struct sctp_ulpevent *event; 243 struct sctp_paddr_change *spc; ··· 350 */ 351 struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 352 const struct sctp_association *asoc, struct sctp_chunk *chunk, 353 + __u16 flags, gfp_t gfp) 354 { 355 struct sctp_ulpevent *event; 356 struct sctp_remote_error *sre; ··· 448 */ 449 struct sctp_ulpevent *sctp_ulpevent_make_send_failed( 450 const struct sctp_association *asoc, struct sctp_chunk *chunk, 451 + __u16 flags, __u32 error, gfp_t gfp) 452 { 453 struct sctp_ulpevent *event; 454 struct sctp_send_failed *ssf; ··· 557 */ 558 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( 559 const struct sctp_association *asoc, 560 + __u16 flags, gfp_t gfp) 561 { 562 struct sctp_ulpevent *event; 563 struct sctp_shutdown_event *sse; ··· 620 * 5.3.1.6 SCTP_ADAPTION_INDICATION 621 */ 622 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( 623 + const struct sctp_association *asoc, gfp_t gfp) 624 { 625 struct sctp_ulpevent *event; 626 struct sctp_adaption_event *sai; ··· 657 */ 658 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 659 struct sctp_chunk *chunk, 660 + gfp_t gfp) 661 { 662 struct sctp_ulpevent *event = NULL; 663 struct sk_buff *skb; ··· 719 */ 720 struct sctp_ulpevent *sctp_ulpevent_make_pdapi( 721 const struct sctp_association *asoc, __u32 indication, 722 + gfp_t gfp) 723 { 724 struct sctp_ulpevent *event; 725 struct sctp_pdapi_event *pd;
+4 -4
net/sctp/ulpqueue.c
··· 100 101 /* Process an incoming DATA chunk. */ 102 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 103 - unsigned int __nocast gfp) 104 { 105 struct sk_buff_head temp; 106 sctp_data_chunk_t *hdr; ··· 792 /* Partial deliver the first message as there is pressure on rwnd. */ 793 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, 794 struct sctp_chunk *chunk, 795 - unsigned int __nocast gfp) 796 { 797 struct sctp_ulpevent *event; 798 struct sctp_association *asoc; ··· 816 817 /* Renege some packets to make room for an incoming chunk. */ 818 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 819 - unsigned int __nocast gfp) 820 { 821 struct sctp_association *asoc; 822 __u16 needed, freed; ··· 855 /* Notify the application if an association is aborted and in 856 * partial delivery mode. Send up any pending received messages. 857 */ 858 - void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, unsigned int __nocast gfp) 859 { 860 struct sctp_ulpevent *ev = NULL; 861 struct sock *sk;
··· 100 101 /* Process an incoming DATA chunk. */ 102 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 103 + gfp_t gfp) 104 { 105 struct sk_buff_head temp; 106 sctp_data_chunk_t *hdr; ··· 792 /* Partial deliver the first message as there is pressure on rwnd. */ 793 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, 794 struct sctp_chunk *chunk, 795 + gfp_t gfp) 796 { 797 struct sctp_ulpevent *event; 798 struct sctp_association *asoc; ··· 816 817 /* Renege some packets to make room for an incoming chunk. */ 818 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, 819 + gfp_t gfp) 820 { 821 struct sctp_association *asoc; 822 __u16 needed, freed; ··· 855 /* Notify the application if an association is aborted and in 856 * partial delivery mode. Send up any pending received messages. 857 */ 858 + void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) 859 { 860 struct sctp_ulpevent *ev = NULL; 861 struct sock *sk;
+1 -1
net/sunrpc/sched.c
··· 719 void * 720 rpc_malloc(struct rpc_task *task, size_t size) 721 { 722 - unsigned int __nocast gfp; 723 724 if (task->tk_flags & RPC_TASK_SWAPPER) 725 gfp = GFP_ATOMIC;
··· 719 void * 720 rpc_malloc(struct rpc_task *task, size_t size) 721 { 722 + gfp_t gfp; 723 724 if (task->tk_flags & RPC_TASK_SWAPPER) 725 gfp = GFP_ATOMIC;
+1 -1
net/xfrm/xfrm_policy.c
··· 225 * SPD calls. 226 */ 227 228 - struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp) 229 { 230 struct xfrm_policy *policy; 231
··· 225 * SPD calls. 226 */ 227 228 + struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp) 229 { 230 struct xfrm_policy *policy; 231
+1 -1
sound/core/memalloc.c
··· 106 107 static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, 108 dma_addr_t *dma_handle, 109 - unsigned int __nocast flags) 110 { 111 void *ret; 112 u64 dma_mask, coherent_dma_mask;
··· 106 107 static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, 108 dma_addr_t *dma_handle, 109 + gfp_t flags) 110 { 111 void *ret; 112 u64 dma_mask, coherent_dma_mask;
+5 -5
sound/core/memory.c
··· 89 } 90 } 91 92 - static void *__snd_kmalloc(size_t size, unsigned int __nocast flags, void *caller) 93 { 94 unsigned long cpu_flags; 95 struct snd_alloc_track *t; ··· 111 } 112 113 #define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0)); 114 - void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags) 115 { 116 return _snd_kmalloc(size, flags); 117 } 118 119 - void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags) 120 { 121 void *ret = _snd_kmalloc(size, flags); 122 if (ret) ··· 125 } 126 EXPORT_SYMBOL(snd_hidden_kzalloc); 127 128 - void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags) 129 { 130 void *ret = NULL; 131 if (n != 0 && size > INT_MAX / n) ··· 190 snd_wrapper_vfree(obj); 191 } 192 193 - char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags) 194 { 195 int len; 196 char *buf;
··· 89 } 90 } 91 92 + static void *__snd_kmalloc(size_t size, gfp_t flags, void *caller) 93 { 94 unsigned long cpu_flags; 95 struct snd_alloc_track *t; ··· 111 } 112 113 #define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0)); 114 + void *snd_hidden_kmalloc(size_t size, gfp_t flags) 115 { 116 return _snd_kmalloc(size, flags); 117 } 118 119 + void *snd_hidden_kzalloc(size_t size, gfp_t flags) 120 { 121 void *ret = _snd_kmalloc(size, flags); 122 if (ret) ··· 125 } 126 EXPORT_SYMBOL(snd_hidden_kzalloc); 127 128 + void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags) 129 { 130 void *ret = NULL; 131 if (n != 0 && size > INT_MAX / n) ··· 190 snd_wrapper_vfree(obj); 191 } 192 193 + char *snd_hidden_kstrdup(const char *s, gfp_t flags) 194 { 195 int len; 196 char *buf;
+1 -1
sound/core/seq/instr/ainstr_iw.c
··· 58 iwffff_xenv_t *ex, 59 char __user **data, 60 long *len, 61 - unsigned int __nocast gfp_mask) 62 { 63 __u32 stype; 64 iwffff_env_record_t *rp, *rp_last;
··· 58 iwffff_xenv_t *ex, 59 char __user **data, 60 long *len, 61 + gfp_t gfp_mask) 62 { 63 __u32 stype; 64 iwffff_env_record_t *rp, *rp_last;
+1 -1
sound/core/wrappers.c
··· 27 #include <linux/fs.h> 28 29 #ifdef CONFIG_SND_DEBUG_MEMORY 30 - void *snd_wrapper_kmalloc(size_t size, unsigned int __nocast flags) 31 { 32 return kmalloc(size, flags); 33 }
··· 27 #include <linux/fs.h> 28 29 #ifdef CONFIG_SND_DEBUG_MEMORY 30 + void *snd_wrapper_kmalloc(size_t size, gfp_t flags) 31 { 32 return kmalloc(size, flags); 33 }
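
The hunks above are all the same mechanical substitution in parameter lists and local declarations. As an illustration of what that substitution buys, the following standalone sketch (every "my_"-prefixed name and flag value is invented for this example and is not taken from the kernel) shows how a typedef carrying a checker-only annotation can document allocation-flag parameters while an ordinary compiler still sees a plain unsigned int, leaving generated code unchanged:

/*
 * Illustrative sketch only; not part of the commit and not the kernel's
 * real definitions.  It mirrors the pattern applied in the hunks above:
 * allocation-flag parameters take a dedicated typedef that a checker such
 * as sparse can annotate, while a plain build sees unsigned int.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef __CHECKER__
#define __my_nocast __attribute__((nocast))	/* checker-only annotation */
#else
#define __my_nocast				/* plain build: expands to nothing */
#endif

typedef unsigned int __my_nocast my_gfp_t;	/* stand-in for the gfp_t idea */

#define MY_GFP_KERNEL	((my_gfp_t)0x01u)	/* invented flag values, demo only */
#define MY_GFP_ATOMIC	((my_gfp_t)0x02u)

/* Mirrors the converted signatures: the flags argument is typed. */
static void *my_alloc(size_t size, my_gfp_t flags)
{
	printf("alloc %zu bytes, flags %#x\n", size, (unsigned int)flags);
	return malloc(size);
}

int main(void)
{
	void *p = my_alloc(128, MY_GFP_KERNEL);	/* typed flag, as in the hunks above */

	free(p);
	return 0;
}

With the typedef in place, a reader (or a static checker built to understand the annotation) can tell at a glance which parameters are allocation flags, which is exactly the property the converted signatures in this section gain over a bare unsigned int.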