Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] slab: remove SLAB_KERNEL

SLAB_KERNEL is an alias of GFP_KERNEL.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Christoph Lameter; committed by Linus Torvalds.
Commit: e94b1766 (parent: 54e6ecb2)

+164 -165
+1 -1
arch/i386/kernel/sysenter.c
··· 132 132 goto up_fail; 133 133 } 134 134 135 - vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); 135 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 136 136 if (!vma) { 137 137 ret = -ENOMEM; 138 138 goto up_fail;
+4 -4
arch/ia64/ia32/binfmt_elf32.c
··· 91 91 * it with privilege level 3 because the IVE uses non-privileged accesses to these 92 92 * tables. IA-32 segmentation is used to protect against IA-32 accesses to them. 93 93 */ 94 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 94 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 95 95 if (vma) { 96 96 memset(vma, 0, sizeof(*vma)); 97 97 vma->vm_mm = current->mm; ··· 117 117 * code is locked in specific gate page, which is pointed by pretcode 118 118 * when setup_frame_ia32 119 119 */ 120 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 120 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 121 121 if (vma) { 122 122 memset(vma, 0, sizeof(*vma)); 123 123 vma->vm_mm = current->mm; ··· 142 142 * Install LDT as anonymous memory. This gives us all-zero segment descriptors 143 143 * until a task modifies them via modify_ldt(). 144 144 */ 145 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 145 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 146 146 if (vma) { 147 147 memset(vma, 0, sizeof(*vma)); 148 148 vma->vm_mm = current->mm; ··· 214 214 bprm->loader += stack_base; 215 215 bprm->exec += stack_base; 216 216 217 - mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 217 + mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 218 218 if (!mpnt) 219 219 return -ENOMEM; 220 220
+1 -1
arch/ia64/kernel/perfmon.c
··· 2302 2302 DPRINT(("smpl_buf @%p\n", smpl_buf)); 2303 2303 2304 2304 /* allocate vma */ 2305 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 2305 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 2306 2306 if (!vma) { 2307 2307 DPRINT(("Cannot allocate vma\n")); 2308 2308 goto error_kmem;
+2 -2
arch/ia64/mm/init.c
··· 156 156 * the problem. When the process attempts to write to the register backing store 157 157 * for the first time, it will get a SEGFAULT in this case. 158 158 */ 159 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 159 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 160 160 if (vma) { 161 161 memset(vma, 0, sizeof(*vma)); 162 162 vma->vm_mm = current->mm; ··· 175 175 176 176 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ 177 177 if (!(current->personality & MMAP_PAGE_ZERO)) { 178 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 178 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 179 179 if (vma) { 180 180 memset(vma, 0, sizeof(*vma)); 181 181 vma->vm_mm = current->mm;
+1 -1
arch/powerpc/kernel/vdso.c
··· 264 264 265 265 266 266 /* Allocate a VMA structure and fill it up */ 267 - vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); 267 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 268 268 if (vma == NULL) { 269 269 rc = -ENOMEM; 270 270 goto fail_mmapsem;
+1 -1
arch/powerpc/platforms/cell/spufs/inode.c
··· 48 48 { 49 49 struct spufs_inode_info *ei; 50 50 51 - ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL); 51 + ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL); 52 52 if (!ei) 53 53 return NULL; 54 54
+1 -1
arch/sh/kernel/vsyscall/vsyscall.c
··· 97 97 goto up_fail; 98 98 } 99 99 100 - vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); 100 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 101 101 if (!vma) { 102 102 ret = -ENOMEM; 103 103 goto up_fail;
+1 -1
arch/x86_64/ia32/ia32_binfmt.c
··· 351 351 bprm->loader += stack_base; 352 352 bprm->exec += stack_base; 353 353 354 - mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 354 + mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 355 355 if (!mpnt) 356 356 return -ENOMEM; 357 357
+1 -1
arch/x86_64/ia32/syscall32.c
··· 49 49 struct mm_struct *mm = current->mm; 50 50 int ret; 51 51 52 - vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 52 + vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 53 53 if (!vma) 54 54 return -ENOMEM; 55 55
+2 -2
drivers/atm/he.c
··· 820 820 void *cpuaddr; 821 821 822 822 #ifdef USE_RBPS_POOL 823 - cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); 823 + cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|SLAB_DMA, &dma_handle); 824 824 if (cpuaddr == NULL) 825 825 return -ENOMEM; 826 826 #else ··· 884 884 void *cpuaddr; 885 885 886 886 #ifdef USE_RBPL_POOL 887 - cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle); 887 + cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|SLAB_DMA, &dma_handle); 888 888 if (cpuaddr == NULL) 889 889 return -ENOMEM; 890 890 #else
+1 -1
drivers/base/dmapool.c
··· 126 126 } else if (allocation < size) 127 127 return NULL; 128 128 129 - if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL))) 129 + if (!(retval = kmalloc (sizeof *retval, GFP_KERNEL))) 130 130 return retval; 131 131 132 132 strlcpy (retval->name, name, sizeof retval->name);
+2 -2
drivers/dma/ioatdma.c
··· 636 636 dma_cookie_t cookie; 637 637 int err = 0; 638 638 639 - src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL); 639 + src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 640 640 if (!src) 641 641 return -ENOMEM; 642 - dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, SLAB_KERNEL); 642 + dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 643 643 if (!dest) { 644 644 kfree(src); 645 645 return -ENOMEM;
+1 -1
drivers/ieee1394/hosts.c
··· 123 123 int i; 124 124 int hostnum = 0; 125 125 126 - h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL); 126 + h = kzalloc(sizeof(*h) + extra, GFP_KERNEL); 127 127 if (!h) 128 128 return NULL; 129 129
+4 -4
drivers/ieee1394/ohci1394.c
··· 1225 1225 int ctx; 1226 1226 int ret = -ENOMEM; 1227 1227 1228 - recv = kmalloc(sizeof(*recv), SLAB_KERNEL); 1228 + recv = kmalloc(sizeof(*recv), GFP_KERNEL); 1229 1229 if (!recv) 1230 1230 return -ENOMEM; 1231 1231 ··· 1918 1918 int ctx; 1919 1919 int ret = -ENOMEM; 1920 1920 1921 - xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL); 1921 + xmit = kmalloc(sizeof(*xmit), GFP_KERNEL); 1922 1922 if (!xmit) 1923 1923 return -ENOMEM; 1924 1924 ··· 3021 3021 return -ENOMEM; 3022 3022 } 3023 3023 3024 - d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i); 3024 + d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); 3025 3025 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i); 3026 3026 3027 3027 if (d->prg_cpu[i] != NULL) { ··· 3117 3117 OHCI_DMA_ALLOC("dma_rcv prg pool"); 3118 3118 3119 3119 for (i = 0; i < d->num_desc; i++) { 3120 - d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i); 3120 + d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i); 3121 3121 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i); 3122 3122 3123 3123 if (d->prg_cpu[i] != NULL) {
+1 -1
drivers/ieee1394/pcilynx.c
··· 1428 1428 struct i2c_algo_bit_data i2c_adapter_data; 1429 1429 1430 1430 error = -ENOMEM; 1431 - i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL); 1431 + i2c_ad = kmalloc(sizeof(*i2c_ad), GFP_KERNEL); 1432 1432 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1433 1433 1434 1434 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
+5 -5
drivers/ieee1394/raw1394.c
··· 112 112 113 113 static inline struct pending_request *alloc_pending_request(void) 114 114 { 115 - return __alloc_pending_request(SLAB_KERNEL); 115 + return __alloc_pending_request(GFP_KERNEL); 116 116 } 117 117 118 118 static void free_pending_request(struct pending_request *req) ··· 1737 1737 return (-EINVAL); 1738 1738 } 1739 1739 /* addr-list-entry for fileinfo */ 1740 - addr = kmalloc(sizeof(*addr), SLAB_KERNEL); 1740 + addr = kmalloc(sizeof(*addr), GFP_KERNEL); 1741 1741 if (!addr) { 1742 1742 req->req.length = 0; 1743 1743 return (-ENOMEM); ··· 2103 2103 static int get_config_rom(struct file_info *fi, struct pending_request *req) 2104 2104 { 2105 2105 int ret = sizeof(struct raw1394_request); 2106 - quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL); 2106 + quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); 2107 2107 int status; 2108 2108 2109 2109 if (!data) ··· 2133 2133 static int update_config_rom(struct file_info *fi, struct pending_request *req) 2134 2134 { 2135 2135 int ret = sizeof(struct raw1394_request); 2136 - quadlet_t *data = kmalloc(req->req.length, SLAB_KERNEL); 2136 + quadlet_t *data = kmalloc(req->req.length, GFP_KERNEL); 2137 2137 if (!data) 2138 2138 return -ENOMEM; 2139 2139 if (copy_from_user(data, int2ptr(req->req.sendb), req->req.length)) { ··· 2779 2779 { 2780 2780 struct file_info *fi; 2781 2781 2782 - fi = kzalloc(sizeof(*fi), SLAB_KERNEL); 2782 + fi = kzalloc(sizeof(*fi), GFP_KERNEL); 2783 2783 if (!fi) 2784 2784 return -ENOMEM; 2785 2785
+1 -1
drivers/infiniband/hw/ehca/ehca_av.c
··· 57 57 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, 58 58 ib_device); 59 59 60 - av = kmem_cache_alloc(av_cache, SLAB_KERNEL); 60 + av = kmem_cache_alloc(av_cache, GFP_KERNEL); 61 61 if (!av) { 62 62 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", 63 63 pd, ah_attr);
+1 -1
drivers/infiniband/hw/ehca/ehca_cq.c
··· 134 134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) 135 135 return ERR_PTR(-EINVAL); 136 136 137 - my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL); 137 + my_cq = kmem_cache_alloc(cq_cache, GFP_KERNEL); 138 138 if (!my_cq) { 139 139 ehca_err(device, "Out of memory for ehca_cq struct device=%p", 140 140 device);
+1 -1
drivers/infiniband/hw/ehca/ehca_main.c
··· 108 108 109 109 void *ehca_alloc_fw_ctrlblock(void) 110 110 { 111 - void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL); 111 + void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); 112 112 if (!ret) 113 113 ehca_gen_err("Out of memory for ctblk"); 114 114 return ret;
+2 -2
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 53 53 { 54 54 struct ehca_mr *me; 55 55 56 - me = kmem_cache_alloc(mr_cache, SLAB_KERNEL); 56 + me = kmem_cache_alloc(mr_cache, GFP_KERNEL); 57 57 if (me) { 58 58 memset(me, 0, sizeof(struct ehca_mr)); 59 59 spin_lock_init(&me->mrlock); ··· 72 72 { 73 73 struct ehca_mw *me; 74 74 75 - me = kmem_cache_alloc(mw_cache, SLAB_KERNEL); 75 + me = kmem_cache_alloc(mw_cache, GFP_KERNEL); 76 76 if (me) { 77 77 memset(me, 0, sizeof(struct ehca_mw)); 78 78 spin_lock_init(&me->mwlock);
+1 -1
drivers/infiniband/hw/ehca/ehca_pd.c
··· 50 50 { 51 51 struct ehca_pd *pd; 52 52 53 - pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL); 53 + pd = kmem_cache_alloc(pd_cache, GFP_KERNEL); 54 54 if (!pd) { 55 55 ehca_err(device, "device=%p context=%p out of memory", 56 56 device, context);
+1 -1
drivers/infiniband/hw/ehca/ehca_qp.c
··· 450 450 if (pd->uobject && udata) 451 451 context = pd->uobject->context; 452 452 453 - my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL); 453 + my_qp = kmem_cache_alloc(qp_cache, GFP_KERNEL); 454 454 if (!my_qp) { 455 455 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); 456 456 return ERR_PTR(-ENOMEM);
+1 -1
drivers/input/touchscreen/ads7846.c
··· 189 189 { 190 190 struct spi_device *spi = to_spi_device(dev); 191 191 struct ads7846 *ts = dev_get_drvdata(dev); 192 - struct ser_req *req = kzalloc(sizeof *req, SLAB_KERNEL); 192 + struct ser_req *req = kzalloc(sizeof *req, GFP_KERNEL); 193 193 int status; 194 194 int sample; 195 195 int i;
+7 -7
drivers/isdn/gigaset/bas-gigaset.c
··· 2218 2218 * - three for the different uses of the default control pipe 2219 2219 * - three for each isochronous pipe 2220 2220 */ 2221 - if (!(ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL)) || 2222 - !(ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL)) || 2223 - !(ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL)) || 2224 - !(ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL))) 2221 + if (!(ucs->urb_int_in = usb_alloc_urb(0, GFP_KERNEL)) || 2222 + !(ucs->urb_cmd_in = usb_alloc_urb(0, GFP_KERNEL)) || 2223 + !(ucs->urb_cmd_out = usb_alloc_urb(0, GFP_KERNEL)) || 2224 + !(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL))) 2225 2225 goto allocerr; 2226 2226 2227 2227 for (j = 0; j < 2; ++j) { 2228 2228 ubc = cs->bcs[j].hw.bas; 2229 2229 for (i = 0; i < BAS_OUTURBS; ++i) 2230 2230 if (!(ubc->isoouturbs[i].urb = 2231 - usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL))) 2231 + usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL))) 2232 2232 goto allocerr; 2233 2233 for (i = 0; i < BAS_INURBS; ++i) 2234 2234 if (!(ubc->isoinurbs[i] = 2235 - usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL))) 2235 + usb_alloc_urb(BAS_NUMFRAMES, GFP_KERNEL))) 2236 2236 goto allocerr; 2237 2237 } 2238 2238 ··· 2246 2246 (endpoint->bEndpointAddress) & 0x0f), 2247 2247 ucs->int_in_buf, 3, read_int_callback, cs, 2248 2248 endpoint->bInterval); 2249 - if ((rc = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL)) != 0) { 2249 + if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) { 2250 2250 dev_err(cs->dev, "could not submit interrupt URB: %s\n", 2251 2251 get_usb_rcmsg(rc)); 2252 2252 goto error;
+3 -3
drivers/isdn/gigaset/usb-gigaset.c
··· 763 763 goto error; 764 764 } 765 765 766 - ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL); 766 + ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL); 767 767 if (!ucs->bulk_out_urb) { 768 768 dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n"); 769 769 retval = -ENOMEM; ··· 774 774 775 775 atomic_set(&ucs->busy, 0); 776 776 777 - ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL); 777 + ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); 778 778 if (!ucs->read_urb) { 779 779 dev_err(cs->dev, "No free urbs available\n"); 780 780 retval = -ENOMEM; ··· 797 797 gigaset_read_int_callback, 798 798 cs->inbuf + 0, endpoint->bInterval); 799 799 800 - retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL); 800 + retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); 801 801 if (retval) { 802 802 dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval); 803 803 goto error;
+1 -1
drivers/media/dvb/cinergyT2/cinergyT2.c
··· 287 287 int i; 288 288 289 289 cinergyt2->streambuf = usb_buffer_alloc(cinergyt2->udev, STREAM_URB_COUNT*STREAM_BUF_SIZE, 290 - SLAB_KERNEL, &cinergyt2->streambuf_dmahandle); 290 + GFP_KERNEL, &cinergyt2->streambuf_dmahandle); 291 291 if (!cinergyt2->streambuf) { 292 292 dprintk(1, "failed to alloc consistent stream memory area, bailing out!\n"); 293 293 return -ENOMEM;
+1 -1
drivers/mtd/devices/m25p80.c
··· 451 451 return -ENODEV; 452 452 } 453 453 454 - flash = kzalloc(sizeof *flash, SLAB_KERNEL); 454 + flash = kzalloc(sizeof *flash, GFP_KERNEL); 455 455 if (!flash) 456 456 return -ENOMEM; 457 457
+1 -1
drivers/scsi/ipr.c
··· 6940 6940 return -ENOMEM; 6941 6941 6942 6942 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { 6943 - ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr); 6943 + ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); 6944 6944 6945 6945 if (!ipr_cmd) { 6946 6946 ipr_free_cmd_blks(ioa_cfg);
+2 -2
drivers/spi/spi.c
··· 360 360 if (!dev) 361 361 return NULL; 362 362 363 - master = kzalloc(size + sizeof *master, SLAB_KERNEL); 363 + master = kzalloc(size + sizeof *master, GFP_KERNEL); 364 364 if (!master) 365 365 return NULL; 366 366 ··· 607 607 { 608 608 int status; 609 609 610 - buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL); 610 + buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 611 611 if (!buf) { 612 612 status = -ENOMEM; 613 613 goto err0;
+1 -1
drivers/spi/spi_bitbang.c
··· 196 196 return -EINVAL; 197 197 198 198 if (!cs) { 199 - cs = kzalloc(sizeof *cs, SLAB_KERNEL); 199 + cs = kzalloc(sizeof *cs, GFP_KERNEL); 200 200 if (!cs) 201 201 return -ENOMEM; 202 202 spi->controller_state = cs;
+2 -2
drivers/usb/core/hub.c
··· 2371 2371 struct usb_qualifier_descriptor *qual; 2372 2372 int status; 2373 2373 2374 - qual = kmalloc (sizeof *qual, SLAB_KERNEL); 2374 + qual = kmalloc (sizeof *qual, GFP_KERNEL); 2375 2375 if (qual == NULL) 2376 2376 return; 2377 2377 ··· 2922 2922 if (len < le16_to_cpu(udev->config[index].desc.wTotalLength)) 2923 2923 len = le16_to_cpu(udev->config[index].desc.wTotalLength); 2924 2924 } 2925 - buf = kmalloc (len, SLAB_KERNEL); 2925 + buf = kmalloc (len, GFP_KERNEL); 2926 2926 if (buf == NULL) { 2927 2927 dev_err(&udev->dev, "no mem to re-read configs after reset\n"); 2928 2928 /* assume the worst */
+1 -1
drivers/usb/gadget/gmidi.c
··· 1236 1236 1237 1237 1238 1238 /* ok, we made sense of the hardware ... */ 1239 - dev = kzalloc(sizeof(*dev), SLAB_KERNEL); 1239 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1240 1240 if (!dev) { 1241 1241 return -ENOMEM; 1242 1242 }
+1 -1
drivers/usb/gadget/goku_udc.c
··· 1864 1864 } 1865 1865 1866 1866 /* alloc, and start init */ 1867 - dev = kmalloc (sizeof *dev, SLAB_KERNEL); 1867 + dev = kmalloc (sizeof *dev, GFP_KERNEL); 1868 1868 if (dev == NULL){ 1869 1869 pr_debug("enomem %s\n", pci_name(pdev)); 1870 1870 retval = -ENOMEM;
+3 -3
drivers/usb/gadget/inode.c
··· 412 412 /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ 413 413 414 414 value = -ENOMEM; 415 - kbuf = kmalloc (len, SLAB_KERNEL); 415 + kbuf = kmalloc (len, GFP_KERNEL); 416 416 if (unlikely (!kbuf)) 417 417 goto free1; 418 418 ··· 456 456 /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ 457 457 458 458 value = -ENOMEM; 459 - kbuf = kmalloc (len, SLAB_KERNEL); 459 + kbuf = kmalloc (len, GFP_KERNEL); 460 460 if (!kbuf) 461 461 goto free1; 462 462 if (copy_from_user (kbuf, buf, len)) { ··· 1898 1898 buf += 4; 1899 1899 length -= 4; 1900 1900 1901 - kbuf = kmalloc (length, SLAB_KERNEL); 1901 + kbuf = kmalloc (length, GFP_KERNEL); 1902 1902 if (!kbuf) 1903 1903 return -ENOMEM; 1904 1904 if (copy_from_user (kbuf, buf, length)) {
+1 -1
drivers/usb/gadget/net2280.c
··· 2861 2861 } 2862 2862 2863 2863 /* alloc, and start init */ 2864 - dev = kzalloc (sizeof *dev, SLAB_KERNEL); 2864 + dev = kzalloc (sizeof *dev, GFP_KERNEL); 2865 2865 if (dev == NULL){ 2866 2866 retval = -ENOMEM; 2867 2867 goto done;
+1 -1
drivers/usb/gadget/omap_udc.c
··· 2581 2581 /* UDC_PULLUP_EN gates the chip clock */ 2582 2582 // OTG_SYSCON_1_REG |= DEV_IDLE_EN; 2583 2583 2584 - udc = kzalloc(sizeof(*udc), SLAB_KERNEL); 2584 + udc = kzalloc(sizeof(*udc), GFP_KERNEL); 2585 2585 if (!udc) 2586 2586 return -ENOMEM; 2587 2587
+1 -1
drivers/usb/gadget/zero.c
··· 1190 1190 1191 1191 1192 1192 /* ok, we made sense of the hardware ... */ 1193 - dev = kzalloc(sizeof(*dev), SLAB_KERNEL); 1193 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1194 1194 if (!dev) 1195 1195 return -ENOMEM; 1196 1196 spin_lock_init (&dev->lock);
+1 -1
drivers/usb/host/hc_crisv10.c
··· 188 188 #define CHECK_ALIGN(x) if (((__u32)(x)) & 0x00000003) \ 189 189 {panic("Alignment check (DWORD) failed at %s:%s:%d\n", __FILE__, __FUNCTION__, __LINE__);} 190 190 191 - #define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : SLAB_KERNEL) 191 + #define SLAB_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) 192 192 #define KMALLOC_FLAG (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL) 193 193 194 194 /* Most helpful debugging aid */
+1 -1
drivers/usb/host/ohci-pnx4008.c
··· 134 134 { 135 135 struct i2c_client *c; 136 136 137 - c = (struct i2c_client *)kzalloc(sizeof(*c), SLAB_KERNEL); 137 + c = (struct i2c_client *)kzalloc(sizeof(*c), GFP_KERNEL); 138 138 139 139 if (!c) 140 140 return -ENOMEM;
+1 -1
drivers/usb/input/acecad.c
··· 152 152 if (!acecad || !input_dev) 153 153 goto fail1; 154 154 155 - acecad->data = usb_buffer_alloc(dev, 8, SLAB_KERNEL, &acecad->data_dma); 155 + acecad->data = usb_buffer_alloc(dev, 8, GFP_KERNEL, &acecad->data_dma); 156 156 if (!acecad->data) 157 157 goto fail1; 158 158
+1 -1
drivers/usb/input/usbtouchscreen.c
··· 680 680 type->process_pkt = usbtouch_process_pkt; 681 681 682 682 usbtouch->data = usb_buffer_alloc(udev, type->rept_size, 683 - SLAB_KERNEL, &usbtouch->data_dma); 683 + GFP_KERNEL, &usbtouch->data_dma); 684 684 if (!usbtouch->data) 685 685 goto out_free; 686 686
+14 -14
drivers/usb/misc/usbtest.c
··· 213 213 214 214 if (bytes < 0) 215 215 return NULL; 216 - urb = usb_alloc_urb (0, SLAB_KERNEL); 216 + urb = usb_alloc_urb (0, GFP_KERNEL); 217 217 if (!urb) 218 218 return urb; 219 219 usb_fill_bulk_urb (urb, udev, pipe, NULL, bytes, simple_callback, NULL); ··· 223 223 urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; 224 224 if (usb_pipein (pipe)) 225 225 urb->transfer_flags |= URB_SHORT_NOT_OK; 226 - urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL, 226 + urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL, 227 227 &urb->transfer_dma); 228 228 if (!urb->transfer_buffer) { 229 229 usb_free_urb (urb); ··· 315 315 init_completion (&completion); 316 316 if (usb_pipeout (urb->pipe)) 317 317 simple_fill_buf (urb); 318 - if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) 318 + if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) 319 319 break; 320 320 321 321 /* NOTE: no timeouts; can't be broken out of by interrupt */ ··· 374 374 unsigned i; 375 375 unsigned size = max; 376 376 377 - sg = kmalloc (nents * sizeof *sg, SLAB_KERNEL); 377 + sg = kmalloc (nents * sizeof *sg, GFP_KERNEL); 378 378 if (!sg) 379 379 return NULL; 380 380 ··· 382 382 char *buf; 383 383 unsigned j; 384 384 385 - buf = kzalloc (size, SLAB_KERNEL); 385 + buf = kzalloc (size, GFP_KERNEL); 386 386 if (!buf) { 387 387 free_sglist (sg, i); 388 388 return NULL; ··· 428 428 (udev->speed == USB_SPEED_HIGH) 429 429 ? (INTERRUPT_RATE << 3) 430 430 : INTERRUPT_RATE, 431 - sg, nents, 0, SLAB_KERNEL); 431 + sg, nents, 0, GFP_KERNEL); 432 432 433 433 if (retval) 434 434 break; ··· 855 855 * as with bulk/intr sglists, sglen is the queue depth; it also 856 856 * controls which subtests run (more tests than sglen) or rerun. 
857 857 */ 858 - urb = kcalloc(param->sglen, sizeof(struct urb *), SLAB_KERNEL); 858 + urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL); 859 859 if (!urb) 860 860 return -ENOMEM; 861 861 for (i = 0; i < param->sglen; i++) { ··· 981 981 if (!u) 982 982 goto cleanup; 983 983 984 - reqp = usb_buffer_alloc (udev, sizeof *reqp, SLAB_KERNEL, 984 + reqp = usb_buffer_alloc (udev, sizeof *reqp, GFP_KERNEL, 985 985 &u->setup_dma); 986 986 if (!reqp) 987 987 goto cleanup; ··· 1067 1067 * FIXME want additional tests for when endpoint is STALLing 1068 1068 * due to errors, or is just NAKing requests. 1069 1069 */ 1070 - if ((retval = usb_submit_urb (urb, SLAB_KERNEL)) != 0) { 1070 + if ((retval = usb_submit_urb (urb, GFP_KERNEL)) != 0) { 1071 1071 dev_dbg (&dev->intf->dev, "submit fail %d\n", retval); 1072 1072 return retval; 1073 1073 } ··· 1251 1251 if (length < 1 || length > 0xffff || vary >= length) 1252 1252 return -EINVAL; 1253 1253 1254 - buf = kmalloc(length, SLAB_KERNEL); 1254 + buf = kmalloc(length, GFP_KERNEL); 1255 1255 if (!buf) 1256 1256 return -ENOMEM; 1257 1257 ··· 1403 1403 maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11)); 1404 1404 packets = (bytes + maxp - 1) / maxp; 1405 1405 1406 - urb = usb_alloc_urb (packets, SLAB_KERNEL); 1406 + urb = usb_alloc_urb (packets, GFP_KERNEL); 1407 1407 if (!urb) 1408 1408 return urb; 1409 1409 urb->dev = udev; ··· 1411 1411 1412 1412 urb->number_of_packets = packets; 1413 1413 urb->transfer_buffer_length = bytes; 1414 - urb->transfer_buffer = usb_buffer_alloc (udev, bytes, SLAB_KERNEL, 1414 + urb->transfer_buffer = usb_buffer_alloc (udev, bytes, GFP_KERNEL, 1415 1415 &urb->transfer_dma); 1416 1416 if (!urb->transfer_buffer) { 1417 1417 usb_free_urb (urb); ··· 1900 1900 } 1901 1901 #endif 1902 1902 1903 - dev = kzalloc(sizeof(*dev), SLAB_KERNEL); 1903 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1904 1904 if (!dev) 1905 1905 return -ENOMEM; 1906 1906 info = (struct usbtest_info *) id->driver_info; ··· 
1910 1910 dev->intf = intf; 1911 1911 1912 1912 /* cacheline-aligned scratch for i/o */ 1913 - if ((dev->buf = kmalloc (TBUF_SIZE, SLAB_KERNEL)) == NULL) { 1913 + if ((dev->buf = kmalloc (TBUF_SIZE, GFP_KERNEL)) == NULL) { 1914 1914 kfree (dev); 1915 1915 return -ENOMEM; 1916 1916 }
+1 -1
drivers/usb/net/rndis_host.c
··· 469 469 struct rndis_halt *halt; 470 470 471 471 /* try to clear any rndis state/activity (no i/o from stack!) */ 472 - halt = kcalloc(1, sizeof *halt, SLAB_KERNEL); 472 + halt = kcalloc(1, sizeof *halt, GFP_KERNEL); 473 473 if (halt) { 474 474 halt->msg_type = RNDIS_MSG_HALT; 475 475 halt->msg_len = ccpu2(sizeof *halt);
+2 -2
drivers/usb/net/usbnet.c
··· 179 179 period = max ((int) dev->status->desc.bInterval, 180 180 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3); 181 181 182 - buf = kmalloc (maxp, SLAB_KERNEL); 182 + buf = kmalloc (maxp, GFP_KERNEL); 183 183 if (buf) { 184 - dev->interrupt = usb_alloc_urb (0, SLAB_KERNEL); 184 + dev->interrupt = usb_alloc_urb (0, GFP_KERNEL); 185 185 if (!dev->interrupt) { 186 186 kfree (buf); 187 187 return -ENOMEM;
+1 -1
fs/adfs/super.c
··· 217 217 static struct inode *adfs_alloc_inode(struct super_block *sb) 218 218 { 219 219 struct adfs_inode_info *ei; 220 - ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, SLAB_KERNEL); 220 + ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); 221 221 if (!ei) 222 222 return NULL; 223 223 return &ei->vfs_inode;
+1 -1
fs/affs/super.c
··· 71 71 static struct inode *affs_alloc_inode(struct super_block *sb) 72 72 { 73 73 struct affs_inode_info *ei; 74 - ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, SLAB_KERNEL); 74 + ei = (struct affs_inode_info *)kmem_cache_alloc(affs_inode_cachep, GFP_KERNEL); 75 75 if (!ei) 76 76 return NULL; 77 77 ei->vfs_inode.i_version = 1;
+1 -1
fs/afs/super.c
··· 412 412 struct afs_vnode *vnode; 413 413 414 414 vnode = (struct afs_vnode *) 415 - kmem_cache_alloc(afs_inode_cachep, SLAB_KERNEL); 415 + kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL); 416 416 if (!vnode) 417 417 return NULL; 418 418
+1 -1
fs/befs/linuxvfs.c
··· 277 277 { 278 278 struct befs_inode_info *bi; 279 279 bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep, 280 - SLAB_KERNEL); 280 + GFP_KERNEL); 281 281 if (!bi) 282 282 return NULL; 283 283 return &bi->vfs_inode;
+1 -1
fs/bfs/inode.c
··· 233 233 static struct inode *bfs_alloc_inode(struct super_block *sb) 234 234 { 235 235 struct bfs_inode_info *bi; 236 - bi = kmem_cache_alloc(bfs_inode_cachep, SLAB_KERNEL); 236 + bi = kmem_cache_alloc(bfs_inode_cachep, GFP_KERNEL); 237 237 if (!bi) 238 238 return NULL; 239 239 return &bi->vfs_inode;
+1 -1
fs/block_dev.c
··· 239 239 240 240 static struct inode *bdev_alloc_inode(struct super_block *sb) 241 241 { 242 - struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, SLAB_KERNEL); 242 + struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); 243 243 if (!ei) 244 244 return NULL; 245 245 return &ei->vfs_inode;
+1 -1
fs/cifs/cifsfs.c
··· 245 245 cifs_alloc_inode(struct super_block *sb) 246 246 { 247 247 struct cifsInodeInfo *cifs_inode; 248 - cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL); 248 + cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL); 249 249 if (!cifs_inode) 250 250 return NULL; 251 251 cifs_inode->cifsAttrs = 0x20; /* default */
+2 -2
fs/cifs/misc.c
··· 153 153 albeit slightly larger than necessary and maxbuffersize 154 154 defaults to this and can not be bigger */ 155 155 ret_buf = 156 - (struct smb_hdr *) mempool_alloc(cifs_req_poolp, SLAB_KERNEL | GFP_NOFS); 156 + (struct smb_hdr *) mempool_alloc(cifs_req_poolp, GFP_KERNEL | GFP_NOFS); 157 157 158 158 /* clear the first few header bytes */ 159 159 /* for most paths, more is cleared in header_assemble */ ··· 192 192 albeit slightly larger than necessary and maxbuffersize 193 193 defaults to this and can not be bigger */ 194 194 ret_buf = 195 - (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, SLAB_KERNEL | GFP_NOFS); 195 + (struct smb_hdr *) mempool_alloc(cifs_sm_req_poolp, GFP_KERNEL | GFP_NOFS); 196 196 if (ret_buf) { 197 197 /* No need to clear memory here, cleared in header assemble */ 198 198 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
+2 -2
fs/cifs/transport.c
··· 51 51 } 52 52 53 53 temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp, 54 - SLAB_KERNEL | GFP_NOFS); 54 + GFP_KERNEL | GFP_NOFS); 55 55 if (temp == NULL) 56 56 return temp; 57 57 else { ··· 118 118 return NULL; 119 119 } 120 120 temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep, 121 - SLAB_KERNEL); 121 + GFP_KERNEL); 122 122 if (temp == NULL) 123 123 return temp; 124 124 else {
+1 -1
fs/coda/inode.c
··· 43 43 static struct inode *coda_alloc_inode(struct super_block *sb) 44 44 { 45 45 struct coda_inode_info *ei; 46 - ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, SLAB_KERNEL); 46 + ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); 47 47 if (!ei) 48 48 return NULL; 49 49 memset(&ei->c_fid, 0, sizeof(struct CodaFid));
+1 -1
fs/dnotify.c
··· 77 77 inode = filp->f_dentry->d_inode; 78 78 if (!S_ISDIR(inode->i_mode)) 79 79 return -ENOTDIR; 80 - dn = kmem_cache_alloc(dn_cache, SLAB_KERNEL); 80 + dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); 81 81 if (dn == NULL) 82 82 return -ENOMEM; 83 83 spin_lock(&inode->i_lock);
+1 -1
fs/ecryptfs/crypto.c
··· 628 628 num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size; 629 629 base_extent = (page->index * num_extents_per_page); 630 630 lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache, 631 - SLAB_KERNEL); 631 + GFP_KERNEL); 632 632 if (!lower_page_virt) { 633 633 rc = -ENOMEM; 634 634 ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
+1 -1
fs/ecryptfs/file.c
··· 250 250 int lower_flags; 251 251 252 252 /* Released in ecryptfs_release or end of function if failure */ 253 - file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL); 253 + file_info = kmem_cache_alloc(ecryptfs_file_info_cache, GFP_KERNEL); 254 254 ecryptfs_set_file_private(file, file_info); 255 255 if (!file_info) { 256 256 ecryptfs_printk(KERN_ERR,
+2 -2
fs/ecryptfs/inode.c
··· 369 369 BUG_ON(!atomic_read(&lower_dentry->d_count)); 370 370 ecryptfs_set_dentry_private(dentry, 371 371 kmem_cache_alloc(ecryptfs_dentry_info_cache, 372 - SLAB_KERNEL)); 372 + GFP_KERNEL)); 373 373 if (!ecryptfs_dentry_to_private(dentry)) { 374 374 rc = -ENOMEM; 375 375 ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting " ··· 795 795 /* Released at out_free: label */ 796 796 ecryptfs_set_file_private(&fake_ecryptfs_file, 797 797 kmem_cache_alloc(ecryptfs_file_info_cache, 798 - SLAB_KERNEL)); 798 + GFP_KERNEL)); 799 799 if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) { 800 800 rc = -ENOMEM; 801 801 goto out;
+1 -1
fs/ecryptfs/keystore.c
··· 207 207 /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or 208 208 * at end of function upon failure */ 209 209 auth_tok_list_item = 210 - kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL); 210 + kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, GFP_KERNEL); 211 211 if (!auth_tok_list_item) { 212 212 ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n"); 213 213 rc = -ENOMEM;
+2 -2
fs/ecryptfs/main.c
··· 378 378 /* Released in ecryptfs_put_super() */ 379 379 ecryptfs_set_superblock_private(sb, 380 380 kmem_cache_alloc(ecryptfs_sb_info_cache, 381 - SLAB_KERNEL)); 381 + GFP_KERNEL)); 382 382 if (!ecryptfs_superblock_to_private(sb)) { 383 383 ecryptfs_printk(KERN_WARNING, "Out of memory\n"); 384 384 rc = -ENOMEM; ··· 402 402 /* through deactivate_super(sb) from get_sb_nodev() */ 403 403 ecryptfs_set_dentry_private(sb->s_root, 404 404 kmem_cache_alloc(ecryptfs_dentry_info_cache, 405 - SLAB_KERNEL)); 405 + GFP_KERNEL)); 406 406 if (!ecryptfs_dentry_to_private(sb->s_root)) { 407 407 ecryptfs_printk(KERN_ERR, 408 408 "dentry_info_cache alloc failed\n");
+1 -1
fs/ecryptfs/super.c
··· 50 50 struct inode *inode = NULL; 51 51 52 52 ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache, 53 - SLAB_KERNEL); 53 + GFP_KERNEL); 54 54 if (unlikely(!ecryptfs_inode)) 55 55 goto out; 56 56 ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
+1 -1
fs/efs/super.c
··· 57 57 static struct inode *efs_alloc_inode(struct super_block *sb) 58 58 { 59 59 struct efs_inode_info *ei; 60 - ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, SLAB_KERNEL); 60 + ei = (struct efs_inode_info *)kmem_cache_alloc(efs_inode_cachep, GFP_KERNEL); 61 61 if (!ei) 62 62 return NULL; 63 63 return &ei->vfs_inode;
+2 -2
fs/eventpoll.c
··· 961 961 struct epitem *epi = ep_item_from_epqueue(pt); 962 962 struct eppoll_entry *pwq; 963 963 964 - if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, SLAB_KERNEL))) { 964 + if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) { 965 965 init_waitqueue_func_entry(&pwq->wait, ep_poll_callback); 966 966 pwq->whead = whead; 967 967 pwq->base = epi; ··· 1004 1004 struct ep_pqueue epq; 1005 1005 1006 1006 error = -ENOMEM; 1007 - if (!(epi = kmem_cache_alloc(epi_cache, SLAB_KERNEL))) 1007 + if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) 1008 1008 goto eexit_1; 1009 1009 1010 1010 /* Item initialization follow here ... */
+1 -1
fs/exec.c
··· 404 404 bprm->loader += stack_base; 405 405 bprm->exec += stack_base; 406 406 407 - mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 407 + mpnt = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 408 408 if (!mpnt) 409 409 return -ENOMEM; 410 410
+1 -1
fs/ext2/super.c
··· 140 140 static struct inode *ext2_alloc_inode(struct super_block *sb) 141 141 { 142 142 struct ext2_inode_info *ei; 143 - ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, SLAB_KERNEL); 143 + ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); 144 144 if (!ei) 145 145 return NULL; 146 146 #ifdef CONFIG_EXT2_FS_POSIX_ACL
+1 -1
fs/fat/cache.c
··· 63 63 64 64 static inline struct fat_cache *fat_cache_alloc(struct inode *inode) 65 65 { 66 - return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL); 66 + return kmem_cache_alloc(fat_cache_cachep, GFP_KERNEL); 67 67 } 68 68 69 69 static inline void fat_cache_free(struct fat_cache *cache)
+1 -1
fs/fat/inode.c
··· 482 482 static struct inode *fat_alloc_inode(struct super_block *sb) 483 483 { 484 484 struct msdos_inode_info *ei; 485 - ei = kmem_cache_alloc(fat_inode_cachep, SLAB_KERNEL); 485 + ei = kmem_cache_alloc(fat_inode_cachep, GFP_KERNEL); 486 486 if (!ei) 487 487 return NULL; 488 488 return &ei->vfs_inode;
+1 -1
fs/fcntl.c
··· 567 567 int result = 0; 568 568 569 569 if (on) { 570 - new = kmem_cache_alloc(fasync_cache, SLAB_KERNEL); 570 + new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); 571 571 if (!new) 572 572 return -ENOMEM; 573 573 }
+2 -2
fs/freevxfs/vxfs_inode.c
··· 103 103 struct vxfs_inode_info *vip; 104 104 struct vxfs_dinode *dip; 105 105 106 - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) 106 + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) 107 107 goto fail; 108 108 dip = (struct vxfs_dinode *)(bp->b_data + offset); 109 109 memcpy(vip, dip, sizeof(*vip)); ··· 145 145 struct vxfs_dinode *dip; 146 146 caddr_t kaddr = (char *)page_address(pp); 147 147 148 - if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, SLAB_KERNEL))) 148 + if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL))) 149 149 goto fail; 150 150 dip = (struct vxfs_dinode *)(kaddr + offset); 151 151 memcpy(vip, dip, sizeof(*vip));
+1 -1
fs/fuse/dev.c
··· 41 41 42 42 struct fuse_req *fuse_request_alloc(void) 43 43 { 44 - struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL); 44 + struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL); 45 45 if (req) 46 46 fuse_request_init(req); 47 47 return req;
+1 -1
fs/fuse/inode.c
··· 46 46 struct inode *inode; 47 47 struct fuse_inode *fi; 48 48 49 - inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL); 49 + inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL); 50 50 if (!inode) 51 51 return NULL; 52 52
+1 -1
fs/hfs/super.c
··· 145 145 { 146 146 struct hfs_inode_info *i; 147 147 148 - i = kmem_cache_alloc(hfs_inode_cachep, SLAB_KERNEL); 148 + i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL); 149 149 return i ? &i->vfs_inode : NULL; 150 150 } 151 151
+1 -1
fs/hfsplus/super.c
··· 440 440 { 441 441 struct hfsplus_inode_info *i; 442 442 443 - i = kmem_cache_alloc(hfsplus_inode_cachep, SLAB_KERNEL); 443 + i = kmem_cache_alloc(hfsplus_inode_cachep, GFP_KERNEL); 444 444 return i ? &i->vfs_inode : NULL; 445 445 } 446 446
+1 -1
fs/hugetlbfs/inode.c
··· 522 522 523 523 if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) 524 524 return NULL; 525 - p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL); 525 + p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL); 526 526 if (unlikely(!p)) { 527 527 hugetlbfs_inc_free_inodes(sbinfo); 528 528 return NULL;
+1 -1
fs/inode.c
··· 109 109 if (sb->s_op->alloc_inode) 110 110 inode = sb->s_op->alloc_inode(sb); 111 111 else 112 - inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL); 112 + inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL); 113 113 114 114 if (inode) { 115 115 struct address_space * const mapping = &inode->i_data;
+1 -1
fs/isofs/inode.c
··· 62 62 static struct inode *isofs_alloc_inode(struct super_block *sb) 63 63 { 64 64 struct iso_inode_info *ei; 65 - ei = kmem_cache_alloc(isofs_inode_cachep, SLAB_KERNEL); 65 + ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL); 66 66 if (!ei) 67 67 return NULL; 68 68 return &ei->vfs_inode;
+1 -1
fs/jffs2/super.c
··· 33 33 static struct inode *jffs2_alloc_inode(struct super_block *sb) 34 34 { 35 35 struct jffs2_inode_info *ei; 36 - ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); 36 + ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL); 37 37 if (!ei) 38 38 return NULL; 39 39 return &ei->vfs_inode;
+1 -1
fs/locks.c
··· 147 147 /* Allocate an empty lock structure. */ 148 148 static struct file_lock *locks_alloc_lock(void) 149 149 { 150 - return kmem_cache_alloc(filelock_cache, SLAB_KERNEL); 150 + return kmem_cache_alloc(filelock_cache, GFP_KERNEL); 151 151 } 152 152 153 153 static void locks_release_private(struct file_lock *fl)
+1 -1
fs/minix/inode.c
··· 56 56 static struct inode *minix_alloc_inode(struct super_block *sb) 57 57 { 58 58 struct minix_inode_info *ei; 59 - ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, SLAB_KERNEL); 59 + ei = (struct minix_inode_info *)kmem_cache_alloc(minix_inode_cachep, GFP_KERNEL); 60 60 if (!ei) 61 61 return NULL; 62 62 return &ei->vfs_inode;
+1 -1
fs/ncpfs/inode.c
··· 45 45 static struct inode *ncp_alloc_inode(struct super_block *sb) 46 46 { 47 47 struct ncp_inode_info *ei; 48 - ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, SLAB_KERNEL); 48 + ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL); 49 49 if (!ei) 50 50 return NULL; 51 51 return &ei->vfs_inode;
+1 -1
fs/nfs/direct.c
··· 143 143 { 144 144 struct nfs_direct_req *dreq; 145 145 146 - dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL); 146 + dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL); 147 147 if (!dreq) 148 148 return NULL; 149 149
+1 -1
fs/nfs/inode.c
··· 1080 1080 struct inode *nfs_alloc_inode(struct super_block *sb) 1081 1081 { 1082 1082 struct nfs_inode *nfsi; 1083 - nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, SLAB_KERNEL); 1083 + nfsi = (struct nfs_inode *)kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL); 1084 1084 if (!nfsi) 1085 1085 return NULL; 1086 1086 nfsi->flags = 0UL;
+1 -1
fs/nfs/pagelist.c
··· 26 26 nfs_page_alloc(void) 27 27 { 28 28 struct nfs_page *p; 29 - p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL); 29 + p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); 30 30 if (p) { 31 31 memset(p, 0, sizeof(*p)); 32 32 INIT_LIST_HEAD(&p->wb_list);
+1 -1
fs/openpromfs/inode.c
··· 336 336 { 337 337 struct op_inode_info *oi; 338 338 339 - oi = kmem_cache_alloc(op_inode_cachep, SLAB_KERNEL); 339 + oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL); 340 340 if (!oi) 341 341 return NULL; 342 342
+1 -1
fs/proc/inode.c
··· 88 88 struct proc_inode *ei; 89 89 struct inode *inode; 90 90 91 - ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, SLAB_KERNEL); 91 + ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL); 92 92 if (!ei) 93 93 return NULL; 94 94 ei->pid = NULL;
+1 -1
fs/qnx4/inode.c
··· 520 520 static struct inode *qnx4_alloc_inode(struct super_block *sb) 521 521 { 522 522 struct qnx4_inode_info *ei; 523 - ei = kmem_cache_alloc(qnx4_inode_cachep, SLAB_KERNEL); 523 + ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL); 524 524 if (!ei) 525 525 return NULL; 526 526 return &ei->vfs_inode;
+1 -1
fs/reiserfs/super.c
··· 496 496 { 497 497 struct reiserfs_inode_info *ei; 498 498 ei = (struct reiserfs_inode_info *) 499 - kmem_cache_alloc(reiserfs_inode_cachep, SLAB_KERNEL); 499 + kmem_cache_alloc(reiserfs_inode_cachep, GFP_KERNEL); 500 500 if (!ei) 501 501 return NULL; 502 502 return &ei->vfs_inode;
+1 -1
fs/romfs/inode.c
··· 555 555 static struct inode *romfs_alloc_inode(struct super_block *sb) 556 556 { 557 557 struct romfs_inode_info *ei; 558 - ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, SLAB_KERNEL); 558 + ei = (struct romfs_inode_info *)kmem_cache_alloc(romfs_inode_cachep, GFP_KERNEL); 559 559 if (!ei) 560 560 return NULL; 561 561 return &ei->vfs_inode;
+1 -1
fs/smbfs/inode.c
··· 55 55 static struct inode *smb_alloc_inode(struct super_block *sb) 56 56 { 57 57 struct smb_inode_info *ei; 58 - ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, SLAB_KERNEL); 58 + ei = (struct smb_inode_info *)kmem_cache_alloc(smb_inode_cachep, GFP_KERNEL); 59 59 if (!ei) 60 60 return NULL; 61 61 return &ei->vfs_inode;
+1 -1
fs/smbfs/request.c
··· 61 61 struct smb_request *req; 62 62 unsigned char *buf = NULL; 63 63 64 - req = kmem_cache_alloc(req_cachep, SLAB_KERNEL); 64 + req = kmem_cache_alloc(req_cachep, GFP_KERNEL); 65 65 VERBOSE("allocating request: %p\n", req); 66 66 if (!req) 67 67 goto out;
+1 -1
fs/sysv/inode.c
··· 307 307 { 308 308 struct sysv_inode_info *si; 309 309 310 - si = kmem_cache_alloc(sysv_inode_cachep, SLAB_KERNEL); 310 + si = kmem_cache_alloc(sysv_inode_cachep, GFP_KERNEL); 311 311 if (!si) 312 312 return NULL; 313 313 return &si->vfs_inode;
+1 -1
fs/udf/super.c
··· 112 112 static struct inode *udf_alloc_inode(struct super_block *sb) 113 113 { 114 114 struct udf_inode_info *ei; 115 - ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, SLAB_KERNEL); 115 + ei = (struct udf_inode_info *)kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL); 116 116 if (!ei) 117 117 return NULL; 118 118
+1 -1
fs/ufs/super.c
··· 1209 1209 static struct inode *ufs_alloc_inode(struct super_block *sb) 1210 1210 { 1211 1211 struct ufs_inode_info *ei; 1212 - ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, SLAB_KERNEL); 1212 + ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_KERNEL); 1213 1213 if (!ei) 1214 1214 return NULL; 1215 1215 ei->vfs_inode.i_version = 1;
+1 -1
include/linux/fs.h
··· 1483 1483 1484 1484 extern struct kmem_cache *names_cachep; 1485 1485 1486 - #define __getname() kmem_cache_alloc(names_cachep, SLAB_KERNEL) 1486 + #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) 1487 1487 #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) 1488 1488 #ifndef CONFIG_AUDITSYSCALL 1489 1489 #define putname(name) __putname(name)
+1 -1
include/linux/rmap.h
··· 34 34 35 35 static inline struct anon_vma *anon_vma_alloc(void) 36 36 { 37 - return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL); 37 + return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); 38 38 } 39 39 40 40 static inline void anon_vma_free(struct anon_vma *anon_vma)
-1
include/linux/slab.h
··· 19 19 #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 20 20 21 21 /* flags for kmem_cache_alloc() */ 22 - #define SLAB_KERNEL GFP_KERNEL 23 22 #define SLAB_DMA GFP_DMA 24 23 25 24 /* flags to pass to kmem_cache_create().
+1 -1
include/linux/taskstats_kern.h
··· 35 35 return; 36 36 37 37 /* No problem if kmem_cache_zalloc() fails */ 38 - stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); 38 + stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); 39 39 40 40 spin_lock_irq(&tsk->sighand->siglock); 41 41 if (!sig->stats) {
+1 -1
ipc/mqueue.c
··· 224 224 { 225 225 struct mqueue_inode_info *ei; 226 226 227 - ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL); 227 + ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); 228 228 if (!ei) 229 229 return NULL; 230 230 return &ei->vfs_inode;
+1 -1
kernel/delayacct.c
··· 41 41 42 42 void __delayacct_tsk_init(struct task_struct *tsk) 43 43 { 44 - tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL); 44 + tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); 45 45 if (tsk->delays) 46 46 spin_lock_init(&tsk->delays->lock); 47 47 }
+3 -3
kernel/fork.c
··· 237 237 goto fail_nomem; 238 238 charge = len; 239 239 } 240 - tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 240 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 241 241 if (!tmp) 242 242 goto fail_nomem; 243 243 *tmp = *mpnt; ··· 319 319 320 320 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); 321 321 322 - #define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL)) 322 + #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) 323 323 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) 324 324 325 325 #include <linux/init_task.h> ··· 621 621 struct files_struct *newf; 622 622 struct fdtable *fdt; 623 623 624 - newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL); 624 + newf = kmem_cache_alloc(files_cachep, GFP_KERNEL); 625 625 if (!newf) 626 626 goto out; 627 627
+1 -1
kernel/taskstats.c
··· 425 425 *mycpu = raw_smp_processor_id(); 426 426 427 427 *ptidstats = NULL; 428 - tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL); 428 + tmp = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); 429 429 if (!tmp) 430 430 return; 431 431
+1 -1
kernel/user.c
··· 132 132 if (!up) { 133 133 struct user_struct *new; 134 134 135 - new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL); 135 + new = kmem_cache_alloc(uid_cachep, GFP_KERNEL); 136 136 if (!new) 137 137 return NULL; 138 138 new->uid = uid;
+1 -1
mm/mempolicy.c
··· 1326 1326 atomic_set(&new->refcnt, 1); 1327 1327 if (new->policy == MPOL_BIND) { 1328 1328 int sz = ksize(old->v.zonelist); 1329 - new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL); 1329 + new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL); 1330 1330 if (!new->v.zonelist) { 1331 1331 kmem_cache_free(policy_cache, new); 1332 1332 return ERR_PTR(-ENOMEM);
+2 -2
mm/mmap.c
··· 1736 1736 if (mm->map_count >= sysctl_max_map_count) 1737 1737 return -ENOMEM; 1738 1738 1739 - new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 1739 + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 1740 1740 if (!new) 1741 1741 return -ENOMEM; 1742 1742 ··· 2057 2057 vma_start < new_vma->vm_end) 2058 2058 *vmap = new_vma; 2059 2059 } else { 2060 - new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); 2060 + new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); 2061 2061 if (new_vma) { 2062 2062 *new_vma = *vma; 2063 2063 pol = mpol_copy(vma_policy(vma));
+1 -1
mm/shmem.c
··· 2263 2263 static struct inode *shmem_alloc_inode(struct super_block *sb) 2264 2264 { 2265 2265 struct shmem_inode_info *p; 2266 - p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL); 2266 + p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2267 2267 if (!p) 2268 2268 return NULL; 2269 2269 return &p->vfs_inode;
+1 -1
mm/slab.c
··· 2237 2237 align = ralign; 2238 2238 2239 2239 /* Get cache's description obj. */ 2240 - cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL); 2240 + cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL); 2241 2241 if (!cachep) 2242 2242 goto oops; 2243 2243
+1 -1
net/decnet/dn_table.c
··· 590 590 591 591 replace: 592 592 err = -ENOBUFS; 593 - new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL); 593 + new_f = kmem_cache_alloc(dn_hash_kmem, GFP_KERNEL); 594 594 if (new_f == NULL) 595 595 goto out; 596 596
+2 -2
net/ipv4/fib_hash.c
··· 485 485 goto out; 486 486 487 487 err = -ENOBUFS; 488 - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); 488 + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 489 489 if (new_fa == NULL) 490 490 goto out; 491 491 492 492 new_f = NULL; 493 493 if (!f) { 494 - new_f = kmem_cache_alloc(fn_hash_kmem, SLAB_KERNEL); 494 + new_f = kmem_cache_alloc(fn_hash_kmem, GFP_KERNEL); 495 495 if (new_f == NULL) 496 496 goto out_free_new_fa; 497 497
+2 -2
net/ipv4/fib_trie.c
··· 1187 1187 u8 state; 1188 1188 1189 1189 err = -ENOBUFS; 1190 - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); 1190 + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 1191 1191 if (new_fa == NULL) 1192 1192 goto out; 1193 1193 ··· 1232 1232 goto out; 1233 1233 1234 1234 err = -ENOBUFS; 1235 - new_fa = kmem_cache_alloc(fn_alias_kmem, SLAB_KERNEL); 1235 + new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); 1236 1236 if (new_fa == NULL) 1237 1237 goto out; 1238 1238
+1 -1
net/socket.c
··· 236 236 { 237 237 struct socket_alloc *ei; 238 238 239 - ei = kmem_cache_alloc(sock_inode_cachep, SLAB_KERNEL); 239 + ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); 240 240 if (!ei) 241 241 return NULL; 242 242 init_waitqueue_head(&ei->socket.wait);
+1 -1
net/sunrpc/rpc_pipe.c
··· 143 143 rpc_alloc_inode(struct super_block *sb) 144 144 { 145 145 struct rpc_inode *rpci; 146 - rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, SLAB_KERNEL); 146 + rpci = (struct rpc_inode *)kmem_cache_alloc(rpc_inode_cachep, GFP_KERNEL); 147 147 if (!rpci) 148 148 return NULL; 149 149 return &rpci->vfs_inode;
+1 -1
security/keys/key.c
··· 285 285 } 286 286 287 287 /* allocate and initialise the key and its description */ 288 - key = kmem_cache_alloc(key_jar, SLAB_KERNEL); 288 + key = kmem_cache_alloc(key_jar, GFP_KERNEL); 289 289 if (!key) 290 290 goto no_memory_2; 291 291
+1 -1
security/selinux/hooks.c
··· 181 181 struct task_security_struct *tsec = current->security; 182 182 struct inode_security_struct *isec; 183 183 184 - isec = kmem_cache_alloc(sel_inode_cache, SLAB_KERNEL); 184 + isec = kmem_cache_alloc(sel_inode_cache, GFP_KERNEL); 185 185 if (!isec) 186 186 return -ENOMEM; 187 187
+1 -1
security/selinux/ss/avtab.c
··· 36 36 struct avtab_key *key, struct avtab_datum *datum) 37 37 { 38 38 struct avtab_node * newnode; 39 - newnode = kmem_cache_alloc(avtab_node_cachep, SLAB_KERNEL); 39 + newnode = kmem_cache_alloc(avtab_node_cachep, GFP_KERNEL); 40 40 if (newnode == NULL) 41 41 return NULL; 42 42 memset(newnode, 0, sizeof(struct avtab_node));