Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, page_alloc: rename __GFP_WAIT to __GFP_RECLAIM

__GFP_WAIT was used to signal that the caller was in atomic context and
could not sleep. Now it is possible to distinguish between true atomic
context and callers that are not willing to sleep. The latter should
clear __GFP_DIRECT_RECLAIM so kswapd will still wake. As clearing
__GFP_WAIT behaves differently, there is a risk that people will clear the
wrong flags. This patch renames __GFP_WAIT to __GFP_RECLAIM to clearly
indicate what it does -- setting it allows all reclaim activity, clearing
it prevents all reclaim activity.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Mel Gorman and committed by Linus Torvalds
71baba4b 40113370

+71 -68
+2 -2
block/blk-core.c
··· 638 638 if (percpu_ref_tryget_live(&q->q_usage_counter)) 639 639 return 0; 640 640 641 - if (!(gfp & __GFP_WAIT)) 641 + if (!gfpflags_allow_blocking(gfp)) 642 642 return -EBUSY; 643 643 644 644 ret = wait_event_interruptible(q->mq_freeze_wq, ··· 2038 2038 do { 2039 2039 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 2040 2040 2041 - if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) { 2041 + if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) { 2042 2042 2043 2043 q->make_request_fn(q, bio); 2044 2044
+1 -1
block/blk-mq.c
··· 1186 1186 ctx = blk_mq_get_ctx(q); 1187 1187 hctx = q->mq_ops->map_queue(q, ctx->cpu); 1188 1188 blk_mq_set_alloc_data(&alloc_data, q, 1189 - __GFP_WAIT|__GFP_HIGH, false, ctx, hctx); 1189 + __GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx); 1190 1190 rq = __blk_mq_alloc_request(&alloc_data, rw); 1191 1191 ctx = alloc_data.ctx; 1192 1192 hctx = alloc_data.hctx;
+3 -3
block/scsi_ioctl.c
··· 444 444 445 445 } 446 446 447 - rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); 447 + rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM); 448 448 if (IS_ERR(rq)) { 449 449 err = PTR_ERR(rq); 450 450 goto error_free_buffer; ··· 495 495 break; 496 496 } 497 497 498 - if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { 498 + if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) { 499 499 err = DRIVER_ERROR << 24; 500 500 goto error; 501 501 } ··· 536 536 struct request *rq; 537 537 int err; 538 538 539 - rq = blk_get_request(q, WRITE, __GFP_WAIT); 539 + rq = blk_get_request(q, WRITE, __GFP_RECLAIM); 540 540 if (IS_ERR(rq)) 541 541 return PTR_ERR(rq); 542 542 blk_rq_set_block_pc(rq);
+1 -1
drivers/block/drbd/drbd_bitmap.c
··· 1007 1007 bm_set_page_unchanged(b->bm_pages[page_nr]); 1008 1008 1009 1009 if (ctx->flags & BM_AIO_COPY_PAGES) { 1010 - page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT); 1010 + page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM); 1011 1011 copy_highpage(page, b->bm_pages[page_nr]); 1012 1012 bm_store_page_idx(page, page_nr); 1013 1013 } else
+1 -1
drivers/block/mtip32xx/mtip32xx.c
··· 173 173 { 174 174 struct request *rq; 175 175 176 - rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); 176 + rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true); 177 177 return blk_mq_rq_to_pdu(rq); 178 178 } 179 179
+1 -1
drivers/block/paride/pd.c
··· 723 723 struct request *rq; 724 724 int err = 0; 725 725 726 - rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); 726 + rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM); 727 727 if (IS_ERR(rq)) 728 728 return PTR_ERR(rq); 729 729
+2 -2
drivers/block/pktcdvd.c
··· 704 704 int ret = 0; 705 705 706 706 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? 707 - WRITE : READ, __GFP_WAIT); 707 + WRITE : READ, __GFP_RECLAIM); 708 708 if (IS_ERR(rq)) 709 709 return PTR_ERR(rq); 710 710 blk_rq_set_block_pc(rq); 711 711 712 712 if (cgc->buflen) { 713 713 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, 714 - __GFP_WAIT); 714 + __GFP_RECLAIM); 715 715 if (ret) 716 716 goto out; 717 717 }
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 2216 2216 mapping = file_inode(obj->base.filp)->i_mapping; 2217 2217 gfp = mapping_gfp_mask(mapping); 2218 2218 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2219 - gfp &= ~(__GFP_IO | __GFP_WAIT); 2219 + gfp &= ~(__GFP_IO | __GFP_RECLAIM); 2220 2220 sg = st->sgl; 2221 2221 st->nents = 0; 2222 2222 for (i = 0; i < page_count; i++) {
+1 -1
drivers/ide/ide-atapi.c
··· 92 92 struct request *rq; 93 93 int error; 94 94 95 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 95 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 96 96 rq->cmd_type = REQ_TYPE_DRV_PRIV; 97 97 rq->special = (char *)pc; 98 98
+1 -1
drivers/ide/ide-cd.c
··· 441 441 struct request *rq; 442 442 int error; 443 443 444 - rq = blk_get_request(drive->queue, write, __GFP_WAIT); 444 + rq = blk_get_request(drive->queue, write, __GFP_RECLAIM); 445 445 446 446 memcpy(rq->cmd, cmd, BLK_MAX_CDB); 447 447 rq->cmd_type = REQ_TYPE_ATA_PC;
+1 -1
drivers/ide/ide-cd_ioctl.c
··· 303 303 struct request *rq; 304 304 int ret; 305 305 306 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 306 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 307 307 rq->cmd_type = REQ_TYPE_DRV_PRIV; 308 308 rq->cmd_flags = REQ_QUIET; 309 309 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
+1 -1
drivers/ide/ide-devsets.c
··· 165 165 if (!(setting->flags & DS_SYNC)) 166 166 return setting->set(drive, arg); 167 167 168 - rq = blk_get_request(q, READ, __GFP_WAIT); 168 + rq = blk_get_request(q, READ, __GFP_RECLAIM); 169 169 rq->cmd_type = REQ_TYPE_DRV_PRIV; 170 170 rq->cmd_len = 5; 171 171 rq->cmd[0] = REQ_DEVSET_EXEC;
+1 -1
drivers/ide/ide-disk.c
··· 477 477 if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) 478 478 return -EBUSY; 479 479 480 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 480 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 481 481 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 482 482 483 483 drive->mult_req = arg;
+2 -2
drivers/ide/ide-ioctls.c
··· 125 125 if (NULL == (void *) arg) { 126 126 struct request *rq; 127 127 128 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 128 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 129 129 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 130 130 err = blk_execute_rq(drive->queue, NULL, rq, 0); 131 131 blk_put_request(rq); ··· 221 221 struct request *rq; 222 222 int ret = 0; 223 223 224 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 224 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 225 225 rq->cmd_type = REQ_TYPE_DRV_PRIV; 226 226 rq->cmd_len = 1; 227 227 rq->cmd[0] = REQ_DRIVE_RESET;
+1 -1
drivers/ide/ide-park.c
··· 31 31 } 32 32 spin_unlock_irq(&hwif->lock); 33 33 34 - rq = blk_get_request(q, READ, __GFP_WAIT); 34 + rq = blk_get_request(q, READ, __GFP_RECLAIM); 35 35 rq->cmd[0] = REQ_PARK_HEADS; 36 36 rq->cmd_len = 1; 37 37 rq->cmd_type = REQ_TYPE_DRV_PRIV;
+2 -2
drivers/ide/ide-pm.c
··· 18 18 } 19 19 20 20 memset(&rqpm, 0, sizeof(rqpm)); 21 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 21 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 22 22 rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; 23 23 rq->special = &rqpm; 24 24 rqpm.pm_step = IDE_PM_START_SUSPEND; ··· 88 88 } 89 89 90 90 memset(&rqpm, 0, sizeof(rqpm)); 91 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 91 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 92 92 rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; 93 93 rq->cmd_flags |= REQ_PREEMPT; 94 94 rq->special = &rqpm;
+2 -2
drivers/ide/ide-tape.c
··· 852 852 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE); 853 853 BUG_ON(size < 0 || size % tape->blk_size); 854 854 855 - rq = blk_get_request(drive->queue, READ, __GFP_WAIT); 855 + rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 856 856 rq->cmd_type = REQ_TYPE_DRV_PRIV; 857 857 rq->cmd[13] = cmd; 858 858 rq->rq_disk = tape->disk; ··· 860 860 861 861 if (size) { 862 862 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size, 863 - __GFP_WAIT); 863 + __GFP_RECLAIM); 864 864 if (ret) 865 865 goto out_put; 866 866 }
+2 -2
drivers/ide/ide-taskfile.c
··· 430 430 int error; 431 431 int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; 432 432 433 - rq = blk_get_request(drive->queue, rw, __GFP_WAIT); 433 + rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); 434 434 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 435 435 436 436 /* ··· 441 441 */ 442 442 if (nsect) { 443 443 error = blk_rq_map_kern(drive->queue, rq, buf, 444 - nsect * SECTOR_SIZE, __GFP_WAIT); 444 + nsect * SECTOR_SIZE, __GFP_RECLAIM); 445 445 if (error) 446 446 goto put_req; 447 447 }
+1 -1
drivers/infiniband/hw/qib/qib_init.c
··· 1680 1680 * heavy filesystem activity makes these fail, and we can 1681 1681 * use compound pages. 1682 1682 */ 1683 - gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 1683 + gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 1684 1684 1685 1685 egrcnt = rcd->rcvegrcnt; 1686 1686 egroff = rcd->rcvegr_tid_base;
+1 -1
drivers/misc/vmw_balloon.c
··· 75 75 76 76 /* 77 77 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't 78 - * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use 78 + * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use 79 79 * __GFP_NOWARN, to suppress page allocation failure warnings. 80 80 */ 81 81 #define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+4 -2
drivers/nvme/host/pci.c
··· 1025 1025 req->special = (void *)0; 1026 1026 1027 1027 if (buffer && bufflen) { 1028 - ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT); 1028 + ret = blk_rq_map_kern(q, req, buffer, bufflen, 1029 + __GFP_DIRECT_RECLAIM); 1029 1030 if (ret) 1030 1031 goto out; 1031 1032 } else if (ubuffer && bufflen) { 1032 - ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT); 1033 + ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, 1034 + __GFP_DIRECT_RECLAIM); 1033 1035 if (ret) 1034 1036 goto out; 1035 1037 bio = req->bio;
+1 -1
drivers/scsi/scsi_error.c
··· 1970 1970 struct request *req; 1971 1971 1972 1972 /* 1973 - * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a 1973 + * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a 1974 1974 * request becomes available 1975 1975 */ 1976 1976 req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+2 -2
drivers/scsi/scsi_lib.c
··· 222 222 int write = (data_direction == DMA_TO_DEVICE); 223 223 int ret = DRIVER_ERROR << 24; 224 224 225 - req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); 225 + req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM); 226 226 if (IS_ERR(req)) 227 227 return ret; 228 228 blk_rq_set_block_pc(req); 229 229 230 230 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, 231 - buffer, bufflen, __GFP_WAIT)) 231 + buffer, bufflen, __GFP_RECLAIM)) 232 232 goto out; 233 233 234 234 req->cmd_len = COMMAND_SIZE(cmd[0]);
+1 -1
drivers/staging/rdma/hfi1/init.c
··· 1560 1560 * heavy filesystem activity makes these fail, and we can 1561 1561 * use compound pages. 1562 1562 */ 1563 - gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 1563 + gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 1564 1564 1565 1565 /* 1566 1566 * The minimum size of the eager buffers is a groups of MTU-sized
+1 -1
drivers/staging/rdma/ipath/ipath_file_ops.c
··· 905 905 * heavy filesystem activity makes these fail, and we can 906 906 * use compound pages. 907 907 */ 908 - gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; 908 + gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; 909 909 910 910 egrcnt = dd->ipath_rcvegrcnt; 911 911 /* TID number offset for this port */
+1 -1
fs/cachefiles/internal.h
··· 30 30 #define CACHEFILES_DEBUG_KLEAVE 2 31 31 #define CACHEFILES_DEBUG_KDEBUG 4 32 32 33 - #define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC) 33 + #define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC) 34 34 35 35 /* 36 36 * node records
+1 -1
fs/direct-io.c
··· 361 361 362 362 /* 363 363 * bio_alloc() is guaranteed to return a bio when called with 364 - * __GFP_WAIT and we request a valid number of vectors. 364 + * __GFP_RECLAIM and we request a valid number of vectors. 365 365 */ 366 366 bio = bio_alloc(GFP_KERNEL, nr_vecs); 367 367
+1 -1
fs/nilfs2/mdt.h
··· 72 72 } 73 73 74 74 /* Default GFP flags using highmem */ 75 - #define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM) 75 + #define NILFS_MDT_GFP (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM) 76 76 77 77 int nilfs_mdt_get_block(struct inode *, unsigned long, int, 78 78 void (*init_block)(struct inode *,
+8 -8
include/linux/gfp.h
··· 107 107 * can be cleared when the reclaiming of pages would cause unnecessary 108 108 * disruption. 109 109 */ 110 - #define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) 110 + #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) 111 111 #define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ 112 112 #define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ ··· 126 126 */ 127 127 #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) 128 128 #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) 129 - #define GFP_NOIO (__GFP_WAIT) 130 - #define GFP_NOFS (__GFP_WAIT | __GFP_IO) 131 - #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) 132 - #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ 129 + #define GFP_NOIO (__GFP_RECLAIM) 130 + #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) 131 + #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) 132 + #define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ 133 133 __GFP_RECLAIMABLE) 134 - #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 134 + #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 135 135 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) 136 136 #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) 137 137 #define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ ··· 143 143 #define GFP_MOVABLE_SHIFT 3 144 144 145 145 /* Control page allocator reclaim behavior */ 146 - #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ 146 + #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ 147 147 __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ 148 148 __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) 149 149 150 150 /* Control slab gfp mask during early boot */ 151 - #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) 151 + #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) 152 152 153 153 /* Control allocation constraints */ 154 154 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
+8 -8
kernel/power/swap.c
··· 257 257 struct bio *bio; 258 258 int error = 0; 259 259 260 - bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); 260 + bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); 261 261 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); 262 262 bio->bi_bdev = hib_resume_bdev; 263 263 ··· 356 356 return -ENOSPC; 357 357 358 358 if (hb) { 359 - src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN | 359 + src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN | 360 360 __GFP_NORETRY); 361 361 if (src) { 362 362 copy_page(src, buf); ··· 364 364 ret = hib_wait_io(hb); /* Free pages */ 365 365 if (ret) 366 366 return ret; 367 - src = (void *)__get_free_page(__GFP_WAIT | 367 + src = (void *)__get_free_page(__GFP_RECLAIM | 368 368 __GFP_NOWARN | 369 369 __GFP_NORETRY); 370 370 if (src) { ··· 672 672 nr_threads = num_online_cpus() - 1; 673 673 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); 674 674 675 - page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 675 + page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH); 676 676 if (!page) { 677 677 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 678 678 ret = -ENOMEM; ··· 975 975 last = tmp; 976 976 977 977 tmp->map = (struct swap_map_page *) 978 - __get_free_page(__GFP_WAIT | __GFP_HIGH); 978 + __get_free_page(__GFP_RECLAIM | __GFP_HIGH); 979 979 if (!tmp->map) { 980 980 release_swap_reader(handle); 981 981 return -ENOMEM; ··· 1242 1242 1243 1243 for (i = 0; i < read_pages; i++) { 1244 1244 page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? 1245 - __GFP_WAIT | __GFP_HIGH : 1246 - __GFP_WAIT | __GFP_NOWARN | 1247 - __GFP_NORETRY); 1245 + __GFP_RECLAIM | __GFP_HIGH : 1246 + __GFP_RECLAIM | __GFP_NOWARN | 1247 + __GFP_NORETRY); 1248 1248 1249 1249 if (!page[i]) { 1250 1250 if (i < LZO_CMP_PAGES) {
+1 -1
lib/percpu_ida.c
··· 135 135 * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course). 136 136 * 137 137 * @gfp indicates whether or not to wait until a free id is available (it's not 138 - * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep 138 + * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep 139 139 * however long it takes until another thread frees an id (same semantics as a 140 140 * mempool). 141 141 *
+4 -4
mm/failslab.c
··· 3 3 4 4 static struct { 5 5 struct fault_attr attr; 6 - bool ignore_gfp_wait; 6 + bool ignore_gfp_reclaim; 7 7 bool cache_filter; 8 8 } failslab = { 9 9 .attr = FAULT_ATTR_INITIALIZER, 10 - .ignore_gfp_wait = true, 10 + .ignore_gfp_reclaim = true, 11 11 .cache_filter = false, 12 12 }; 13 13 ··· 16 16 if (gfpflags & __GFP_NOFAIL) 17 17 return false; 18 18 19 - if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) 19 + if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM)) 20 20 return false; 21 21 22 22 if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB)) ··· 42 42 return PTR_ERR(dir); 43 43 44 44 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 45 - &failslab.ignore_gfp_wait)) 45 + &failslab.ignore_gfp_reclaim)) 46 46 goto fail; 47 47 if (!debugfs_create_bool("cache-filter", mode, dir, 48 48 &failslab.cache_filter))
+1 -1
mm/filemap.c
··· 2713 2713 * page is known to the local caching routines. 2714 2714 * 2715 2715 * The @gfp_mask argument specifies whether I/O may be performed to release 2716 - * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS). 2716 + * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). 2717 2717 * 2718 2718 */ 2719 2719 int try_to_release_page(struct page *page, gfp_t gfp_mask)
+1 -1
mm/huge_memory.c
··· 786 786 787 787 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) 788 788 { 789 - return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; 789 + return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp; 790 790 } 791 791 792 792 /* Caller must hold page table lock. */
+1 -1
mm/memcontrol.c
··· 2120 2120 /* 2121 2121 * If the hierarchy is above the normal consumption range, schedule 2122 2122 * reclaim on returning to userland. We can perform reclaim here 2123 - * if __GFP_WAIT but let's always punt for simplicity and so that 2123 + * if __GFP_RECLAIM but let's always punt for simplicity and so that 2124 2124 * GFP_KERNEL can consistently be used during reclaim. @memcg is 2125 2125 * not recorded as it most likely matches current's and won't 2126 2126 * change in the meantime. As high limit is checked again before
+1 -1
mm/migrate.c
··· 1752 1752 goto out_dropref; 1753 1753 1754 1754 new_page = alloc_pages_node(node, 1755 - (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, 1755 + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM, 1756 1756 HPAGE_PMD_ORDER); 1757 1757 if (!new_page) 1758 1758 goto out_fail;
+5 -4
mm/page_alloc.c
··· 2160 2160 struct fault_attr attr; 2161 2161 2162 2162 bool ignore_gfp_highmem; 2163 - bool ignore_gfp_wait; 2163 + bool ignore_gfp_reclaim; 2164 2164 u32 min_order; 2165 2165 } fail_page_alloc = { 2166 2166 .attr = FAULT_ATTR_INITIALIZER, 2167 - .ignore_gfp_wait = true, 2167 + .ignore_gfp_reclaim = true, 2168 2168 .ignore_gfp_highmem = true, 2169 2169 .min_order = 1, 2170 2170 }; ··· 2183 2183 return false; 2184 2184 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2185 2185 return false; 2186 - if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_DIRECT_RECLAIM)) 2186 + if (fail_page_alloc.ignore_gfp_reclaim && 2187 + (gfp_mask & __GFP_DIRECT_RECLAIM)) 2187 2188 return false; 2188 2189 2189 2190 return should_fail(&fail_page_alloc.attr, 1 << order); ··· 2203 2202 return PTR_ERR(dir); 2204 2203 2205 2204 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2206 - &fail_page_alloc.ignore_gfp_wait)) 2205 + &fail_page_alloc.ignore_gfp_reclaim)) 2207 2206 goto fail; 2208 2207 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2209 2208 &fail_page_alloc.ignore_gfp_highmem))
+1 -1
security/integrity/ima/ima_crypto.c
··· 126 126 { 127 127 void *ptr; 128 128 int order = ima_maxorder; 129 - gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY; 129 + gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; 130 130 131 131 if (order) 132 132 order = min(get_order(max_size), order);