Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage

Mostly direct substitution with occasional adjustment or removing
outdated comments.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kirill A. Shutemov and committed by Linus Torvalds
ea1754a0 09cbfeaf

+120 -135
+1 -1
Documentation/filesystems/cramfs.txt
··· 38 38 which the timestamp reverts to 1970, i.e. moves backwards in time. 39 39 40 40 Currently, cramfs must be written and read with architectures of the 41 - same endianness, and can be read only by kernels with PAGE_CACHE_SIZE 41 + same endianness, and can be read only by kernels with PAGE_SIZE 42 42 == 4096. At least the latter of these is a bug, but it hasn't been 43 43 decided what the best fix is. For the moment if you have larger pages 44 44 you can just change the #define in mkcramfs.c, so long as you don't
+1 -1
Documentation/filesystems/tmpfs.txt
··· 60 60 default is half of your physical RAM without swap. If you 61 61 oversize your tmpfs instances the machine will deadlock 62 62 since the OOM handler will not be able to free that memory. 63 - nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE. 63 + nr_blocks: The same as size, but in blocks of PAGE_SIZE. 64 64 nr_inodes: The maximum number of inodes for this instance. The default 65 65 is half of the number of your physical RAM pages, or (on a 66 66 machine with highmem) the number of lowmem RAM pages,
+2 -2
Documentation/filesystems/vfs.txt
··· 708 708 from the address space. This generally corresponds to either a 709 709 truncation, punch hole or a complete invalidation of the address 710 710 space (in the latter case 'offset' will always be 0 and 'length' 711 - will be PAGE_CACHE_SIZE). Any private data associated with the page 711 + will be PAGE_SIZE). Any private data associated with the page 712 712 should be updated to reflect this truncation. If offset is 0 and 713 - length is PAGE_CACHE_SIZE, then the private data should be released, 713 + length is PAGE_SIZE, then the private data should be released, 714 714 because the page must be able to be completely discarded. This may 715 715 be done by calling the ->releasepage function, but in this case the 716 716 release MUST succeed.
+1 -1
arch/parisc/mm/init.c
··· 22 22 #include <linux/swap.h> 23 23 #include <linux/unistd.h> 24 24 #include <linux/nodemask.h> /* for node_online_map */ 25 - #include <linux/pagemap.h> /* for release_pages and page_cache_release */ 25 + #include <linux/pagemap.h> /* for release_pages */ 26 26 #include <linux/compat.h> 27 27 28 28 #include <asm/pgalloc.h>
+2 -2
block/bio.c
··· 1615 1615 * the BIO and the offending pages and re-dirty the pages in process context. 1616 1616 * 1617 1617 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1618 - * here on. It will run one page_cache_release() against each page and will 1619 - * run one bio_put() against the BIO. 1618 + * here on. It will run one put_page() against each page and will run one 1619 + * bio_put() against the BIO. 1620 1620 */ 1621 1621 1622 1622 static void bio_dirty_fn(struct work_struct *work);
+2 -2
drivers/block/drbd/drbd_int.h
··· 1327 1327 #endif 1328 1328 #endif 1329 1329 1330 - /* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, 1331 - * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. 1330 + /* BIO_MAX_SIZE is 256 * PAGE_SIZE, 1331 + * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. 1332 1332 * Since we may live in a mixed-platform cluster, 1333 1333 * we limit us to a platform agnostic constant here for now. 1334 1334 * A followup commit may allow even bigger BIO sizes,
+1 -1
drivers/staging/lustre/include/linux/lnet/types.h
··· 514 514 /** 515 515 * Starting offset of the fragment within the page. Note that the 516 516 * end of the fragment must not pass the end of the page; i.e., 517 - * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. 517 + * kiov_len + kiov_offset <= PAGE_SIZE. 518 518 */ 519 519 unsigned int kiov_offset; 520 520 } lnet_kiov_t;
+2 -2
drivers/staging/lustre/lnet/selftest/selftest.h
··· 390 390 } tsi_u; 391 391 } sfw_test_instance_t; 392 392 393 - /* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at 394 - * the end of pages are not used */ 393 + /* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of 394 + * pages are not used */ 395 395 #define SFW_MAX_CONCUR LST_MAX_CONCUR 396 396 #define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t)) 397 397 #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
+1 -1
drivers/staging/lustre/lustre/include/lu_object.h
··· 1118 1118 { \ 1119 1119 type *value; \ 1120 1120 \ 1121 - CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ 1121 + CLASSERT(PAGE_SIZE >= sizeof (*value)); \ 1122 1122 \ 1123 1123 value = kzalloc(sizeof(*value), GFP_NOFS); \ 1124 1124 if (!value) \
+2 -2
drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
··· 1022 1022 * MDS_READPAGE page size 1023 1023 * 1024 1024 * This is the directory page size packed in MDS_READPAGE RPC. 1025 - * It's different than PAGE_CACHE_SIZE because the client needs to 1025 + * It's different than PAGE_SIZE because the client needs to 1026 1026 * access the struct lu_dirpage header packed at the beginning of 1027 1027 * the "page" and without this there isn't any way to know find the 1028 - * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. 1028 + * lu_dirpage header is if client and server PAGE_SIZE differ. 1029 1029 */ 1030 1030 #define LU_PAGE_SHIFT 12 1031 1031 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
+2 -2
drivers/staging/lustre/lustre/include/lustre_net.h
··· 112 112 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) 113 113 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" 114 114 # endif 115 - # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) 116 - # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" 115 + # if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) 116 + # error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" 117 117 # endif 118 118 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) 119 119 # error "PTLRPC_MAX_BRW_SIZE too big"
+1 -1
drivers/staging/lustre/lustre/include/obd.h
··· 272 272 int cl_grant_shrink_interval; /* seconds */ 273 273 274 274 /* A chunk is an optimal size used by osc_extent to determine 275 - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) 275 + * the extent size. A chunk is max(PAGE_SIZE, OST block size) 276 276 */ 277 277 int cl_chunkbits; 278 278 int cl_chunk;
+2 -3
drivers/staging/lustre/lustre/llite/dir.c
··· 134 134 * a header lu_dirpage which describes the start/end hash, and whether this 135 135 * page is empty (contains no dir entry) or hash collide with next page. 136 136 * After client receives reply, several pages will be integrated into dir page 137 - * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the 138 - * lu_dirpage for this integrated page will be adjusted. See 139 - * lmv_adjust_dirpages(). 137 + * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage 138 + * for this integrated page will be adjusted. See lmv_adjust_dirpages(). 140 139 * 141 140 */ 142 141
+1 -1
drivers/staging/lustre/lustre/llite/rw.c
··· 521 521 * striped over, rather than having a constant value for all files here. 522 522 */ 523 523 524 - /* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). 524 + /* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)). 525 525 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled 526 526 * by default, this should be adjusted corresponding with max_read_ahead_mb 527 527 * and max_read_ahead_per_file_mb otherwise the readahead budget can be used
+1 -1
drivers/staging/lustre/lustre/llite/vvp_io.c
··· 512 512 vio->cui_ra_window_set = 1; 513 513 bead->lrr_start = cl_index(obj, pos); 514 514 /* 515 - * XXX: explicit PAGE_CACHE_SIZE 515 + * XXX: explicit PAGE_SIZE 516 516 */ 517 517 bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1); 518 518 ll_ra_read_in(file, bead);
+4 -4
drivers/staging/lustre/lustre/lmv/lmv_obd.c
··· 2017 2017 * |s|e|f|p|ent| 0 | ... | 0 | 2018 2018 * '----------------- -----' 2019 2019 * 2020 - * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is 2020 + * However, on hosts where the native VM page size (PAGE_SIZE) is 2021 2021 * larger than LU_PAGE_SIZE, a single host page may contain multiple 2022 2022 * lu_dirpages. After reading the lu_dirpages from the MDS, the 2023 2023 * ldp_hash_end of the first lu_dirpage refers to the one immediately ··· 2048 2048 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span 2049 2049 * to the first entry of the next lu_dirpage. 2050 2050 */ 2051 - #if PAGE_CACHE_SIZE > LU_PAGE_SIZE 2051 + #if PAGE_SIZE > LU_PAGE_SIZE 2052 2052 static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) 2053 2053 { 2054 2054 int i; ··· 2101 2101 } 2102 2102 #else 2103 2103 #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) 2104 - #endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ 2104 + #endif /* PAGE_SIZE > LU_PAGE_SIZE */ 2105 2105 2106 2106 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, 2107 2107 struct page **pages, struct ptlrpc_request **request) ··· 2110 2110 struct lmv_obd *lmv = &obd->u.lmv; 2111 2111 __u64 offset = op_data->op_offset; 2112 2112 int rc; 2113 - int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ 2113 + int ncfspgs; /* pages read in PAGE_SIZE */ 2114 2114 int nlupgs; /* pages read in LU_PAGE_SIZE */ 2115 2115 struct lmv_tgt_desc *tgt; 2116 2116
-1
drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
··· 47 47 #include "../../include/lustre/lustre_idl.h" 48 48 49 49 #include <linux/fs.h> 50 - #include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */ 51 50 52 51 void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) 53 52 {
+1 -1
drivers/staging/lustre/lustre/osc/osc_cache.c
··· 1456 1456 * used, we should return these grants to OST. There're two cases where grants 1457 1457 * can be lost: 1458 1458 * 1. truncate; 1459 - * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was 1459 + * 2. blocksize at OST is less than PAGE_SIZE and a partial page was 1460 1460 * written. In this case OST may use less chunks to serve this partial 1461 1461 * write. OSTs don't actually know the page size on the client side. so 1462 1462 * clients have to calculate lost grant by the blocksize on the OST.
+2 -2
fs/btrfs/check-integrity.c
··· 3039 3039 3040 3040 if (root->nodesize & ((u64)PAGE_SIZE - 1)) { 3041 3041 printk(KERN_INFO 3042 - "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3042 + "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", 3043 3043 root->nodesize, PAGE_SIZE); 3044 3044 return -1; 3045 3045 } 3046 3046 if (root->sectorsize & ((u64)PAGE_SIZE - 1)) { 3047 3047 printk(KERN_INFO 3048 - "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", 3048 + "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", 3049 3049 root->sectorsize, PAGE_SIZE); 3050 3050 return -1; 3051 3051 }
+3 -5
fs/btrfs/extent_io.c
··· 3264 3264 goto done; 3265 3265 } 3266 3266 /* 3267 - * delalloc_end is already one less than the total 3268 - * length, so we don't subtract one from 3269 - * PAGE_CACHE_SIZE 3267 + * delalloc_end is already one less than the total length, so 3268 + * we don't subtract one from PAGE_SIZE 3270 3269 */ 3271 3270 delalloc_to_write += (delalloc_end - delalloc_start + 3272 - PAGE_SIZE) >> 3273 - PAGE_SHIFT; 3271 + PAGE_SIZE) >> PAGE_SHIFT; 3274 3272 delalloc_start = delalloc_end + 1; 3275 3273 } 3276 3274 if (wbc->nr_to_write < delalloc_to_write) {
+2 -2
fs/btrfs/struct-funcs.c
··· 66 66 \ 67 67 if (token && token->kaddr && token->offset <= offset && \ 68 68 token->eb == eb && \ 69 - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 69 + (token->offset + PAGE_SIZE >= offset + size)) { \ 70 70 kaddr = token->kaddr; \ 71 71 p = kaddr + part_offset - token->offset; \ 72 72 res = get_unaligned_le##bits(p + off); \ ··· 104 104 \ 105 105 if (token && token->kaddr && token->offset <= offset && \ 106 106 token->eb == eb && \ 107 - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ 107 + (token->offset + PAGE_SIZE >= offset + size)) { \ 108 108 kaddr = token->kaddr; \ 109 109 p = kaddr + part_offset - token->offset; \ 110 110 put_unaligned_le##bits(val, p + off); \
+1 -1
fs/btrfs/tests/extent-io-tests.c
··· 239 239 end = 0; 240 240 /* 241 241 * Currently if we fail to find dirty pages in the delalloc range we 242 - * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If 242 + * will adjust max_bytes down to PAGE_SIZE and then re-search. If 243 243 * this changes at any point in the future we will need to fix this 244 244 * tests expected behavior. 245 245 */
+2 -2
fs/cifs/cifsglob.h
··· 714 714 * 715 715 * Note that this might make for "interesting" allocation problems during 716 716 * writeback however as we have to allocate an array of pointers for the 717 - * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. 717 + * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096. 718 718 * 719 719 * For reads, there is a similar problem as we need to allocate an array 720 720 * of kvecs to handle the receive, though that should only need to be done ··· 733 733 734 734 /* 735 735 * The default wsize is 1M. find_get_pages seems to return a maximum of 256 736 - * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill 736 + * pages in a single call. With PAGE_SIZE == 4k, this means we can fill 737 737 * a single wsize request with a single call. 738 738 */ 739 739 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+1 -1
fs/cifs/file.c
··· 1902 1902 * find_get_pages_tag seems to return a max of 256 on each 1903 1903 * iteration, so we must call it several times in order to 1904 1904 * fill the array or the wsize is effectively limited to 1905 - * 256 * PAGE_CACHE_SIZE. 1905 + * 256 * PAGE_SIZE. 1906 1906 */ 1907 1907 *found_pages = 0; 1908 1908 pages = wdata->pages;
+13 -13
fs/cramfs/README
··· 86 86 87 87 (Block size in cramfs refers to the size of input data that is 88 88 compressed at a time. It's intended to be somewhere around 89 - PAGE_CACHE_SIZE for cramfs_readpage's convenience.) 89 + PAGE_SIZE for cramfs_readpage's convenience.) 90 90 91 91 The superblock ought to indicate the block size that the fs was 92 92 written for, since comments in <linux/pagemap.h> indicate that 93 - PAGE_CACHE_SIZE may grow in future (if I interpret the comment 93 + PAGE_SIZE may grow in future (if I interpret the comment 94 94 correctly). 95 95 96 - Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that 97 - for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in 96 + Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that 97 + for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in 98 98 turn is defined as PAGE_SIZE (which can be as large as 32KB on arm). 99 99 This discrepancy is a bug, though it's not clear which should be 100 100 changed. 101 101 102 - One option is to change mkcramfs to take its PAGE_CACHE_SIZE from 102 + One option is to change mkcramfs to take its PAGE_SIZE from 103 103 <asm/page.h>. Personally I don't like this option, but it does 104 104 require the least amount of change: just change `#define 105 - PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage 105 + PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage 106 106 is that the generated cramfs cannot always be shared between different 107 107 kernels, not even necessarily kernels of the same architecture if 108 - PAGE_CACHE_SIZE is subject to change between kernel versions 108 + PAGE_SIZE is subject to change between kernel versions 109 109 (currently possible with arm and ia64). 110 110 111 111 The remaining options try to make cramfs more sharable. ··· 126 126 1. Always 4096 bytes. 127 127 128 128 2. Writer chooses blocksize; kernel adapts but rejects blocksize > 129 - PAGE_CACHE_SIZE. 129 + PAGE_SIZE. 130 130 131 131 3. Writer chooses blocksize; kernel adapts even to blocksize > 132 - PAGE_CACHE_SIZE. 132 + PAGE_SIZE. 133 133 134 134 It's easy enough to change the kernel to use a smaller value than 135 - PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks. 135 + PAGE_SIZE: just make cramfs_readpage read multiple blocks. 136 136 137 - The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE 137 + The cost of option 1 is that kernels with a larger PAGE_SIZE 138 138 value don't get as good compression as they can. 139 139 140 140 The cost of option 2 relative to option 1 is that the code uses 141 141 variables instead of #define'd constants. The gain is that people 142 - with kernels having larger PAGE_CACHE_SIZE can make use of that if 142 + with kernels having larger PAGE_SIZE can make use of that if 143 143 they don't mind their cramfs being inaccessible to kernels with 144 - smaller PAGE_CACHE_SIZE values. 144 + smaller PAGE_SIZE values. 145 145 146 146 Option 3 is easy to implement if we don't mind being CPU-inefficient: 147 147 e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which
+1 -1
fs/cramfs/inode.c
··· 137 137 * page cache and dentry tree anyway.. 138 138 * 139 139 * This also acts as a way to guarantee contiguous areas of up to 140 - * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to 140 + * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to 141 141 * worry about end-of-buffer issues even when decompressing a full 142 142 * page cache. 143 143 */
+2 -2
fs/dax.c
··· 1094 1094 * you are truncating a file, the helper function dax_truncate_page() may be 1095 1095 * more convenient. 1096 1096 * 1097 - * We work in terms of PAGE_CACHE_SIZE here for commonality with 1097 + * We work in terms of PAGE_SIZE here for commonality with 1098 1098 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem 1099 1099 * took care of disposing of the unnecessary blocks. Even if the filesystem 1100 1100 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page ··· 1146 1146 * Similar to block_truncate_page(), this function can be called by a 1147 1147 * filesystem when it is truncating a DAX file to handle the partial page. 1148 1148 * 1149 - * We work in terms of PAGE_CACHE_SIZE here for commonality with 1149 + * We work in terms of PAGE_SIZE here for commonality with 1150 1150 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem 1151 1151 * took care of disposing of the unnecessary blocks. Even if the filesystem 1152 1152 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
+2 -2
fs/ecryptfs/inode.c
··· 763 763 } else { /* ia->ia_size < i_size_read(inode) */ 764 764 /* We're chopping off all the pages down to the page 765 765 * in which ia->ia_size is located. Fill in the end of 766 - * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to 767 - * PAGE_CACHE_SIZE with zeros. */ 766 + * that page from (ia->ia_size & ~PAGE_MASK) to 767 + * PAGE_SIZE with zeros. */ 768 768 size_t num_zeros = (PAGE_SIZE 769 769 - (ia->ia_size & ~PAGE_MASK)); 770 770
+2 -2
fs/ext2/dir.c
··· 37 37 { 38 38 unsigned len = le16_to_cpu(dlen); 39 39 40 - #if (PAGE_CACHE_SIZE >= 65536) 40 + #if (PAGE_SIZE >= 65536) 41 41 if (len == EXT2_MAX_REC_LEN) 42 42 return 1 << 16; 43 43 #endif ··· 46 46 47 47 static inline __le16 ext2_rec_len_to_disk(unsigned len) 48 48 { 49 - #if (PAGE_CACHE_SIZE >= 65536) 49 + #if (PAGE_SIZE >= 65536) 50 50 if (len == (1 << 16)) 51 51 return cpu_to_le16(EXT2_MAX_REC_LEN); 52 52 else
+2 -2
fs/ext4/ext4.h
··· 1961 1961 { 1962 1962 unsigned len = le16_to_cpu(dlen); 1963 1963 1964 - #if (PAGE_CACHE_SIZE >= 65536) 1964 + #if (PAGE_SIZE >= 65536) 1965 1965 if (len == EXT4_MAX_REC_LEN || len == 0) 1966 1966 return blocksize; 1967 1967 return (len & 65532) | ((len & 3) << 16); ··· 1974 1974 { 1975 1975 if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) 1976 1976 BUG(); 1977 - #if (PAGE_CACHE_SIZE >= 65536) 1977 + #if (PAGE_SIZE >= 65536) 1978 1978 if (len < 65536) 1979 1979 return cpu_to_le16(len); 1980 1980 if (len == blocksize) {
+1 -1
fs/ext4/inode.c
··· 4894 4894 offset = inode->i_size & (PAGE_SIZE - 1); 4895 4895 /* 4896 4896 * All buffers in the last page remain valid? Then there's nothing to 4897 - * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == 4897 + * do. We do the check mainly to optimize the common PAGE_SIZE == 4898 4898 * blocksize case 4899 4899 */ 4900 4900 if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
+2 -2
fs/ext4/mballoc.c
··· 119 119 * 120 120 * 121 121 * one block each for bitmap and buddy information. So for each group we 122 - * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / 122 + * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE / 123 123 * blocksize) blocks. So it can have information regarding groups_per_page 124 124 * which is blocks_per_page/2 125 125 * ··· 807 807 * 808 808 * one block each for bitmap and buddy information. 809 809 * So for each group we take up 2 blocks. A page can 810 - * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. 810 + * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 811 811 * So it can have information regarding groups_per_page which 812 812 * is blocks_per_page/2 813 813 *
+1 -1
fs/ext4/readpage.c
··· 23 23 * 24 24 * then this code just gives up and calls the buffer_head-based read function. 25 25 * It does handle a page which has holes at the end - that is a common case: 26 - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. 26 + * the end-of-file on blocksize < PAGE_SIZE setups. 27 27 * 28 28 */ 29 29
+1 -1
fs/hugetlbfs/inode.c
··· 237 237 /* 238 238 * Support for read() - Find the page attached to f_mapping and copy out the 239 239 * data. Its *very* similar to do_generic_mapping_read(), we can't use that 240 - * since it has PAGE_CACHE_SIZE assumptions. 240 + * since it has PAGE_SIZE assumptions. 241 241 */ 242 242 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) 243 243 {
+1 -1
fs/mpage.c
··· 331 331 * 332 332 * then this code just gives up and calls the buffer_head-based read function. 333 333 * It does handle a page which has holes at the end - that is a common case: 334 - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. 334 + * the end-of-file on blocksize < PAGE_SIZE setups. 335 335 * 336 336 * BH_Boundary explanation: 337 337 *
+1 -1
fs/ntfs/aops.c
··· 674 674 // in the inode. 675 675 // Again, for each page do: 676 676 // __set_page_dirty_buffers(); 677 - // page_cache_release() 677 + // put_page() 678 678 // We don't need to wait on the writes. 679 679 // Update iblock. 680 680 }
+1 -1
fs/ntfs/aops.h
··· 49 49 * @index: index into the page cache for @mapping of the page to map 50 50 * 51 51 * Read a page from the page cache of the address space @mapping at position 52 - * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes. 52 + * @index, where @index is in units of PAGE_SIZE, and not in bytes. 53 53 * 54 54 * If the page is not in memory it is loaded from disk first using the readpage 55 55 * method defined in the address space operations of @mapping and the page is
+6 -15
fs/ntfs/compress.c
··· 105 105 106 106 ntfs_debug("Zeroing page region outside initialized size."); 107 107 if (((s64)page->index << PAGE_SHIFT) >= initialized_size) { 108 - /* 109 - * FIXME: Using clear_page() will become wrong when we get 110 - * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem. 111 - */ 112 108 clear_page(kp); 113 109 return; 114 110 } ··· 156 160 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was 157 161 * completed during the decompression of the compression block (@cb_start). 158 162 * 159 - * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up 163 + * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up 160 164 * unpredicatbly! You have been warned! 161 165 * 162 166 * Note to hackers: This function may not sleep until it has finished accessing ··· 458 462 * have been written to so that we would lose data if we were to just overwrite 459 463 * them with the out-of-date uncompressed data. 460 464 * 461 - * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at 465 + * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at 462 466 * the end of the file I think. We need to detect this case and zero the out 463 467 * of bounds remainder of the page in question and mark it as handled. At the 464 468 * moment we would just return -EIO on such a page. This bug will only become 465 469 * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte 466 470 * clusters so is probably not going to be seen by anyone. Still this should 467 471 * be fixed. (AIA) 468 472 * 469 - * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in 473 + * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in 470 474 * handling sparse and compressed cbs. (AIA) 471 475 * 472 476 * FIXME: At the moment we don't do any zeroing out in the case that ··· 493 497 u64 cb_size_mask = cb_size - 1UL; 494 498 VCN vcn; 495 499 LCN lcn; 496 - /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ 500 + /* The first wanted vcn (minimum alignment is PAGE_SIZE). */ 497 501 VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >> 498 502 vol->cluster_size_bits; 499 503 /* 500 504 * The first vcn after the last wanted vcn (minimum alignment is again 501 - * PAGE_CACHE_SIZE. 505 + * PAGE_SIZE. 502 506 */ 503 507 VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1) 504 508 & ~cb_size_mask) >> vol->cluster_size_bits; ··· 749 753 for (; cur_page < cb_max_page; cur_page++) { 750 754 page = pages[cur_page]; 751 755 if (page) { 752 - /* 753 - * FIXME: Using clear_page() will become wrong 754 - * when we get PAGE_CACHE_SIZE != PAGE_SIZE but 755 - * for now there is no problem. 756 - */ 757 756 if (likely(!cur_ofs)) 758 757 clear_page(page_address(page)); 759 758 else ··· 798 807 * synchronous io for the majority of pages. 799 808 * Or if we choose not to do the read-ahead/-behind stuff, we 800 809 * could just return block_read_full_page(pages[xpage]) as long 801 - * as PAGE_CACHE_SIZE <= cb_size. 810 + * as PAGE_SIZE <= cb_size. 802 811 */ 803 812 if (cb_max_ofs) 804 813 cb_max_page--;
+8 -8
fs/ntfs/dir.c
··· 315 315 descend_into_child_node: 316 316 /* 317 317 * Convert vcn to index into the index allocation attribute in units 318 - * of PAGE_CACHE_SIZE and map the page cache page, reading it from 318 + * of PAGE_SIZE and map the page cache page, reading it from 319 319 * disk if necessary. 320 320 */ 321 321 page = ntfs_map_page(ia_mapping, vcn << ··· 793 793 descend_into_child_node: 794 794 /* 795 795 * Convert vcn to index into the index allocation attribute in units 796 - * of PAGE_CACHE_SIZE and map the page cache page, reading it from 796 + * of PAGE_SIZE and map the page cache page, reading it from 797 797 * disk if necessary. 798 798 */ 799 799 page = ntfs_map_page(ia_mapping, vcn << 800 - dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); 800 + dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); 801 801 if (IS_ERR(page)) { 802 802 ntfs_error(sb, "Failed to map directory index page, error %ld.", 803 803 -PTR_ERR(page)); ··· 809 809 fast_descend_into_child_node: 810 810 /* Get to the index allocation block. */ 811 811 ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << 812 - dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); 812 + dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); 813 813 /* Bounds checks. */ 814 - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { 814 + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { 815 815 ntfs_error(sb, "Out of bounds check failed. Corrupt directory " 816 816 "inode 0x%lx or driver bug.", dir_ni->mft_no); 817 817 goto unm_err_out; ··· 844 844 goto unm_err_out; 845 845 } 846 846 index_end = (u8*)ia + dir_ni->itype.index.block_size; 847 - if (index_end > kaddr + PAGE_CACHE_SIZE) { 847 + if (index_end > kaddr + PAGE_SIZE) { 848 848 ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " 849 849 "0x%lx crosses page boundary. Impossible! " 850 850 "Cannot access! This is probably a bug in the " ··· 968 968 /* If vcn is in the same page cache page as old_vcn we 969 969 * recycle the mapped page. */ 970 970 if (old_vcn << vol->cluster_size_bits >> 971 - PAGE_CACHE_SHIFT == vcn << 971 + PAGE_SHIFT == vcn << 972 972 vol->cluster_size_bits >> 973 - PAGE_CACHE_SHIFT) 973 + PAGE_SHIFT) 974 974 goto fast_descend_into_child_node; 975 975 unlock_page(page); 976 976 ntfs_unmap_page(page);
+1 -1
fs/ntfs/file.c
··· 573 573 * only partially being written to. 574 574 * 575 575 * If @nr_pages is greater than one, we are guaranteed that the cluster size is 576 - * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside 576 + * greater than PAGE_SIZE, that all pages in @pages are entirely inside 577 577 * the same cluster and that they are the entirety of that cluster, and that 578 578 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 579 579 *
+1 -1
fs/ntfs/index.c
··· 272 272 descend_into_child_node: 273 273 /* 274 274 * Convert vcn to index into the index allocation attribute in units 275 - * of PAGE_CACHE_SIZE and map the page cache page, reading it from 275 + * of PAGE_SIZE and map the page cache page, reading it from 276 276 * disk if necessary. 277 277 */ 278 278 page = ntfs_map_page(ia_mapping, vcn <<
+2 -2
fs/ntfs/inode.c
··· 870 870 } 871 871 if (ni->itype.index.block_size > PAGE_SIZE) { 872 872 ntfs_error(vi->i_sb, "Index block size (%u) > " 873 - "PAGE_CACHE_SIZE (%ld) is not " 873 + "PAGE_SIZE (%ld) is not " 874 874 "supported. Sorry.", 875 875 ni->itype.index.block_size, 876 876 PAGE_SIZE); ··· 1586 1586 goto unm_err_out; 1587 1587 } 1588 1588 if (ni->itype.index.block_size > PAGE_SIZE) { 1589 - ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE " 1589 + ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE " 1590 1590 "(%ld) is not supported. Sorry.", 1591 1591 ni->itype.index.block_size, PAGE_SIZE); 1592 1592 err = -EOPNOTSUPP;
+7 -7
fs/ntfs/super.c
··· 823 823 ntfs_debug("vol->mft_record_size_bits = %i (0x%x)", 824 824 vol->mft_record_size_bits, vol->mft_record_size_bits); 825 825 /* 826 - * We cannot support mft record sizes above the PAGE_CACHE_SIZE since 826 + * We cannot support mft record sizes above the PAGE_SIZE since 827 827 * we store $MFT/$DATA, the table of mft records in the page cache. 828 828 */ 829 829 if (vol->mft_record_size > PAGE_SIZE) { 830 830 ntfs_error(vol->sb, "Mft record size (%i) exceeds the " 831 - "PAGE_CACHE_SIZE on your system (%lu). " 831 + "PAGE_SIZE on your system (%lu). " 832 832 "This is not supported. Sorry.", 833 833 vol->mft_record_size, PAGE_SIZE); 834 834 return false; ··· 2471 2471 down_read(&vol->lcnbmp_lock); 2472 2472 /* 2473 2473 * Convert the number of bits into bytes rounded up, then convert into 2474 - * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one 2474 + * multiples of PAGE_SIZE, rounding up so that if we have one 2475 2475 * full and one partial page max_index = 2. 2476 2476 */ 2477 2477 max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >> 2478 2478 PAGE_SHIFT; 2479 - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2479 + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ 2480 2480 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2481 2481 max_index, PAGE_SIZE / 4); 2482 2482 for (index = 0; index < max_index; index++) { ··· 2547 2547 pgoff_t index; 2548 2548 2549 2549 ntfs_debug("Entering."); 2550 - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ 2550 + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ 2551 2551 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2552 2552 "0x%lx.", max_index, PAGE_SIZE / 4); 2553 2553 for (index = 0; index < max_index; index++) { ··· 2639 2639 size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; 2640 2640 /* 2641 2641 * Convert the maximum number of set bits into bytes rounded up, then 2642 - * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we 2642 + * convert into multiples of PAGE_SIZE, rounding up so that if we 2643 2643 * have one full and one partial page max_index = 2. 2644 2644 */ 2645 2645 max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) ··· 2765 2765 if (!parse_options(vol, (char*)opt)) 2766 2766 goto err_out_now; 2767 2767 2768 - /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2768 + /* We support sector sizes up to the PAGE_SIZE. */ 2769 2769 if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) { 2770 2770 if (!silent) 2771 2771 ntfs_error(sb, "Device has unsupported sector size "
+1 -1
fs/ocfs2/aops.c
··· 684 684 return ret; 685 685 } 686 686 687 - #if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) 687 + #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) 688 688 #define OCFS2_MAX_CTXT_PAGES 1 689 689 #else 690 690 #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE)
+1 -1
fs/ocfs2/refcounttree.c
··· 2956 2956 } 2957 2957 2958 2958 /* 2959 - * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page 2959 + * In case PAGE_SIZE <= CLUSTER_SIZE, This page 2960 2960 * can't be dirtied before we CoW it out. 2961 2961 */ 2962 2962 if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+1 -1
fs/reiserfs/journal.c
··· 599 599 * This does a check to see if the buffer belongs to one of these 600 600 * lost pages before doing the final put_bh. If page->mapping was 601 601 * null, it tries to free buffers on the page, which should make the 602 - * final page_cache_release drop the page from the lru. 602 + * final put_page drop the page from the lru. 603 603 */ 604 604 static void release_buffer_page(struct buffer_head *bh) 605 605 {
+2 -2
fs/squashfs/cache.c
··· 30 30 * access the metadata and fragment caches. 31 31 * 32 32 * To avoid out of memory and fragmentation issues with vmalloc the cache 33 - * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. 33 + * uses sequences of kmalloced PAGE_SIZE buffers. 34 34 * 35 35 * It should be noted that the cache is not used for file datablocks, these 36 36 * are decompressed and cached in the page-cache in the normal way. The ··· 231 231 /* 232 232 * Initialise cache allocating the specified number of entries, each of 233 233 * size block_size. To avoid vmalloc fragmentation issues each entry 234 - * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. 234 + * is allocated as a sequence of kmalloced PAGE_SIZE buffers. 235 235 */ 236 236 struct squashfs_cache *squashfs_cache_init(char *name, int entries, 237 237 int block_size)
+1 -1
fs/squashfs/file.c
··· 382 382 383 383 /* 384 384 * Loop copying datablock into pages. As the datablock likely covers 385 - * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly 385 + * many PAGE_SIZE pages (default block size is 128 KiB) explicitly 386 386 * grab the pages from the page cache, except for the page that we've 387 387 * been called to fill. 388 388 */
+1 -1
fs/ubifs/file.c
··· 554 554 * VFS copied less data to the page that it intended and 555 555 * declared in its '->write_begin()' call via the @len 556 556 * argument. If the page was not up-to-date, and @len was 557 - * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did 557 + * @PAGE_SIZE, the 'ubifs_write_begin()' function did 558 558 * not load it from the media (for optimization reasons). This 559 559 * means that part of the page contains garbage. So read the 560 560 * page now.
+1 -1
fs/ubifs/super.c
··· 2237 2237 BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); 2238 2238 2239 2239 /* 2240 - * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to 2240 + * We require that PAGE_SIZE is greater-than-or-equal-to 2241 2241 * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. 2242 2242 */ 2243 2243 if (PAGE_SIZE < UBIFS_BLOCK_SIZE) {
+2 -2
fs/xfs/xfs_aops.c
··· 1563 1563 int status; 1564 1564 struct xfs_mount *mp = XFS_I(mapping->host)->i_mount; 1565 1565 1566 - ASSERT(len <= PAGE_CACHE_SIZE); 1566 + ASSERT(len <= PAGE_SIZE); 1567 1567 1568 1568 page = grab_cache_page_write_begin(mapping, index, flags); 1569 1569 if (!page) ··· 1620 1620 { 1621 1621 int ret; 1622 1622 1623 - ASSERT(len <= PAGE_CACHE_SIZE); 1623 + ASSERT(len <= PAGE_SIZE); 1624 1624 1625 1625 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 1626 1626 if (unlikely(ret < len)) {
+2 -2
fs/xfs/xfs_super.c
··· 556 556 /* Figure out maximum filesize, on Linux this can depend on 557 557 * the filesystem blocksize (on 32 bit platforms). 558 558 * __block_write_begin does this in an [unsigned] long... 559 - * page->index << (PAGE_CACHE_SHIFT - bbits) 559 + * page->index << (PAGE_SHIFT - bbits) 560 560 * So, for page sized blocks (4K on 32 bit platforms), 561 561 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is 562 - * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 562 + * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1) 563 563 * but for smaller blocksizes it is less (bbits = log2 bsize). 564 564 * Note1: get_block_t takes a long (implicit cast from above) 565 565 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
+1 -1
include/linux/backing-dev-defs.h
··· 135 135 136 136 struct backing_dev_info { 137 137 struct list_head bdi_list; 138 - unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 138 + unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ 139 139 unsigned int capabilities; /* Device capabilities */ 140 140 congested_fn *congested_fn; /* Function pointer if device is md/dm */ 141 141 void *congested_data; /* Pointer to aux data for congested func */
+1 -1
include/linux/mm.h
··· 623 623 * 624 624 * A page may belong to an inode's memory mapping. In this case, page->mapping 625 625 * is the pointer to the inode, and page->index is the file offset of the page, 626 - * in units of PAGE_CACHE_SIZE. 626 + * in units of PAGE_SIZE. 627 627 * 628 628 * If pagecache pages are not associated with an inode, they are said to be 629 629 * anonymous pages. These may become associated with the swapcache, and in that
+1 -1
include/linux/mm_types.h
··· 341 341 342 342 /* Information about our backing store: */ 343 343 unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE 344 - units, *not* PAGE_CACHE_SIZE */ 344 + units */ 345 345 struct file * vm_file; /* File we map to (can be NULL). */ 346 346 void * vm_private_data; /* was vm_pte (shared mem) */ 347 347
+2 -2
include/linux/nfs_page.h
··· 41 41 struct page *wb_page; /* page to read in/write out */ 42 42 struct nfs_open_context *wb_context; /* File state context info */ 43 43 struct nfs_lock_context *wb_lock_context; /* lock context info */ 44 - pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ 45 - unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 44 + pgoff_t wb_index; /* Offset >> PAGE_SHIFT */ 45 + unsigned int wb_offset, /* Offset & ~PAGE_MASK */ 46 46 wb_pgbase, /* Start of page data */ 47 47 wb_bytes; /* Length of request */ 48 48 struct kref wb_kref; /* reference count */
+2 -2
include/linux/nilfs2_fs.h
··· 331 331 { 332 332 unsigned len = le16_to_cpu(dlen); 333 333 334 - #if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 334 + #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) 335 335 if (len == NILFS_MAX_REC_LEN) 336 336 return 1 << 16; 337 337 #endif ··· 340 340 341 341 static inline __le16 nilfs_rec_len_to_disk(unsigned len) 342 342 { 343 - #if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) 343 + #if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) 344 344 if (len == (1 << 16)) 345 345 return cpu_to_le16(NILFS_MAX_REC_LEN); 346 346 else if (len > (1 << 16))
+1 -2
include/linux/pagemap.h
··· 535 535 /* 536 536 * Fault a userspace page into pagetables. Return non-zero on a fault. 537 537 * 538 - * This assumes that two userspace pages are always sufficient. That's 539 - * not true if PAGE_CACHE_SIZE > PAGE_SIZE. 538 + * This assumes that two userspace pages are always sufficient. 540 539 */ 541 540 static inline int fault_in_pages_writeable(char __user *uaddr, int size) 542 541 {
+1 -1
include/linux/sunrpc/svc.h
··· 129 129 * 130 130 * These happen to all be powers of 2, which is not strictly 131 131 * necessary but helps enforce the real limitation, which is 132 - * that they should be multiples of PAGE_CACHE_SIZE. 132 + * that they should be multiples of PAGE_SIZE. 133 133 * 134 134 * For UDP transports, a block plus NFS,RPC, and UDP headers 135 135 * has to fit into the IP datagram limit of 64K. The largest
+1 -1
include/linux/swap.h
··· 433 433 #define si_swapinfo(val) \ 434 434 do { (val)->freeswap = (val)->totalswap = 0; } while (0) 435 435 /* only sparc can not include linux/pagemap.h in this file 436 - * so leave page_cache_release and release_pages undeclared... */ 436 + * so leave put_page and release_pages undeclared... */ 437 437 #define free_page_and_swap_cache(page) \ 438 438 put_page(page) 439 439 #define free_pages_and_swap_cache(pages, nr) \
+1 -1
mm/gup.c
··· 1107 1107 * @addr: user address 1108 1108 * 1109 1109 * Returns struct page pointer of user page pinned for dump, 1110 - * to be freed afterwards by page_cache_release() or put_page(). 1110 + * to be freed afterwards by put_page(). 1111 1111 * 1112 1112 * Returns NULL on any kind of failure - a hole must then be inserted into 1113 1113 * the corefile, to preserve alignment with its headers; and also returns
-1
mm/memory.c
··· 2400 2400 2401 2401 vba = vma->vm_pgoff; 2402 2402 vea = vba + vma_pages(vma) - 1; 2403 - /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 2404 2403 zba = details->first_index; 2405 2404 if (zba < vba) 2406 2405 zba = vba;
+2 -2
mm/mincore.c
··· 211 211 * return values: 212 212 * zero - success 213 213 * -EFAULT - vec points to an illegal address 214 - * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE 214 + * -EINVAL - addr is not a multiple of PAGE_SIZE 215 215 * -ENOMEM - Addresses in the range [addr, addr + len] are 216 216 * invalid for the address space of this process, or 217 217 * specify one or more pages which are not currently ··· 233 233 if (!access_ok(VERIFY_READ, (void __user *) start, len)) 234 234 return -ENOMEM; 235 235 236 - /* This also avoids any overflows on PAGE_CACHE_ALIGN */ 236 + /* This also avoids any overflows on PAGE_ALIGN */ 237 237 pages = len >> PAGE_SHIFT; 238 238 pages += (offset_in_page(len)) != 0; 239 239
+1 -1
mm/swap.c
··· 698 698 } 699 699 700 700 /** 701 - * release_pages - batched page_cache_release() 701 + * release_pages - batched put_page() 702 702 * @pages: array of pages to release 703 703 * @nr: number of pages 704 704 * @cold: whether the pages are cache cold
+1 -1
net/sunrpc/xdr.c
··· 164 164 * Note: the addresses pgto_base and pgfrom_base are both calculated in 165 165 * the same way: 166 166 * if a memory area starts at byte 'base' in page 'pages[i]', 167 - * then its address is given as (i << PAGE_CACHE_SHIFT) + base 167 + * then its address is given as (i << PAGE_SHIFT) + base 168 168 * Also note: pgfrom_base must be < pgto_base, but the memory areas 169 169 * they point to may overlap. 170 170 */