Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Btrfs: use linux/sizes.h to represent constants

We use many constants to represent size and offset values. And to make
the code readable we use '256 * 1024 * 1024' instead of '268435456' to
represent '256MB'. However, we can make it far more readable with 'SZ_256M',
which is defined in 'linux/sizes.h'.

So this patch replaces 'xxx * 1024 * 1024' kinds of expressions with a
single 'SZ_xxxM' if 'xxx' is a power of 2, or with 'xxx * SZ_1M' if 'xxx' is
not a power of 2. And I haven't touched '4096' & '8192' because they are
more intuitive than 'SZ_4K' & 'SZ_8K'.

Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
Signed-off-by: David Sterba <dsterba@suse.com>

authored by

Byongho Lee and committed by
David Sterba
ee22184b 7928d672

+147 -177
+1 -1
fs/btrfs/ctree.c
··· 1555 1555 return 0; 1556 1556 } 1557 1557 1558 - search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); 1558 + search_start = buf->start & ~((u64)SZ_1G - 1); 1559 1559 1560 1560 if (parent) 1561 1561 btrfs_set_lock_blocking(parent);
+3 -2
fs/btrfs/ctree.h
··· 35 35 #include <linux/btrfs.h> 36 36 #include <linux/workqueue.h> 37 37 #include <linux/security.h> 38 + #include <linux/sizes.h> 38 39 #include "extent_io.h" 39 40 #include "extent_map.h" 40 41 #include "async-thread.h" ··· 197 196 /* ioprio of readahead is set to idle */ 198 197 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)) 199 198 200 - #define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024) 199 + #define BTRFS_DIRTY_METADATA_THRESH SZ_32M 201 200 202 - #define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024) 201 + #define BTRFS_MAX_EXTENT_SIZE SZ_128M 203 202 204 203 /* 205 204 * The key defines the order in the tree, and so it also defines (optimal)
+1 -1
fs/btrfs/disk-io.c
··· 2809 2809 2810 2810 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); 2811 2811 fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, 2812 - 4 * 1024 * 1024 / PAGE_CACHE_SIZE); 2812 + SZ_4M / PAGE_CACHE_SIZE); 2813 2813 2814 2814 tree_root->nodesize = nodesize; 2815 2815 tree_root->sectorsize = sectorsize;
+2 -2
fs/btrfs/disk-io.h
··· 19 19 #ifndef __DISKIO__ 20 20 #define __DISKIO__ 21 21 22 - #define BTRFS_SUPER_INFO_OFFSET (64 * 1024) 22 + #define BTRFS_SUPER_INFO_OFFSET SZ_64K 23 23 #define BTRFS_SUPER_INFO_SIZE 4096 24 24 25 25 #define BTRFS_SUPER_MIRROR_MAX 3 ··· 35 35 36 36 static inline u64 btrfs_sb_offset(int mirror) 37 37 { 38 - u64 start = 16 * 1024; 38 + u64 start = SZ_16K; 39 39 if (mirror) 40 40 return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror); 41 41 return BTRFS_SUPER_INFO_OFFSET;
+13 -16
fs/btrfs/extent-tree.c
··· 521 521 else 522 522 last = key.objectid + key.offset; 523 523 524 - if (total_found > (1024 * 1024 * 2)) { 524 + if (total_found > SZ_2M) { 525 525 total_found = 0; 526 526 if (wakeup) 527 527 wake_up(&caching_ctl->wait); ··· 3328 3328 * If this block group is smaller than 100 megs don't bother caching the 3329 3329 * block group. 3330 3330 */ 3331 - if (block_group->key.offset < (100 * 1024 * 1024)) { 3331 + if (block_group->key.offset < (100 * SZ_1M)) { 3332 3332 spin_lock(&block_group->lock); 3333 3333 block_group->disk_cache_state = BTRFS_DC_WRITTEN; 3334 3334 spin_unlock(&block_group->lock); ··· 3428 3428 * taking up quite a bit since it's not folded into the other space 3429 3429 * cache. 3430 3430 */ 3431 - num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024); 3431 + num_pages = div_u64(block_group->key.offset, SZ_256M); 3432 3432 if (!num_pages) 3433 3433 num_pages = 1; 3434 3434 ··· 4239 4239 */ 4240 4240 if (force == CHUNK_ALLOC_LIMITED) { 4241 4241 thresh = btrfs_super_total_bytes(root->fs_info->super_copy); 4242 - thresh = max_t(u64, 64 * 1024 * 1024, 4243 - div_factor_fine(thresh, 1)); 4242 + thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); 4244 4243 4245 4244 if (num_bytes - num_allocated < thresh) 4246 4245 return 1; 4247 4246 } 4248 4247 4249 - if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8)) 4248 + if (num_allocated + SZ_2M < div_factor(num_bytes, 8)) 4250 4249 return 0; 4251 4250 return 1; 4252 4251 } ··· 4445 4446 * transaction. 
4446 4447 */ 4447 4448 if (trans->can_flush_pending_bgs && 4448 - trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { 4449 + trans->chunk_bytes_reserved >= (u64)SZ_2M) { 4449 4450 btrfs_create_pending_block_groups(trans, trans->root); 4450 4451 btrfs_trans_release_chunk_metadata(trans); 4451 4452 } ··· 4543 4544 return nr; 4544 4545 } 4545 4546 4546 - #define EXTENT_SIZE_PER_ITEM (256 * 1024) 4547 + #define EXTENT_SIZE_PER_ITEM SZ_256K 4547 4548 4548 4549 /* 4549 4550 * shrink metadata reservation for delalloc ··· 4748 4749 u64 expected; 4749 4750 u64 to_reclaim; 4750 4751 4751 - to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024, 4752 - 16 * 1024 * 1024); 4752 + to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M); 4753 4753 spin_lock(&space_info->lock); 4754 4754 if (can_overcommit(root, space_info, to_reclaim, 4755 4755 BTRFS_RESERVE_FLUSH_ALL)) { ··· 4759 4761 used = space_info->bytes_used + space_info->bytes_reserved + 4760 4762 space_info->bytes_pinned + space_info->bytes_readonly + 4761 4763 space_info->bytes_may_use; 4762 - if (can_overcommit(root, space_info, 1024 * 1024, 4763 - BTRFS_RESERVE_FLUSH_ALL)) 4764 + if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL)) 4764 4765 expected = div_factor_fine(space_info->total_bytes, 95); 4765 4766 else 4766 4767 expected = div_factor_fine(space_info->total_bytes, 90); ··· 5315 5318 spin_lock(&sinfo->lock); 5316 5319 spin_lock(&block_rsv->lock); 5317 5320 5318 - block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); 5321 + block_rsv->size = min_t(u64, num_bytes, SZ_512M); 5319 5322 5320 5323 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + 5321 5324 sinfo->bytes_reserved + sinfo->bytes_readonly + ··· 6219 6222 return ret; 6220 6223 6221 6224 if (ssd) 6222 - *empty_cluster = 2 * 1024 * 1024; 6225 + *empty_cluster = SZ_2M; 6223 6226 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { 6224 6227 ret = &root->fs_info->meta_alloc_cluster; 6225 6228 if (!ssd) 6226 - *empty_cluster 
= 64 * 1024; 6229 + *empty_cluster = SZ_64K; 6227 6230 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) { 6228 6231 ret = &root->fs_info->data_alloc_cluster; 6229 6232 } ··· 9121 9124 if ((sinfo->flags & 9122 9125 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) && 9123 9126 !force) 9124 - min_allocable_bytes = 1 * 1024 * 1024; 9127 + min_allocable_bytes = SZ_1M; 9125 9128 else 9126 9129 min_allocable_bytes = 0; 9127 9130
+1 -1
fs/btrfs/extent_io.c
··· 4387 4387 u64 end = start + PAGE_CACHE_SIZE - 1; 4388 4388 4389 4389 if (gfpflags_allow_blocking(mask) && 4390 - page->mapping->host->i_size > 16 * 1024 * 1024) { 4390 + page->mapping->host->i_size > SZ_16M) { 4391 4391 u64 len; 4392 4392 while (start <= end) { 4393 4393 len = end - start + 1;
+4 -6
fs/btrfs/free-space-cache.c
··· 30 30 #include "volumes.h" 31 31 32 32 #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 33 - #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) 33 + #define MAX_CACHE_BYTES_PER_GIG SZ_32K 34 34 35 35 struct btrfs_trim_range { 36 36 u64 start; ··· 1656 1656 * at or below 32k, so we need to adjust how much memory we allow to be 1657 1657 * used by extent based free space tracking 1658 1658 */ 1659 - if (size < 1024 * 1024 * 1024) 1659 + if (size < SZ_1G) 1660 1660 max_bytes = MAX_CACHE_BYTES_PER_GIG; 1661 1661 else 1662 - max_bytes = MAX_CACHE_BYTES_PER_GIG * 1663 - div_u64(size, 1024 * 1024 * 1024); 1662 + max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G); 1664 1663 1665 1664 /* 1666 1665 * we want to account for 1 more bitmap than what we have so we can make ··· 2488 2489 * track of free space, and if we pass 1/2 of that we want to 2489 2490 * start converting things over to using bitmaps 2490 2491 */ 2491 - ctl->extents_thresh = ((1024 * 32) / 2) / 2492 - sizeof(struct btrfs_free_space); 2492 + ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space); 2493 2493 } 2494 2494 2495 2495 /*
+1 -1
fs/btrfs/inode-map.c
··· 282 282 } 283 283 } 284 284 285 - #define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) 285 + #define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) 286 286 #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) 287 287 288 288 /*
+11 -11
fs/btrfs/inode.c
··· 414 414 unsigned long nr_pages_ret = 0; 415 415 unsigned long total_compressed = 0; 416 416 unsigned long total_in = 0; 417 - unsigned long max_compressed = 128 * 1024; 418 - unsigned long max_uncompressed = 128 * 1024; 417 + unsigned long max_compressed = SZ_128K; 418 + unsigned long max_uncompressed = SZ_128K; 419 419 int i; 420 420 int will_compress; 421 421 int compress_type = root->fs_info->compress_type; 422 422 int redirty = 0; 423 423 424 424 /* if this is a small write inside eof, kick off a defrag */ 425 - if ((end - start + 1) < 16 * 1024 && 425 + if ((end - start + 1) < SZ_16K && 426 426 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 427 427 btrfs_add_inode_defrag(NULL, inode); 428 428 ··· 430 430 again: 431 431 will_compress = 0; 432 432 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; 433 - nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); 433 + nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); 434 434 435 435 /* 436 436 * we don't want to send crud past the end of i_size through ··· 944 944 disk_num_bytes = num_bytes; 945 945 946 946 /* if this is a small write inside eof, kick off defrag */ 947 - if (num_bytes < 64 * 1024 && 947 + if (num_bytes < SZ_64K && 948 948 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 949 949 btrfs_add_inode_defrag(NULL, inode); 950 950 ··· 1107 1107 * atomic_sub_return implies a barrier for waitqueue_active 1108 1108 */ 1109 1109 if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < 1110 - 5 * 1024 * 1024 && 1110 + 5 * SZ_1M && 1111 1111 waitqueue_active(&root->fs_info->async_submit_wait)) 1112 1112 wake_up(&root->fs_info->async_submit_wait); 1113 1113 ··· 1132 1132 struct btrfs_root *root = BTRFS_I(inode)->root; 1133 1133 unsigned long nr_pages; 1134 1134 u64 cur_end; 1135 - int limit = 10 * 1024 * 1024; 1135 + int limit = 10 * SZ_1M; 1136 1136 1137 1137 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED, 1138 1138 
1, 0, NULL, GFP_NOFS); ··· 1148 1148 !btrfs_test_opt(root, FORCE_COMPRESS)) 1149 1149 cur_end = end; 1150 1150 else 1151 - cur_end = min(end, start + 512 * 1024 - 1); 1151 + cur_end = min(end, start + SZ_512K - 1); 1152 1152 1153 1153 async_cow->end = cur_end; 1154 1154 INIT_LIST_HEAD(&async_cow->extents); ··· 4348 4348 * up a huge file in a single leaf. Most of the time that 4349 4349 * bytes_deleted is > 0, it will be huge by the time we get here 4350 4350 */ 4351 - if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4351 + if (be_nice && bytes_deleted > SZ_32M) { 4352 4352 if (btrfs_should_end_transaction(trans, root)) { 4353 4353 err = -EAGAIN; 4354 4354 goto error; ··· 4591 4591 4592 4592 btrfs_free_path(path); 4593 4593 4594 - if (be_nice && bytes_deleted > 32 * 1024 * 1024) { 4594 + if (be_nice && bytes_deleted > SZ_32M) { 4595 4595 unsigned long updates = trans->delayed_ref_updates; 4596 4596 if (updates) { 4597 4597 trans->delayed_ref_updates = 0; ··· 9757 9757 } 9758 9758 } 9759 9759 9760 - cur_bytes = min(num_bytes, 256ULL * 1024 * 1024); 9760 + cur_bytes = min_t(u64, num_bytes, SZ_256M); 9761 9761 cur_bytes = max(cur_bytes, min_size); 9762 9762 /* 9763 9763 * If we are severely fragmented we could end up with really
+11 -12
fs/btrfs/ioctl.c
··· 1016 1016 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) 1017 1017 ret = false; 1018 1018 else if ((em->block_start + em->block_len == next->block_start) && 1019 - (em->block_len > 128 * 1024 && next->block_len > 128 * 1024)) 1019 + (em->block_len > SZ_128K && next->block_len > SZ_128K)) 1020 1020 ret = false; 1021 1021 1022 1022 free_extent_map(next); ··· 1262 1262 int defrag_count = 0; 1263 1263 int compress_type = BTRFS_COMPRESS_ZLIB; 1264 1264 u32 extent_thresh = range->extent_thresh; 1265 - unsigned long max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; 1265 + unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT; 1266 1266 unsigned long cluster = max_cluster; 1267 - u64 new_align = ~((u64)128 * 1024 - 1); 1267 + u64 new_align = ~((u64)SZ_128K - 1); 1268 1268 struct page **pages = NULL; 1269 1269 1270 1270 if (isize == 0) ··· 1281 1281 } 1282 1282 1283 1283 if (extent_thresh == 0) 1284 - extent_thresh = 256 * 1024; 1284 + extent_thresh = SZ_256K; 1285 1285 1286 1286 /* 1287 1287 * if we were not given a file, allocate a readahead ··· 1313 1313 1314 1314 if (newer_than) { 1315 1315 ret = find_new_extents(root, inode, newer_than, 1316 - &newer_off, 64 * 1024); 1316 + &newer_off, SZ_64K); 1317 1317 if (!ret) { 1318 1318 range->start = newer_off; 1319 1319 /* ··· 1403 1403 newer_off = max(newer_off + 1, 1404 1404 (u64)i << PAGE_CACHE_SHIFT); 1405 1405 1406 - ret = find_new_extents(root, inode, 1407 - newer_than, &newer_off, 1408 - 64 * 1024); 1406 + ret = find_new_extents(root, inode, newer_than, 1407 + &newer_off, SZ_64K); 1409 1408 if (!ret) { 1410 1409 range->start = newer_off; 1411 1410 i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; ··· 1570 1571 new_size = old_size + new_size; 1571 1572 } 1572 1573 1573 - if (new_size < 256 * 1024 * 1024) { 1574 + if (new_size < SZ_256M) { 1574 1575 ret = -EINVAL; 1575 1576 goto out_free; 1576 1577 } ··· 2159 2160 struct inode *inode; 2160 2161 int ret; 2161 2162 size_t buf_size; 2162 - const size_t buf_limit = 
16 * 1024 * 1024; 2163 + const size_t buf_limit = SZ_16M; 2163 2164 2164 2165 if (!capable(CAP_SYS_ADMIN)) 2165 2166 return -EPERM; ··· 3095 3096 return ret; 3096 3097 } 3097 3098 3098 - #define BTRFS_MAX_DEDUPE_LEN (16 * 1024 * 1024) 3099 + #define BTRFS_MAX_DEDUPE_LEN SZ_16M 3099 3100 3100 3101 static long btrfs_ioctl_file_extent_same(struct file *file, 3101 3102 struct btrfs_ioctl_same_args __user *argp) ··· 4523 4524 goto out; 4524 4525 } 4525 4526 4526 - size = min_t(u32, loi->size, 64 * 1024); 4527 + size = min_t(u32, loi->size, SZ_64K); 4527 4528 inodes = init_data_container(size); 4528 4529 if (IS_ERR(inodes)) { 4529 4530 ret = PTR_ERR(inodes);
+2 -2
fs/btrfs/send.h
··· 22 22 #define BTRFS_SEND_STREAM_MAGIC "btrfs-stream" 23 23 #define BTRFS_SEND_STREAM_VERSION 1 24 24 25 - #define BTRFS_SEND_BUF_SIZE (1024 * 64) 26 - #define BTRFS_SEND_READ_SIZE (1024 * 48) 25 + #define BTRFS_SEND_BUF_SIZE SZ_64K 26 + #define BTRFS_SEND_READ_SIZE (48 * SZ_1K) 27 27 28 28 enum btrfs_tlv_type { 29 29 BTRFS_TLV_U8,
+1 -1
fs/btrfs/super.c
··· 1865 1865 * btrfs starts at an offset of at least 1MB when doing chunk 1866 1866 * allocation. 1867 1867 */ 1868 - skip_space = 1024 * 1024; 1868 + skip_space = SZ_1M; 1869 1869 1870 1870 /* user can set the offset in fs_info->alloc_start. */ 1871 1871 if (fs_info->alloc_start &&
+6 -5
fs/btrfs/tests/extent-io-tests.c
··· 18 18 19 19 #include <linux/pagemap.h> 20 20 #include <linux/sched.h> 21 + #include <linux/sizes.h> 21 22 #include "btrfs-tests.h" 22 23 #include "../extent_io.h" 23 24 ··· 71 70 struct page *page; 72 71 struct page *locked_page = NULL; 73 72 unsigned long index = 0; 74 - u64 total_dirty = 256 * 1024 * 1024; 75 - u64 max_bytes = 128 * 1024 * 1024; 73 + u64 total_dirty = SZ_256M; 74 + u64 max_bytes = SZ_128M; 76 75 u64 start, end, test_start; 77 76 u64 found; 78 77 int ret = -EINVAL; ··· 134 133 * |--- delalloc ---| 135 134 * |--- search ---| 136 135 */ 137 - test_start = 64 * 1024 * 1024; 136 + test_start = SZ_64M; 138 137 locked_page = find_lock_page(inode->i_mapping, 139 138 test_start >> PAGE_CACHE_SHIFT); 140 139 if (!locked_page) { ··· 221 220 * Now to test where we run into a page that is no longer dirty in the 222 221 * range we want to find. 223 222 */ 224 - page = find_get_page(inode->i_mapping, (max_bytes + (1 * 1024 * 1024)) 225 - >> PAGE_CACHE_SHIFT); 223 + page = find_get_page(inode->i_mapping, 224 + (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT); 226 225 if (!page) { 227 226 test_msg("Couldn't find our page\n"); 228 227 goto out_bits;
+80 -106
fs/btrfs/tests/free-space-tests.c
··· 44 44 } 45 45 46 46 cache->key.objectid = 0; 47 - cache->key.offset = 1024 * 1024 * 1024; 47 + cache->key.offset = SZ_1G; 48 48 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 49 49 cache->sectorsize = 4096; 50 50 cache->full_stripe_len = 4096; ··· 71 71 test_msg("Running extent only tests\n"); 72 72 73 73 /* First just make sure we can remove an entire entry */ 74 - ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024); 74 + ret = btrfs_add_free_space(cache, 0, SZ_4M); 75 75 if (ret) { 76 76 test_msg("Error adding initial extents %d\n", ret); 77 77 return ret; 78 78 } 79 79 80 - ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024); 80 + ret = btrfs_remove_free_space(cache, 0, SZ_4M); 81 81 if (ret) { 82 82 test_msg("Error removing extent %d\n", ret); 83 83 return ret; 84 84 } 85 85 86 - if (test_check_exists(cache, 0, 4 * 1024 * 1024)) { 86 + if (test_check_exists(cache, 0, SZ_4M)) { 87 87 test_msg("Full remove left some lingering space\n"); 88 88 return -1; 89 89 } 90 90 91 91 /* Ok edge and middle cases now */ 92 - ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024); 92 + ret = btrfs_add_free_space(cache, 0, SZ_4M); 93 93 if (ret) { 94 94 test_msg("Error adding half extent %d\n", ret); 95 95 return ret; 96 96 } 97 97 98 - ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024); 98 + ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M); 99 99 if (ret) { 100 100 test_msg("Error removing tail end %d\n", ret); 101 101 return ret; 102 102 } 103 103 104 - ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024); 104 + ret = btrfs_remove_free_space(cache, 0, SZ_1M); 105 105 if (ret) { 106 106 test_msg("Error removing front end %d\n", ret); 107 107 return ret; 108 108 } 109 109 110 - ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096); 110 + ret = btrfs_remove_free_space(cache, SZ_2M, 4096); 111 111 if (ret) { 112 112 test_msg("Error removing middle piece %d\n", ret); 113 113 return ret; 114 114 } 115 115 116 - if 
(test_check_exists(cache, 0, 1 * 1024 * 1024)) { 116 + if (test_check_exists(cache, 0, SZ_1M)) { 117 117 test_msg("Still have space at the front\n"); 118 118 return -1; 119 119 } 120 120 121 - if (test_check_exists(cache, 2 * 1024 * 1024, 4096)) { 121 + if (test_check_exists(cache, SZ_2M, 4096)) { 122 122 test_msg("Still have space in the middle\n"); 123 123 return -1; 124 124 } 125 125 126 - if (test_check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) { 126 + if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) { 127 127 test_msg("Still have space at the end\n"); 128 128 return -1; 129 129 } ··· 141 141 142 142 test_msg("Running bitmap only tests\n"); 143 143 144 - ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1); 144 + ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); 145 145 if (ret) { 146 146 test_msg("Couldn't create a bitmap entry %d\n", ret); 147 147 return ret; 148 148 } 149 149 150 - ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024); 150 + ret = btrfs_remove_free_space(cache, 0, SZ_4M); 151 151 if (ret) { 152 152 test_msg("Error removing bitmap full range %d\n", ret); 153 153 return ret; 154 154 } 155 155 156 - if (test_check_exists(cache, 0, 4 * 1024 * 1024)) { 156 + if (test_check_exists(cache, 0, SZ_4M)) { 157 157 test_msg("Left some space in bitmap\n"); 158 158 return -1; 159 159 } 160 160 161 - ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1); 161 + ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); 162 162 if (ret) { 163 163 test_msg("Couldn't add to our bitmap entry %d\n", ret); 164 164 return ret; 165 165 } 166 166 167 - ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024); 167 + ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M); 168 168 if (ret) { 169 169 test_msg("Couldn't remove middle chunk %d\n", ret); 170 170 return ret; ··· 177 177 next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); 178 178 179 179 /* Test a bit straddling two bitmaps */ 180 - ret = test_add_free_space_entry(cache, 
next_bitmap_offset - 181 - (2 * 1024 * 1024), 4 * 1024 * 1024, 1); 180 + ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, 181 + SZ_4M, 1); 182 182 if (ret) { 183 183 test_msg("Couldn't add space that straddles two bitmaps %d\n", 184 184 ret); 185 185 return ret; 186 186 } 187 187 188 - ret = btrfs_remove_free_space(cache, next_bitmap_offset - 189 - (1 * 1024 * 1024), 2 * 1024 * 1024); 188 + ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M); 190 189 if (ret) { 191 190 test_msg("Couldn't remove overlapping space %d\n", ret); 192 191 return ret; 193 192 } 194 193 195 - if (test_check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024), 196 - 2 * 1024 * 1024)) { 194 + if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) { 197 195 test_msg("Left some space when removing overlapping\n"); 198 196 return -1; 199 197 } ··· 214 216 * bitmap, but the free space completely in the extent and then 215 217 * completely in the bitmap. 216 218 */ 217 - ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1); 219 + ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1); 218 220 if (ret) { 219 221 test_msg("Couldn't create bitmap entry %d\n", ret); 220 222 return ret; 221 223 } 222 224 223 - ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0); 225 + ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); 224 226 if (ret) { 225 227 test_msg("Couldn't add extent entry %d\n", ret); 226 228 return ret; 227 229 } 228 230 229 - ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024); 231 + ret = btrfs_remove_free_space(cache, 0, SZ_1M); 230 232 if (ret) { 231 233 test_msg("Couldn't remove extent entry %d\n", ret); 232 234 return ret; 233 235 } 234 236 235 - if (test_check_exists(cache, 0, 1 * 1024 * 1024)) { 237 + if (test_check_exists(cache, 0, SZ_1M)) { 236 238 test_msg("Left remnants after our remove\n"); 237 239 return -1; 238 240 } 239 241 240 242 /* Now to add back the extent entry and remove from the 
bitmap */ 241 - ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0); 243 + ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); 242 244 if (ret) { 243 245 test_msg("Couldn't re-add extent entry %d\n", ret); 244 246 return ret; 245 247 } 246 248 247 - ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024); 249 + ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M); 248 250 if (ret) { 249 251 test_msg("Couldn't remove from bitmap %d\n", ret); 250 252 return ret; 251 253 } 252 254 253 - if (test_check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) { 255 + if (test_check_exists(cache, SZ_4M, SZ_1M)) { 254 256 test_msg("Left remnants in the bitmap\n"); 255 257 return -1; 256 258 } ··· 259 261 * Ok so a little more evil, extent entry and bitmap at the same offset, 260 262 * removing an overlapping chunk. 261 263 */ 262 - ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1); 264 + ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1); 263 265 if (ret) { 264 266 test_msg("Couldn't add to a bitmap %d\n", ret); 265 267 return ret; 266 268 } 267 269 268 - ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024); 270 + ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M); 269 271 if (ret) { 270 272 test_msg("Couldn't remove overlapping space %d\n", ret); 271 273 return ret; 272 274 } 273 275 274 - if (test_check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) { 276 + if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) { 275 277 test_msg("Left over pieces after removing overlapping\n"); 276 278 return -1; 277 279 } ··· 279 281 __btrfs_remove_free_space_cache(cache->free_space_ctl); 280 282 281 283 /* Now with the extent entry offset into the bitmap */ 282 - ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1); 284 + ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1); 283 285 if (ret) { 284 286 test_msg("Couldn't add space to the bitmap %d\n", ret); 285 287 return ret; 286 288 } 287 289 288 - 
ret = test_add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0); 290 + ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0); 289 291 if (ret) { 290 292 test_msg("Couldn't add extent to the cache %d\n", ret); 291 293 return ret; 292 294 } 293 295 294 - ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024); 296 + ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M); 295 297 if (ret) { 296 298 test_msg("Problem removing overlapping space %d\n", ret); 297 299 return ret; 298 300 } 299 301 300 - if (test_check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) { 302 + if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) { 301 303 test_msg("Left something behind when removing space"); 302 304 return -1; 303 305 } ··· 313 315 * [ del ] 314 316 */ 315 317 __btrfs_remove_free_space_cache(cache->free_space_ctl); 316 - ret = test_add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024, 317 - 4 * 1024 * 1024, 1); 318 + ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1); 318 319 if (ret) { 319 320 test_msg("Couldn't add bitmap %d\n", ret); 320 321 return ret; 321 322 } 322 323 323 - ret = test_add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024, 324 - 5 * 1024 * 1024, 0); 324 + ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M, 325 + 5 * SZ_1M, 0); 325 326 if (ret) { 326 327 test_msg("Couldn't add extent entry %d\n", ret); 327 328 return ret; 328 329 } 329 330 330 - ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024, 331 - 5 * 1024 * 1024); 331 + ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M); 332 332 if (ret) { 333 333 test_msg("Failed to free our space %d\n", ret); 334 334 return ret; 335 335 } 336 336 337 - if (test_check_exists(cache, bitmap_offset + 1 * 1024 * 1024, 338 - 5 * 1024 * 1024)) { 337 + if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) { 339 338 test_msg("Left stuff over\n"); 340 339 return -1; 341 340 } ··· 345 350 * to return -EAGAIN 
back from btrfs_remove_extent, make sure this 346 351 * doesn't happen. 347 352 */ 348 - ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1); 353 + ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1); 349 354 if (ret) { 350 355 test_msg("Couldn't add bitmap entry %d\n", ret); 351 356 return ret; 352 357 } 353 358 354 - ret = test_add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0); 359 + ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0); 355 360 if (ret) { 356 361 test_msg("Couldn't add extent entry %d\n", ret); 357 362 return ret; 358 363 } 359 364 360 - ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024); 365 + ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M); 361 366 if (ret) { 362 367 test_msg("Error removing bitmap and extent overlapping %d\n", ret); 363 368 return ret; ··· 470 475 /* 471 476 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[ 472 477 */ 473 - ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024, 474 - 128 * 1024, 0); 478 + ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0); 475 479 if (ret) { 476 480 test_msg("Couldn't add extent entry %d\n", ret); 477 481 return ret; 478 482 } 479 483 480 484 /* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */ 481 - ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024, 482 - 128 * 1024 * 1024 - 512 * 1024, 1); 485 + ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K, 486 + SZ_128M - SZ_512K, 1); 483 487 if (ret) { 484 488 test_msg("Couldn't add bitmap entry %d\n", ret); 485 489 return ret; ··· 496 502 * [128Mb + 512Kb, 128Mb + 768Kb[ 497 503 */ 498 504 ret = btrfs_remove_free_space(cache, 499 - 128 * 1024 * 1024 + 768 * 1024, 500 - 128 * 1024 * 1024 - 768 * 1024); 505 + SZ_128M + 768 * SZ_1K, 506 + SZ_128M - 768 * SZ_1K); 501 507 if (ret) { 502 508 test_msg("Failed to free part of bitmap space %d\n", ret); 503 509 return ret; 504 510 } 505 
511 506 512 /* Confirm that only those 2 ranges are marked as free. */ 507 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024, 508 - 128 * 1024)) { 513 + if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) { 509 514 test_msg("Free space range missing\n"); 510 515 return -ENOENT; 511 516 } 512 - if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024, 513 - 256 * 1024)) { 517 + if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) { 514 518 test_msg("Free space range missing\n"); 515 519 return -ENOENT; 516 520 } ··· 517 525 * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked 518 526 * as free anymore. 519 527 */ 520 - if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024, 521 - 128 * 1024 * 1024 - 768 * 1024)) { 528 + if (test_check_exists(cache, SZ_128M + 768 * SZ_1K, 529 + SZ_128M - 768 * SZ_1K)) { 522 530 test_msg("Bitmap region not removed from space cache\n"); 523 531 return -EINVAL; 524 532 } ··· 527 535 * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is 528 536 * covered by the bitmap, isn't marked as free. 529 537 */ 530 - if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024, 531 - 256 * 1024)) { 538 + if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) { 532 539 test_msg("Invalid bitmap region marked as free\n"); 533 540 return -EINVAL; 534 541 } ··· 536 545 * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered 537 546 * by the bitmap too, isn't marked as free either. 538 547 */ 539 - if (test_check_exists(cache, 128 * 1024 * 1024, 540 - 256 * 1024)) { 548 + if (test_check_exists(cache, SZ_128M, SZ_256K)) { 541 549 test_msg("Invalid bitmap region marked as free\n"); 542 550 return -EINVAL; 543 551 } ··· 546 556 * lets make sure the free space cache marks it as free in the bitmap, 547 557 * and doesn't insert a new extent entry to represent this region. 
548 558 */ 549 - ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024); 559 + ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K); 550 560 if (ret) { 551 561 test_msg("Error adding free space: %d\n", ret); 552 562 return ret; 553 563 } 554 564 /* Confirm the region is marked as free. */ 555 - if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) { 565 + if (!test_check_exists(cache, SZ_128M, SZ_512K)) { 556 566 test_msg("Bitmap region not marked as free\n"); 557 567 return -ENOENT; 558 568 } ··· 571 581 * The goal is to test that the bitmap entry space stealing doesn't 572 582 * steal this space region. 573 583 */ 574 - ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024, 575 - 4096); 584 + ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096); 576 585 if (ret) { 577 586 test_msg("Error adding free space: %d\n", ret); 578 587 return ret; ··· 590 601 * expand the range covered by the existing extent entry that represents 591 602 * the free space [128Mb - 256Kb, 128Mb - 128Kb[. 592 603 */ 593 - ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024, 594 - 128 * 1024); 604 + ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K); 595 605 if (ret) { 596 606 test_msg("Error adding free space: %d\n", ret); 597 607 return ret; 598 608 } 599 609 /* Confirm the region is marked as free. */ 600 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024, 601 - 128 * 1024)) { 610 + if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) { 602 611 test_msg("Extent region not marked as free\n"); 603 612 return -ENOENT; 604 613 } ··· 624 637 * that represents the 1Mb free space, and therefore we're able to 625 638 * allocate the whole free space at once. 
626 639 */ 627 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024, 628 - 1 * 1024 * 1024)) { 640 + if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) { 629 641 test_msg("Expected region not marked as free\n"); 630 642 return -ENOENT; 631 643 } 632 644 633 - if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) { 645 + if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) { 634 646 test_msg("Cache free space is not 1Mb + 4Kb\n"); 635 647 return -EINVAL; 636 648 } 637 649 638 650 offset = btrfs_find_space_for_alloc(cache, 639 - 0, 1 * 1024 * 1024, 0, 651 + 0, SZ_1M, 0, 640 652 &max_extent_size); 641 - if (offset != (128 * 1024 * 1024 - 256 * 1024)) { 653 + if (offset != (SZ_128M - SZ_256K)) { 642 654 test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n", 643 655 offset); 644 656 return -EINVAL; ··· 656 670 offset = btrfs_find_space_for_alloc(cache, 657 671 0, 4096, 0, 658 672 &max_extent_size); 659 - if (offset != (128 * 1024 * 1024 + 16 * 1024 * 1024)) { 673 + if (offset != (SZ_128M + SZ_16M)) { 660 674 test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", 661 675 offset); 662 676 return -EINVAL; ··· 677 691 /* 678 692 * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[ 679 693 */ 680 - ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024, 681 - 128 * 1024, 0); 694 + ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0); 682 695 if (ret) { 683 696 test_msg("Couldn't add extent entry %d\n", ret); 684 697 return ret; 685 698 } 686 699 687 700 /* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */ 688 - ret = test_add_free_space_entry(cache, 0, 689 - 128 * 1024 * 1024 - 512 * 1024, 1); 701 + ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1); 690 702 if (ret) { 691 703 test_msg("Couldn't add bitmap entry %d\n", ret); 692 704 return ret; ··· 701 717 * [128Mb + 128b, 128Mb + 256Kb[ 702 718 * [128Mb - 768Kb, 128Mb 
- 512Kb[ 703 719 */ 704 - ret = btrfs_remove_free_space(cache, 705 - 0, 706 - 128 * 1024 * 1024 - 768 * 1024); 720 + ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K); 707 721 if (ret) { 708 722 test_msg("Failed to free part of bitmap space %d\n", ret); 709 723 return ret; 710 724 } 711 725 712 726 /* Confirm that only those 2 ranges are marked as free. */ 713 - if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024, 714 - 128 * 1024)) { 727 + if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) { 715 728 test_msg("Free space range missing\n"); 716 729 return -ENOENT; 717 730 } 718 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024, 719 - 256 * 1024)) { 731 + if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) { 720 732 test_msg("Free space range missing\n"); 721 733 return -ENOENT; 722 734 } ··· 721 741 * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked 722 742 * as free anymore. 723 743 */ 724 - if (test_check_exists(cache, 0, 725 - 128 * 1024 * 1024 - 768 * 1024)) { 744 + if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) { 726 745 test_msg("Bitmap region not removed from space cache\n"); 727 746 return -EINVAL; 728 747 } ··· 730 751 * Confirm that the region [128Mb - 512Kb, 128Mb[, which is 731 752 * covered by the bitmap, isn't marked as free. 732 753 */ 733 - if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024, 734 - 512 * 1024)) { 754 + if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { 735 755 test_msg("Invalid bitmap region marked as free\n"); 736 756 return -EINVAL; 737 757 } ··· 740 762 * lets make sure the free space cache marks it as free in the bitmap, 741 763 * and doesn't insert a new extent entry to represent this region. 
742 764 */ 743 - ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024, 744 - 512 * 1024); 765 + ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K); 745 766 if (ret) { 746 767 test_msg("Error adding free space: %d\n", ret); 747 768 return ret; 748 769 } 749 770 /* Confirm the region is marked as free. */ 750 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024, 751 - 512 * 1024)) { 771 + if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { 752 772 test_msg("Bitmap region not marked as free\n"); 753 773 return -ENOENT; 754 774 } ··· 765 789 * The goal is to test that the bitmap entry space stealing doesn't 766 790 * steal this space region. 767 791 */ 768 - ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192); 792 + ret = btrfs_add_free_space(cache, SZ_32M, 8192); 769 793 if (ret) { 770 794 test_msg("Error adding free space: %d\n", ret); 771 795 return ret; ··· 776 800 * expand the range covered by the existing extent entry that represents 777 801 * the free space [128Mb + 128Kb, 128Mb + 256Kb[. 778 802 */ 779 - ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024); 803 + ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K); 780 804 if (ret) { 781 805 test_msg("Error adding free space: %d\n", ret); 782 806 return ret; 783 807 } 784 808 /* Confirm the region is marked as free. */ 785 - if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) { 809 + if (!test_check_exists(cache, SZ_128M, SZ_128K)) { 786 810 test_msg("Extent region not marked as free\n"); 787 811 return -ENOENT; 788 812 } ··· 810 834 * that represents the 1Mb free space, and therefore we're able to 811 835 * allocate the whole free space at once. 
812 836 */ 813 - if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024, 814 - 1 * 1024 * 1024)) { 837 + if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) { 815 838 test_msg("Expected region not marked as free\n"); 816 839 return -ENOENT; 817 840 } 818 841 819 - if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) { 842 + if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) { 820 843 test_msg("Cache free space is not 1Mb + 8Kb\n"); 821 844 return -EINVAL; 822 845 } 823 846 824 - offset = btrfs_find_space_for_alloc(cache, 825 - 0, 1 * 1024 * 1024, 0, 847 + offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0, 826 848 &max_extent_size); 827 - if (offset != (128 * 1024 * 1024 - 768 * 1024)) { 849 + if (offset != (SZ_128M - 768 * SZ_1K)) { 828 850 test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n", 829 851 offset); 830 852 return -EINVAL; ··· 841 867 offset = btrfs_find_space_for_alloc(cache, 842 868 0, 8192, 0, 843 869 &max_extent_size); 844 - if (offset != (32 * 1024 * 1024)) { 870 + if (offset != SZ_32M) { 845 871 test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", 846 872 offset); 847 873 return -EINVAL;
+1 -1
fs/btrfs/tests/inode-tests.c
··· 100 100 static void setup_file_extents(struct btrfs_root *root) 101 101 { 102 102 int slot = 0; 103 - u64 disk_bytenr = 1 * 1024 * 1024; 103 + u64 disk_bytenr = SZ_1M; 104 104 u64 offset = 0; 105 105 106 106 /* First we want a hole */
+8 -8
fs/btrfs/volumes.c
··· 1406 1406 * we don't want to overwrite the superblock on the drive, 1407 1407 * so we make sure to start at an offset of at least 1MB 1408 1408 */ 1409 - search_start = max(root->fs_info->alloc_start, 1024ull * 1024); 1409 + search_start = max_t(u64, root->fs_info->alloc_start, SZ_1M); 1410 1410 return find_free_dev_extent_start(trans->transaction, device, 1411 1411 num_bytes, search_start, start, len); 1412 1412 } ··· 3405 3405 list_for_each_entry(device, devices, dev_list) { 3406 3406 old_size = btrfs_device_get_total_bytes(device); 3407 3407 size_to_free = div_factor(old_size, 1); 3408 - size_to_free = min(size_to_free, (u64)1 * 1024 * 1024); 3408 + size_to_free = min_t(u64, size_to_free, SZ_1M); 3409 3409 if (!device->writeable || 3410 3410 btrfs_device_get_total_bytes(device) - 3411 3411 btrfs_device_get_bytes_used(device) > size_to_free || ··· 4459 4459 static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target) 4460 4460 { 4461 4461 /* TODO allow them to set a preferred stripe size */ 4462 - return 64 * 1024; 4462 + return SZ_64K; 4463 4463 } 4464 4464 4465 4465 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) ··· 4527 4527 ncopies = btrfs_raid_array[index].ncopies; 4528 4528 4529 4529 if (type & BTRFS_BLOCK_GROUP_DATA) { 4530 - max_stripe_size = 1024 * 1024 * 1024; 4530 + max_stripe_size = SZ_1G; 4531 4531 max_chunk_size = 10 * max_stripe_size; 4532 4532 if (!devs_max) 4533 4533 devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4534 4534 } else if (type & BTRFS_BLOCK_GROUP_METADATA) { 4535 4535 /* for larger filesystems, use larger metadata chunks */ 4536 - if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024) 4537 - max_stripe_size = 1024 * 1024 * 1024; 4536 + if (fs_devices->total_rw_bytes > 50ULL * SZ_1G) 4537 + max_stripe_size = SZ_1G; 4538 4538 else 4539 - max_stripe_size = 256 * 1024 * 1024; 4539 + max_stripe_size = SZ_256M; 4540 4540 max_chunk_size = max_stripe_size; 4541 4541 if (!devs_max) 4542 4542 
devs_max = BTRFS_MAX_DEVS(info->chunk_root); 4543 4543 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { 4544 - max_stripe_size = 32 * 1024 * 1024; 4544 + max_stripe_size = SZ_32M; 4545 4545 max_chunk_size = 2 * max_stripe_size; 4546 4546 if (!devs_max) 4547 4547 devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
+1 -1
fs/btrfs/volumes.h
··· 26 26 27 27 extern struct mutex uuid_mutex; 28 28 29 - #define BTRFS_STRIPE_LEN (64 * 1024) 29 + #define BTRFS_STRIPE_LEN SZ_64K 30 30 31 31 struct buffer_head; 32 32 struct btrfs_pending_bios {