Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext4: teach ext4_ext_split to calculate extents efficiently

Make ext4_ext_split() compute the number of extents to be moved with a
single arithmetic expression instead of counting them in a loop.

Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

Authored by Yongqiang Yang and committed by Theodore Ts'o
1b16da77 ae24f28d

+46 -38
+46 -38
fs/ext4/extents.c
··· 482 482 } 483 483 ext_debug("\n"); 484 484 } 485 + 486 + static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, 487 + ext4_fsblk_t newblock, int level) 488 + { 489 + int depth = ext_depth(inode); 490 + struct ext4_extent *ex; 491 + 492 + if (depth != level) { 493 + struct ext4_extent_idx *idx; 494 + idx = path[level].p_idx; 495 + while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { 496 + ext_debug("%d: move %d:%llu in new index %llu\n", level, 497 + le32_to_cpu(idx->ei_block), 498 + ext4_idx_pblock(idx), 499 + newblock); 500 + idx++; 501 + } 502 + 503 + return; 504 + } 505 + 506 + ex = path[depth].p_ext; 507 + while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { 508 + ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", 509 + le32_to_cpu(ex->ee_block), 510 + ext4_ext_pblock(ex), 511 + ext4_ext_is_uninitialized(ex), 512 + ext4_ext_get_actual_len(ex), 513 + newblock); 514 + ex++; 515 + } 516 + } 517 + 485 518 #else 486 519 #define ext4_ext_show_path(inode, path) 487 520 #define ext4_ext_show_leaf(inode, path) 521 + #define ext4_ext_show_move(inode, path, newblock, level) 488 522 #endif 489 523 490 524 void ext4_ext_drop_refs(struct ext4_ext_path *path) ··· 842 808 int depth = ext_depth(inode); 843 809 struct ext4_extent_header *neh; 844 810 struct ext4_extent_idx *fidx; 845 - struct ext4_extent *ex; 846 811 int i = at, k, m, a; 847 812 ext4_fsblk_t newblock, oldblock; 848 813 __le32 border; ··· 918 885 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 919 886 neh->eh_magic = EXT4_EXT_MAGIC; 920 887 neh->eh_depth = 0; 921 - ex = EXT_FIRST_EXTENT(neh); 922 888 923 889 /* move remainder of path[depth] to the new leaf */ 924 890 if (unlikely(path[depth].p_hdr->eh_entries != ··· 929 897 goto cleanup; 930 898 } 931 899 /* start copy from next extent */ 932 - /* TODO: we could do it by single memmove */ 933 - m = 0; 934 - path[depth].p_ext++; 935 - while (path[depth].p_ext <= 936 - EXT_MAX_EXTENT(path[depth].p_hdr)) { 937 - ext_debug("move 
%d:%llu:[%d]%d in new leaf %llu\n", 938 - le32_to_cpu(path[depth].p_ext->ee_block), 939 - ext4_ext_pblock(path[depth].p_ext), 940 - ext4_ext_is_uninitialized(path[depth].p_ext), 941 - ext4_ext_get_actual_len(path[depth].p_ext), 942 - newblock); 943 - /*memmove(ex++, path[depth].p_ext++, 944 - sizeof(struct ext4_extent)); 945 - neh->eh_entries++;*/ 946 - path[depth].p_ext++; 947 - m++; 948 - } 900 + m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; 901 + ext4_ext_show_move(inode, path, newblock, depth); 949 902 if (m) { 950 - memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m); 903 + struct ext4_extent *ex; 904 + ex = EXT_FIRST_EXTENT(neh); 905 + memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); 951 906 le16_add_cpu(&neh->eh_entries, m); 952 907 } 953 908 ··· 996 977 997 978 ext_debug("int.index at %d (block %llu): %u -> %llu\n", 998 979 i, newblock, le32_to_cpu(border), oldblock); 999 - /* copy indexes */ 1000 - m = 0; 1001 - path[i].p_idx++; 1002 980 1003 - ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 1004 - EXT_MAX_INDEX(path[i].p_hdr)); 981 + /* move remainder of path[i] to the new index block */ 1005 982 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != 1006 983 EXT_LAST_INDEX(path[i].p_hdr))) { 1007 984 EXT4_ERROR_INODE(inode, ··· 1006 991 err = -EIO; 1007 992 goto cleanup; 1008 993 } 1009 - while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { 1010 - ext_debug("%d: move %d:%llu in new index %llu\n", i, 1011 - le32_to_cpu(path[i].p_idx->ei_block), 1012 - ext4_idx_pblock(path[i].p_idx), 1013 - newblock); 1014 - /*memmove(++fidx, path[i].p_idx++, 1015 - sizeof(struct ext4_extent_idx)); 1016 - neh->eh_entries++; 1017 - BUG_ON(neh->eh_entries > neh->eh_max);*/ 1018 - path[i].p_idx++; 1019 - m++; 1020 - } 994 + /* start copy indexes */ 995 + m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; 996 + ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, 997 + EXT_MAX_INDEX(path[i].p_hdr)); 998 + ext4_ext_show_move(inode, path, 
newblock, i); 1021 999 if (m) { 1022 - memmove(++fidx, path[i].p_idx - m, 1000 + memmove(++fidx, path[i].p_idx, 1023 1001 sizeof(struct ext4_extent_idx) * m); 1024 1002 le16_add_cpu(&neh->eh_entries, m); 1025 1003 }