Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ext4: refactor choose group to scan group

This commit converts the `choose group` logic to `scan group` using
previously prepared helper functions. This allows us to leverage xarrays
for ordered non-linear traversal, thereby mitigating the "bouncing" issue
inherent in the `choose group` mechanism.

This also decouples linear and non-linear traversals, leading to cleaner
and more readable code.

Key changes:

* ext4_mb_choose_next_group() is refactored to ext4_mb_scan_groups().

* Replaced ext4_mb_good_group() with ext4_mb_scan_group() in non-linear
traversals, and related functions now return error codes instead of
group info.

* Added ext4_mb_scan_groups_linear() for performing linear scans,
starting from a specific group and scanning up to a set number of groups.

* Linear scans now execute up to sbi->s_mb_max_linear_groups times,
so ac_groups_linear_remaining is removed as it's no longer used.

* ac->ac_criteria is now used directly instead of passing cr around.
Also, ac->ac_criteria is incremented directly after groups scan fails
for the corresponding criteria.

* Since we're now directly scanning groups instead of finding a good group
and then scanning it, the following variables and flags are no longer
needed; s_bal_cX_groups_considered is sufficient.

s_bal_p2_aligned_bad_suggestions
s_bal_goal_fast_bad_suggestions
s_bal_best_avail_bad_suggestions
EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED
EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED
EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED

Signed-off-by: Baokun Li <libaokun1@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Link: https://patch.msgid.link/20250714130327.1830534-17-libaokun1@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

Authored by Baokun Li; committed by Theodore Ts'o.
Commit hashes: 63475587, f7eaacbb

+131 -174
-12
fs/ext4/ext4.h
··· 207 207 #define EXT4_MB_USE_RESERVED 0x2000 208 208 /* Do strict check for free blocks while retrying block allocation */ 209 209 #define EXT4_MB_STRICT_CHECK 0x4000 210 - /* Large fragment size list lookup succeeded at least once for 211 - * CR_POWER2_ALIGNED */ 212 - #define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000 213 - /* Avg fragment size rb tree lookup succeeded at least once for 214 - * CR_GOAL_LEN_FAST */ 215 - #define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000 216 - /* Avg fragment size rb tree lookup succeeded at least once for 217 - * CR_BEST_AVAIL_LEN */ 218 - #define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000 219 210 220 211 struct ext4_allocation_request { 221 212 /* target inode for block we're allocating */ ··· 1634 1643 atomic_t s_bal_len_goals; /* len goal hits */ 1635 1644 atomic_t s_bal_breaks; /* too long searches */ 1636 1645 atomic_t s_bal_2orders; /* 2^order hits */ 1637 - atomic_t s_bal_p2_aligned_bad_suggestions; 1638 - atomic_t s_bal_goal_fast_bad_suggestions; 1639 - atomic_t s_bal_best_avail_bad_suggestions; 1640 1646 atomic64_t s_bal_cX_groups_considered[EXT4_MB_NUM_CRS]; 1641 1647 atomic64_t s_bal_cX_hits[EXT4_MB_NUM_CRS]; 1642 1648 atomic64_t s_bal_cX_failed[EXT4_MB_NUM_CRS]; /* cX loop didn't find blocks */
+131 -161
fs/ext4/mballoc.c
··· 425 425 ext4_group_t group); 426 426 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac); 427 427 428 - static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 429 - ext4_group_t group, enum criteria cr); 428 + static int ext4_mb_scan_group(struct ext4_allocation_context *ac, 429 + ext4_group_t group); 430 430 431 431 static int ext4_try_to_trim_range(struct super_block *sb, 432 432 struct ext4_buddy *e4b, ext4_grpblk_t start, ··· 875 875 } 876 876 } 877 877 878 - static struct ext4_group_info * 879 - ext4_mb_find_good_group_xarray(struct ext4_allocation_context *ac, 880 - struct xarray *xa, ext4_group_t start) 878 + static int ext4_mb_scan_groups_xarray(struct ext4_allocation_context *ac, 879 + struct xarray *xa, ext4_group_t start) 881 880 { 882 881 struct super_block *sb = ac->ac_sb; 883 882 struct ext4_sb_info *sbi = EXT4_SB(sb); ··· 887 888 struct ext4_group_info *grp; 888 889 889 890 if (WARN_ON_ONCE(start >= end)) 890 - return NULL; 891 + return 0; 891 892 892 893 wrap_around: 893 894 xa_for_each_range(xa, group, grp, start, end - 1) { 895 + int err; 896 + 894 897 if (sbi->s_mb_stats) 895 898 atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); 896 899 897 - if (!spin_is_locked(ext4_group_lock_ptr(sb, group)) && 898 - likely(ext4_mb_good_group(ac, group, cr))) 899 - return grp; 900 + err = ext4_mb_scan_group(ac, grp->bb_group); 901 + if (err || ac->ac_status != AC_STATUS_CONTINUE) 902 + return err; 900 903 901 904 cond_resched(); 902 905 } ··· 909 908 goto wrap_around; 910 909 } 911 910 912 - return NULL; 911 + return 0; 913 912 } 914 913 915 914 /* 916 915 * Find a suitable group of given order from the largest free orders xarray. 
917 916 */ 918 - static struct ext4_group_info * 919 - ext4_mb_find_good_group_largest_free_order(struct ext4_allocation_context *ac, 920 - int order, ext4_group_t start) 917 + static int 918 + ext4_mb_scan_groups_largest_free_order(struct ext4_allocation_context *ac, 919 + int order, ext4_group_t start) 921 920 { 922 921 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_largest_free_orders[order]; 923 922 924 923 if (xa_empty(xa)) 925 - return NULL; 924 + return 0; 926 925 927 - return ext4_mb_find_good_group_xarray(ac, xa, start); 926 + return ext4_mb_scan_groups_xarray(ac, xa, start); 928 927 } 929 928 930 929 /* 931 930 * Choose next group by traversing largest_free_order lists. Updates *new_cr if 932 931 * cr level needs an update. 933 932 */ 934 - static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac, 935 - enum criteria *new_cr, ext4_group_t *group) 933 + static int ext4_mb_scan_groups_p2_aligned(struct ext4_allocation_context *ac, 934 + ext4_group_t group) 936 935 { 937 936 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 938 - struct ext4_group_info *grp; 939 937 int i; 940 - 941 - if (ac->ac_status == AC_STATUS_FOUND) 942 - return; 943 - 944 - if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) 945 - atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); 938 + int ret = 0; 946 939 947 940 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 948 - grp = ext4_mb_find_good_group_largest_free_order(ac, i, *group); 949 - if (grp) { 950 - *group = grp->bb_group; 951 - ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; 952 - return; 953 - } 941 + ret = ext4_mb_scan_groups_largest_free_order(ac, i, group); 942 + if (ret || ac->ac_status != AC_STATUS_CONTINUE) 943 + return ret; 954 944 } 955 945 946 + if (sbi->s_mb_stats) 947 + atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 948 + 956 949 /* Increment cr and search again if no group is found */ 957 - *new_cr = CR_GOAL_LEN_FAST; 950 + 
ac->ac_criteria = CR_GOAL_LEN_FAST; 951 + return ret; 958 952 } 959 953 960 954 /* 961 955 * Find a suitable group of given order from the average fragments xarray. 962 956 */ 963 - static struct ext4_group_info * 964 - ext4_mb_find_good_group_avg_frag_xarray(struct ext4_allocation_context *ac, 965 - int order, ext4_group_t start) 957 + static int ext4_mb_scan_groups_avg_frag_order(struct ext4_allocation_context *ac, 958 + int order, ext4_group_t start) 966 959 { 967 960 struct xarray *xa = &EXT4_SB(ac->ac_sb)->s_mb_avg_fragment_size[order]; 968 961 969 962 if (xa_empty(xa)) 970 - return NULL; 963 + return 0; 971 964 972 - return ext4_mb_find_good_group_xarray(ac, xa, start); 965 + return ext4_mb_scan_groups_xarray(ac, xa, start); 973 966 } 974 967 975 968 /* 976 969 * Choose next group by traversing average fragment size list of suitable 977 970 * order. Updates *new_cr if cr level needs an update. 978 971 */ 979 - static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac, 980 - enum criteria *new_cr, ext4_group_t *group) 972 + static int ext4_mb_scan_groups_goal_fast(struct ext4_allocation_context *ac, 973 + ext4_group_t group) 981 974 { 982 975 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 983 - struct ext4_group_info *grp = NULL; 984 - int i; 976 + int i, ret = 0; 985 977 986 - if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { 987 - if (sbi->s_mb_stats) 988 - atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); 978 + i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); 979 + for (; i < MB_NUM_ORDERS(ac->ac_sb); i++) { 980 + ret = ext4_mb_scan_groups_avg_frag_order(ac, i, group); 981 + if (ret || ac->ac_status != AC_STATUS_CONTINUE) 982 + return ret; 989 983 } 990 984 991 - for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); 992 - i < MB_NUM_ORDERS(ac->ac_sb); i++) { 993 - grp = ext4_mb_find_good_group_avg_frag_xarray(ac, i, *group); 994 - if (grp) { 995 - *group = grp->bb_group; 996 - 
ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; 997 - return; 998 - } 999 - } 1000 - 985 + if (sbi->s_mb_stats) 986 + atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 1001 987 /* 1002 988 * CR_BEST_AVAIL_LEN works based on the concept that we have 1003 989 * a larger normalized goal len request which can be trimmed to ··· 994 1006 * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA). 995 1007 */ 996 1008 if (ac->ac_flags & EXT4_MB_HINT_DATA) 997 - *new_cr = CR_BEST_AVAIL_LEN; 1009 + ac->ac_criteria = CR_BEST_AVAIL_LEN; 998 1010 else 999 - *new_cr = CR_GOAL_LEN_SLOW; 1011 + ac->ac_criteria = CR_GOAL_LEN_SLOW; 1012 + 1013 + return ret; 1000 1014 } 1001 1015 1002 1016 /* ··· 1010 1020 * preallocations. However, we make sure that we don't trim the request too 1011 1021 * much and fall to CR_GOAL_LEN_SLOW in that case. 1012 1022 */ 1013 - static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac, 1014 - enum criteria *new_cr, ext4_group_t *group) 1023 + static int ext4_mb_scan_groups_best_avail(struct ext4_allocation_context *ac, 1024 + ext4_group_t group) 1015 1025 { 1026 + int ret = 0; 1016 1027 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1017 - struct ext4_group_info *grp = NULL; 1018 1028 int i, order, min_order; 1019 1029 unsigned long num_stripe_clusters = 0; 1020 - 1021 - if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { 1022 - if (sbi->s_mb_stats) 1023 - atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); 1024 - } 1025 1030 1026 1031 /* 1027 1032 * mb_avg_fragment_size_order() returns order in a way that makes ··· 1070 1085 frag_order = mb_avg_fragment_size_order(ac->ac_sb, 1071 1086 ac->ac_g_ex.fe_len); 1072 1087 1073 - grp = ext4_mb_find_good_group_avg_frag_xarray(ac, frag_order, 1074 - *group); 1075 - if (grp) { 1076 - *group = grp->bb_group; 1077 - ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; 1078 - return; 1079 - } 1088 + ret = ext4_mb_scan_groups_avg_frag_order(ac, frag_order, 
group); 1089 + if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1090 + return ret; 1080 1091 } 1081 1092 1082 1093 /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */ 1083 1094 ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 1084 - *new_cr = CR_GOAL_LEN_SLOW; 1095 + if (sbi->s_mb_stats) 1096 + atomic64_inc(&sbi->s_bal_cX_failed[ac->ac_criteria]); 1097 + ac->ac_criteria = CR_GOAL_LEN_SLOW; 1098 + 1099 + return ret; 1085 1100 } 1086 1101 1087 1102 static inline int should_optimize_scan(struct ext4_allocation_context *ac) ··· 1096 1111 } 1097 1112 1098 1113 /* 1099 - * Return next linear group for allocation. 1114 + * next linear group for allocation. 1100 1115 */ 1101 - static ext4_group_t 1102 - next_linear_group(ext4_group_t group, ext4_group_t ngroups) 1116 + static void next_linear_group(ext4_group_t *group, ext4_group_t ngroups) 1103 1117 { 1104 1118 /* 1105 1119 * Artificially restricted ngroups for non-extent 1106 1120 * files makes group > ngroups possible on first loop. 1107 1121 */ 1108 - return group + 1 >= ngroups ? 0 : group + 1; 1122 + *group = *group + 1 >= ngroups ? 0 : *group + 1; 1109 1123 } 1110 1124 1111 - /* 1112 - * ext4_mb_choose_next_group: choose next group for allocation. 1113 - * 1114 - * @ac Allocation Context 1115 - * @new_cr This is an output parameter. If the there is no good group 1116 - * available at current CR level, this field is updated to indicate 1117 - * the new cr level that should be used. 1118 - * @group This is an input / output parameter. As an input it indicates the 1119 - * next group that the allocator intends to use for allocation. As 1120 - * output, this field indicates the next group that should be used as 1121 - * determined by the optimization functions. 
1122 - * @ngroups Total number of groups 1123 - */ 1124 - static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, 1125 - enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) 1125 + static int ext4_mb_scan_groups_linear(struct ext4_allocation_context *ac, 1126 + ext4_group_t ngroups, ext4_group_t *start, ext4_group_t count) 1126 1127 { 1127 - *new_cr = ac->ac_criteria; 1128 + int ret, i; 1129 + enum criteria cr = ac->ac_criteria; 1130 + struct super_block *sb = ac->ac_sb; 1131 + struct ext4_sb_info *sbi = EXT4_SB(sb); 1132 + ext4_group_t group = *start; 1128 1133 1129 - if (!should_optimize_scan(ac)) { 1130 - *group = next_linear_group(*group, ngroups); 1131 - return; 1134 + for (i = 0; i < count; i++, next_linear_group(&group, ngroups)) { 1135 + ret = ext4_mb_scan_group(ac, group); 1136 + if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1137 + return ret; 1138 + cond_resched(); 1132 1139 } 1140 + 1141 + *start = group; 1142 + if (count == ngroups) 1143 + ac->ac_criteria++; 1144 + 1145 + /* Processed all groups and haven't found blocks */ 1146 + if (sbi->s_mb_stats && i == ngroups) 1147 + atomic64_inc(&sbi->s_bal_cX_failed[cr]); 1148 + 1149 + return 0; 1150 + } 1151 + 1152 + static int ext4_mb_scan_groups(struct ext4_allocation_context *ac) 1153 + { 1154 + int ret = 0; 1155 + ext4_group_t start; 1156 + struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1157 + ext4_group_t ngroups = ext4_get_groups_count(ac->ac_sb); 1158 + 1159 + /* non-extent files are limited to low blocks/groups */ 1160 + if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 1161 + ngroups = sbi->s_blockfile_groups; 1162 + 1163 + /* searching for the right group start from the goal value specified */ 1164 + start = ac->ac_g_ex.fe_group; 1165 + ac->ac_prefetch_grp = start; 1166 + ac->ac_prefetch_nr = 0; 1167 + 1168 + if (!should_optimize_scan(ac)) 1169 + return ext4_mb_scan_groups_linear(ac, ngroups, &start, ngroups); 1133 1170 1134 1171 /* 1135 1172 * Optimized 
scanning can return non adjacent groups which can cause 1136 1173 * seek overhead for rotational disks. So try few linear groups before 1137 1174 * trying optimized scan. 1138 1175 */ 1139 - if (ac->ac_groups_linear_remaining) { 1140 - *group = next_linear_group(*group, ngroups); 1141 - ac->ac_groups_linear_remaining--; 1142 - return; 1143 - } 1176 + if (sbi->s_mb_max_linear_groups) 1177 + ret = ext4_mb_scan_groups_linear(ac, ngroups, &start, 1178 + sbi->s_mb_max_linear_groups); 1179 + if (ret || ac->ac_status != AC_STATUS_CONTINUE) 1180 + return ret; 1144 1181 1145 - if (*new_cr == CR_POWER2_ALIGNED) { 1146 - ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group); 1147 - } else if (*new_cr == CR_GOAL_LEN_FAST) { 1148 - ext4_mb_choose_next_group_goal_fast(ac, new_cr, group); 1149 - } else if (*new_cr == CR_BEST_AVAIL_LEN) { 1150 - ext4_mb_choose_next_group_best_avail(ac, new_cr, group); 1151 - } else { 1182 + switch (ac->ac_criteria) { 1183 + case CR_POWER2_ALIGNED: 1184 + return ext4_mb_scan_groups_p2_aligned(ac, start); 1185 + case CR_GOAL_LEN_FAST: 1186 + return ext4_mb_scan_groups_goal_fast(ac, start); 1187 + case CR_BEST_AVAIL_LEN: 1188 + return ext4_mb_scan_groups_best_avail(ac, start); 1189 + default: 1152 1190 /* 1153 1191 * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an 1154 1192 * rb tree sorted by bb_free. 
But until that happens, we should ··· 1179 1171 */ 1180 1172 WARN_ON(1); 1181 1173 } 1174 + 1175 + return 0; 1182 1176 } 1183 1177 1184 1178 /* ··· 2938 2928 static noinline_for_stack int 2939 2929 ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2940 2930 { 2941 - ext4_group_t ngroups, group, i; 2942 - enum criteria new_cr, cr = CR_GOAL_LEN_FAST; 2931 + ext4_group_t i; 2943 2932 int err = 0; 2944 - struct ext4_sb_info *sbi; 2945 - struct super_block *sb; 2933 + struct super_block *sb = ac->ac_sb; 2934 + struct ext4_sb_info *sbi = EXT4_SB(sb); 2946 2935 struct ext4_buddy e4b; 2947 - int lost; 2948 - 2949 - sb = ac->ac_sb; 2950 - sbi = EXT4_SB(sb); 2951 - ngroups = ext4_get_groups_count(sb); 2952 - /* non-extent files are limited to low blocks/groups */ 2953 - if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2954 - ngroups = sbi->s_blockfile_groups; 2955 2936 2956 2937 BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2957 2938 ··· 2988 2987 * start with CR_GOAL_LEN_FAST, unless it is power of 2 2989 2988 * aligned, in which case let's do that faster approach first. 
2990 2989 */ 2990 + ac->ac_criteria = CR_GOAL_LEN_FAST; 2991 2991 if (ac->ac_2order) 2992 - cr = CR_POWER2_ALIGNED; 2992 + ac->ac_criteria = CR_POWER2_ALIGNED; 2993 2993 2994 2994 ac->ac_e4b = &e4b; 2995 2995 ac->ac_prefetch_ios = 0; 2996 2996 ac->ac_first_err = 0; 2997 2997 repeat: 2998 - for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2999 - ac->ac_criteria = cr; 3000 - /* 3001 - * searching for the right group start 3002 - * from the goal value specified 3003 - */ 3004 - group = ac->ac_g_ex.fe_group; 3005 - ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 3006 - ac->ac_prefetch_grp = group; 3007 - ac->ac_prefetch_nr = 0; 2998 + while (ac->ac_criteria < EXT4_MB_NUM_CRS) { 2999 + err = ext4_mb_scan_groups(ac); 3000 + if (err) 3001 + goto out; 3008 3002 3009 - for (i = 0, new_cr = cr; i < ngroups; i++, 3010 - ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 3011 - 3012 - cond_resched(); 3013 - if (new_cr != cr) { 3014 - cr = new_cr; 3015 - goto repeat; 3016 - } 3017 - 3018 - err = ext4_mb_scan_group(ac, group); 3019 - if (err) 3020 - goto out; 3021 - 3022 - if (ac->ac_status != AC_STATUS_CONTINUE) 3023 - break; 3024 - } 3025 - /* Processed all groups and haven't found blocks */ 3026 - if (sbi->s_mb_stats && i == ngroups) 3027 - atomic64_inc(&sbi->s_bal_cX_failed[cr]); 3028 - 3029 - if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) 3030 - /* Reset goal length to original goal length before 3031 - * falling into CR_GOAL_LEN_SLOW */ 3032 - ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; 3003 + if (ac->ac_status != AC_STATUS_CONTINUE) 3004 + break; 3033 3005 } 3034 3006 3035 3007 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && ··· 3013 3039 */ 3014 3040 ext4_mb_try_best_found(ac, &e4b); 3015 3041 if (ac->ac_status != AC_STATUS_FOUND) { 3042 + int lost; 3043 + 3016 3044 /* 3017 3045 * Someone more lucky has already allocated it. 
3018 3046 * The only thing we can do is just take first ··· 3030 3054 ac->ac_b_ex.fe_len = 0; 3031 3055 ac->ac_status = AC_STATUS_CONTINUE; 3032 3056 ac->ac_flags |= EXT4_MB_HINT_FIRST; 3033 - cr = CR_ANY_FREE; 3057 + ac->ac_criteria = CR_ANY_FREE; 3034 3058 goto repeat; 3035 3059 } 3036 3060 } ··· 3047 3071 3048 3072 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 3049 3073 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 3050 - ac->ac_flags, cr, err); 3074 + ac->ac_flags, ac->ac_criteria, err); 3051 3075 3052 3076 if (ac->ac_prefetch_nr) 3053 3077 ext4_mb_prefetch_fini(sb, ac->ac_prefetch_grp, ac->ac_prefetch_nr); ··· 3173 3197 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); 3174 3198 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3175 3199 atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); 3176 - seq_printf(seq, "\t\tbad_suggestions: %u\n", 3177 - atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); 3178 3200 3179 3201 /* CR_GOAL_LEN_FAST stats */ 3180 3202 seq_puts(seq, "\tcr_goal_fast_stats:\n"); ··· 3185 3211 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); 3186 3212 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3187 3213 atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); 3188 - seq_printf(seq, "\t\tbad_suggestions: %u\n", 3189 - atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); 3190 3214 3191 3215 /* CR_BEST_AVAIL_LEN stats */ 3192 3216 seq_puts(seq, "\tcr_best_avail_stats:\n"); ··· 3198 3226 atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); 3199 3227 seq_printf(seq, "\t\tuseless_loops: %llu\n", 3200 3228 atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); 3201 - seq_printf(seq, "\t\tbad_suggestions: %u\n", 3202 - atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); 3203 3229 3204 3230 /* CR_GOAL_LEN_SLOW stats */ 3205 3231 seq_puts(seq, "\tcr_goal_slow_stats:\n");
-1
fs/ext4/mballoc.h
··· 199 199 int ac_first_err; 200 200 201 201 __u32 ac_flags; /* allocation hints */ 202 - __u32 ac_groups_linear_remaining; 203 202 __u16 ac_groups_scanned; 204 203 __u16 ac_found; 205 204 __u16 ac_cX_found[EXT4_MB_NUM_CRS];