Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memblock tests: add memblock_reserve_many_may_conflict_check()

This may trigger the case fixed by commit 48c3b583bbdd ("mm/memblock:
fix overlapping allocation when doubling reserved array").

This is done by adding the 129th reserved region into memblock.memory. If
memblock_double_array() uses this reserved region as the new array, it fails.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Link: https://lore.kernel.org/r/20240507075833.6346-3-richard.weiyang@gmail.com
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>

authored by

Wei Yang and committed by
Mike Rapoport (IBM)
f6df89c3 3d316519

+154 -2
+151
tools/testing/memblock/tests/basic_api.c
··· 1088 1088 return 0; 1089 1089 } 1090 1090 1091 + /* 1092 + * A test that trying to reserve the 129th memory block at all locations. 1093 + * Expect to trigger memblock_double_array() to double the 1094 + * memblock.memory.max, find a new valid memory as reserved.regions. And make 1095 + * sure it doesn't conflict with the range we want to reserve. 1096 + * 1097 + * For example, we have 128 regions in reserved and now want to reserve 1098 + * the skipped one. Since reserved is full, memblock_double_array() would find 1099 + * an available range in memory for the new array. We intended to put two 1100 + * ranges in memory with one is the exact range of the skipped one. Before 1101 + * commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling 1102 + * reserved array"), the new array would sits in the skipped range which is a 1103 + * conflict. The expected new array should be allocated from memory.regions[0]. 1104 + * 1105 + * 0 1 1106 + * memory +-------+ +-------+ 1107 + * | 32K | | 32K | 1108 + * +-------+ ------+-------+-------+-------+ 1109 + * |<-32K->|<-32K->|<-32K->| 1110 + * 1111 + * 0 skipped 127 1112 + * reserved +-------+ ......... +-------+ 1113 + * | 32K | . 32K . ... | 32K | 1114 + * +-------+-------+-------+ +-------+ 1115 + * |<-32K->| 1116 + * ^ 1117 + * | 1118 + * | 1119 + * skipped one 1120 + */ 1121 + /* Keep the gap so these memory region will not be merged. */ 1122 + #define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx)) 1123 + static int memblock_reserve_many_may_conflict_check(void) 1124 + { 1125 + int i, skip; 1126 + void *orig_region; 1127 + struct region r = { 1128 + .base = SZ_16K, 1129 + .size = SZ_16K, 1130 + }; 1131 + phys_addr_t new_reserved_regions_size; 1132 + 1133 + /* 1134 + * 0 1 129 1135 + * +---+ +---+ +---+ 1136 + * |32K| |32K| .. 
|32K| 1137 + * +---+ +---+ +---+ 1138 + * 1139 + * Pre-allocate the range for 129 memory block + one range for double 1140 + * memblock.reserved.regions at idx 0. 1141 + */ 1142 + dummy_physical_memory_init(); 1143 + phys_addr_t memory_base = dummy_physical_memory_base(); 1144 + phys_addr_t offset = PAGE_ALIGN(memory_base); 1145 + 1146 + PREFIX_PUSH(); 1147 + 1148 + /* Reserve the 129th memory block for all possible positions*/ 1149 + for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) { 1150 + reset_memblock_regions(); 1151 + memblock_allow_resize(); 1152 + 1153 + reset_memblock_attributes(); 1154 + /* Add a valid memory region used by double_array(). */ 1155 + memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE); 1156 + /* 1157 + * Add a memory region which will be reserved as 129th memory 1158 + * region. This is not expected to be used by double_array(). 1159 + */ 1160 + memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE); 1161 + 1162 + for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) { 1163 + if (i == skip) 1164 + continue; 1165 + 1166 + /* Reserve some fakes memory region to fulfill the memblock. */ 1167 + memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE); 1168 + 1169 + if (i < skip) { 1170 + ASSERT_EQ(memblock.reserved.cnt, i); 1171 + ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE); 1172 + } else { 1173 + ASSERT_EQ(memblock.reserved.cnt, i - 1); 1174 + ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE); 1175 + } 1176 + } 1177 + 1178 + orig_region = memblock.reserved.regions; 1179 + 1180 + /* This reserve the 129 memory_region, and makes it double array. */ 1181 + memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE); 1182 + 1183 + /* 1184 + * This is the memory region size used by the doubled reserved.regions, 1185 + * and it has been reserved due to it has been used. The size is used to 1186 + * calculate the total_size that the memblock.reserved have now. 
1187 + */ 1188 + new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) * 1189 + sizeof(struct memblock_region)); 1190 + /* 1191 + * The double_array() will find a free memory region as the new 1192 + * reserved.regions, and the used memory region will be reserved, so 1193 + * there will be one more region exist in the reserved memblock. And the 1194 + * one more reserved region's size is new_reserved_regions_size. 1195 + */ 1196 + ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2); 1197 + ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE + 1198 + new_reserved_regions_size); 1199 + ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2); 1200 + 1201 + /* 1202 + * The first reserved region is allocated for double array 1203 + * with the size of new_reserved_regions_size and the base to be 1204 + * MEMORY_BASE_OFFSET(0, offset) + SZ_32K - new_reserved_regions_size 1205 + */ 1206 + ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size, 1207 + MEMORY_BASE_OFFSET(0, offset) + SZ_32K); 1208 + ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size); 1209 + 1210 + /* 1211 + * Now memblock_double_array() works fine. Let's check after the 1212 + * double_array(), the memblock_reserve() still works as normal. 1213 + */ 1214 + memblock_reserve(r.base, r.size); 1215 + ASSERT_EQ(memblock.reserved.regions[0].base, r.base); 1216 + ASSERT_EQ(memblock.reserved.regions[0].size, r.size); 1217 + 1218 + ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3); 1219 + ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE + 1220 + new_reserved_regions_size + 1221 + r.size); 1222 + ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2); 1223 + 1224 + /* 1225 + * The current reserved.regions is occupying a range of memory that 1226 + * allocated from dummy_physical_memory_init(). After free the memory, 1227 + * we must not use it. 
So restore the origin memory region to make sure 1228 + * the tests can run as normal and not affected by the double array. 1229 + */ 1230 + memblock.reserved.regions = orig_region; 1231 + memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS; 1232 + } 1233 + 1234 + dummy_physical_memory_cleanup(); 1235 + 1236 + test_pass_pop(); 1237 + 1238 + return 0; 1239 + } 1240 + 1091 1241 static int memblock_reserve_checks(void) 1092 1242 { 1093 1243 prefix_reset(); ··· 1254 1104 memblock_reserve_near_max_check(); 1255 1105 memblock_reserve_many_check(); 1256 1106 memblock_reserve_all_locations_check(); 1107 + memblock_reserve_many_may_conflict_check(); 1257 1108 1258 1109 prefix_pop(); 1259 1110
+2 -2
tools/testing/memblock/tests/common.c
··· 61 61 62 62 static inline void fill_memblock(void) 63 63 { 64 - memset(memory_block.base, 1, MEM_SIZE); 64 + memset(memory_block.base, 1, PHYS_MEM_SIZE); 65 65 } 66 66 67 67 void setup_memblock(void) ··· 103 103 104 104 void dummy_physical_memory_init(void) 105 105 { 106 - memory_block.base = malloc(MEM_SIZE); 106 + memory_block.base = malloc(PHYS_MEM_SIZE); 107 107 assert(memory_block.base); 108 108 fill_memblock(); 109 109 }
+1
tools/testing/memblock/tests/common.h
··· 12 12 #include <../selftests/kselftest.h> 13 13 14 14 #define MEM_SIZE SZ_32K 15 + #define PHYS_MEM_SIZE SZ_16M 15 16 #define NUMA_NODES 8 16 17 17 18 #define INIT_MEMBLOCK_REGIONS 128