Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmc_test: fix large memory allocation

- Fix mmc_test_alloc_mem.

- Use nr_free_buffer_pages() instead of sysinfo.totalram to determine
total lowmem pages.

- Change variables containing memory sizes to unsigned long.

- Limit maximum test area size to 128MiB because that is the maximum MMC
high capacity erase size (the maximum SD allocation unit size is just
4MiB)

Signed-off-by: Adrian Hunter <adrian.hunter@nokia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Adrian Hunter and committed by
Linus Torvalds
fec4dcce 64f7120d

+47 -29
+47 -29
drivers/mmc/card/mmc_test.c
··· 16 16 #include <linux/slab.h> 17 17 18 18 #include <linux/scatterlist.h> 19 + #include <linux/swap.h> /* For nr_free_buffer_pages() */ 19 20 20 21 #define RESULT_OK 0 21 22 #define RESULT_FAIL 1 ··· 25 24 26 25 #define BUFFER_ORDER 2 27 26 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) 27 + 28 + /* 29 + * Limit the test area size to the maximum MMC HC erase group size. Note that 30 + * the maximum SD allocation unit size is just 4MiB. 31 + */ 32 + #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) 28 33 29 34 /** 30 35 * struct mmc_test_pages - pages allocated by 'alloc_pages()'. ··· 54 47 55 48 /** 56 49 * struct mmc_test_area - information for performance tests. 57 - * @dev_addr: address on card at which to do performance tests 58 50 * @max_sz: test area size (in bytes) 51 + * @dev_addr: address on card at which to do performance tests 59 52 * @max_segs: maximum segments in scatterlist @sg 60 53 * @blocks: number of (512 byte) blocks currently mapped by @sg 61 54 * @sg_len: length of currently mapped scatterlist @sg ··· 63 56 * @sg: scatterlist 64 57 */ 65 58 struct mmc_test_area { 59 + unsigned long max_sz; 66 60 unsigned int dev_addr; 67 - unsigned int max_sz; 68 61 unsigned int max_segs; 69 62 unsigned int blocks; 70 63 unsigned int sg_len; ··· 245 238 246 239 /* 247 240 * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case 248 - * there isn't much memory do not exceed 1/16th total RAM. 241 + * there isn't much memory do not exceed 1/16th total lowmem pages. 
249 242 */ 250 - static struct mmc_test_mem *mmc_test_alloc_mem(unsigned int min_sz, 251 - unsigned int max_sz) 243 + static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, 244 + unsigned long max_sz) 252 245 { 253 - unsigned int max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 254 - unsigned int min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 255 - unsigned int page_cnt = 0; 246 + unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); 247 + unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); 248 + unsigned long page_cnt = 0; 249 + unsigned long limit = nr_free_buffer_pages() >> 4; 256 250 struct mmc_test_mem *mem; 257 - struct sysinfo si; 258 251 259 - si_meminfo(&si); 260 - if (max_page_cnt > si.totalram >> 4) 261 - max_page_cnt = si.totalram >> 4; 252 + if (max_page_cnt > limit) 253 + max_page_cnt = limit; 262 254 if (max_page_cnt < min_page_cnt) 263 255 max_page_cnt = min_page_cnt; 264 256 ··· 276 270 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | 277 271 __GFP_NORETRY; 278 272 279 - order = get_order(page_cnt << PAGE_SHIFT); 273 + order = get_order(max_page_cnt << PAGE_SHIFT); 280 274 while (1) { 281 275 page = alloc_pages(flags, order); 282 276 if (page || !order) ··· 291 285 mem->arr[mem->cnt].page = page; 292 286 mem->arr[mem->cnt].order = order; 293 287 mem->cnt += 1; 294 - max_page_cnt -= 1 << order; 295 - page_cnt += 1 << order; 288 + if (max_page_cnt <= (1UL << order)) 289 + break; 290 + max_page_cnt -= 1UL << order; 291 + page_cnt += 1UL << order; 296 292 } 297 293 298 294 return mem; ··· 308 300 * Map memory into a scatterlist. Optionally allow the same memory to be 309 301 * mapped more than once. 
310 302 */ 311 - static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned int sz, 303 + static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, 312 304 struct scatterlist *sglist, int repeat, 313 305 unsigned int max_segs, unsigned int *sg_len) 314 306 { ··· 320 312 *sg_len = 0; 321 313 do { 322 314 for (i = 0; i < mem->cnt; i++) { 323 - unsigned int len = PAGE_SIZE << mem->arr[i].order; 315 + unsigned long len = PAGE_SIZE << mem->arr[i].order; 324 316 325 317 if (sz < len) 326 318 len = sz; ··· 352 344 * same memory to be mapped more than once. 353 345 */ 354 346 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, 355 - unsigned int sz, 347 + unsigned long sz, 356 348 struct scatterlist *sglist, 357 349 unsigned int max_segs, 358 350 unsigned int *sg_len) 359 351 { 360 352 struct scatterlist *sg = NULL; 361 - unsigned int i = mem->cnt, cnt, len; 353 + unsigned int i = mem->cnt, cnt; 354 + unsigned long len; 362 355 void *base, *addr, *last_addr = NULL; 363 356 364 357 sg_init_table(sglist, max_segs); ··· 1211 1202 /* 1212 1203 * Map sz bytes so that it can be transferred. 1213 1204 */ 1214 - static int mmc_test_area_map(struct mmc_test_card *test, unsigned int sz, 1205 + static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, 1215 1206 int max_scatter) 1216 1207 { 1217 1208 struct mmc_test_area *t = &test->area; ··· 1242 1233 /* 1243 1234 * Map and transfer bytes. 
1244 1235 */ 1245 - static int mmc_test_area_io(struct mmc_test_card *test, unsigned int sz, 1236 + static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, 1246 1237 unsigned int dev_addr, int write, int max_scatter, 1247 1238 int timed) 1248 1239 { ··· 1317 1308 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) 1318 1309 { 1319 1310 struct mmc_test_area *t = &test->area; 1320 - unsigned int min_sz = 64 * 1024; 1311 + unsigned long min_sz = 64 * 1024; 1321 1312 int ret; 1322 1313 1323 1314 ret = mmc_test_set_blksize(test, 512); 1324 1315 if (ret) 1325 1316 return ret; 1326 1317 1318 + if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9) 1319 + t->max_sz = TEST_AREA_MAX_SIZE; 1320 + else 1321 + t->max_sz = (unsigned long)test->card->pref_erase << 9; 1327 1322 /* 1328 1323 * Try to allocate enough memory for the whole area. Less is OK 1329 1324 * because the same memory can be mapped into the scatterlist more than 1330 1325 * once. 1331 1326 */ 1332 - t->max_sz = test->card->pref_erase << 9; 1333 1327 t->mem = mmc_test_alloc_mem(min_sz, t->max_sz); 1334 1328 if (!t->mem) 1335 1329 return -ENOMEM; ··· 1442 1430 */ 1443 1431 static int mmc_test_profile_read_perf(struct mmc_test_card *test) 1444 1432 { 1445 - unsigned int sz, dev_addr; 1433 + unsigned long sz; 1434 + unsigned int dev_addr; 1446 1435 int ret; 1447 1436 1448 1437 for (sz = 512; sz < test->area.max_sz; sz <<= 1) { ··· 1461 1448 */ 1462 1449 static int mmc_test_profile_write_perf(struct mmc_test_card *test) 1463 1450 { 1464 - unsigned int sz, dev_addr; 1451 + unsigned long sz; 1452 + unsigned int dev_addr; 1465 1453 int ret; 1466 1454 1467 1455 ret = mmc_test_area_erase(test); ··· 1486 1472 */ 1487 1473 static int mmc_test_profile_trim_perf(struct mmc_test_card *test) 1488 1474 { 1489 - unsigned int sz, dev_addr; 1475 + unsigned long sz; 1476 + unsigned int dev_addr; 1490 1477 struct timespec ts1, ts2; 1491 1478 int ret; 1492 1479 ··· 1521 1506 */ 1522 1507 
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) 1523 1508 { 1524 - unsigned int sz, dev_addr, i, cnt; 1509 + unsigned long sz; 1510 + unsigned int dev_addr, i, cnt; 1525 1511 struct timespec ts1, ts2; 1526 1512 int ret; 1527 1513 ··· 1547 1531 */ 1548 1532 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) 1549 1533 { 1550 - unsigned int sz, dev_addr, i, cnt; 1534 + unsigned long sz; 1535 + unsigned int dev_addr, i, cnt; 1551 1536 struct timespec ts1, ts2; 1552 1537 int ret; 1553 1538 ··· 1576 1559 */ 1577 1560 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) 1578 1561 { 1579 - unsigned int sz, dev_addr, i, cnt; 1562 + unsigned long sz; 1563 + unsigned int dev_addr, i, cnt; 1580 1564 struct timespec ts1, ts2; 1581 1565 int ret; 1582 1566