ntfs: use bitmap_weight

Use bitmap_weight() instead of doing hweight32() for each u32 element in
the page.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Akinobu Mita and committed by Linus Torvalds
(commit c4af9644, parent bcc54e2a).

fs/ntfs/super.c: +13 -12
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -31,6 +31,7 @@
 #include <linux/vfs.h>
 #include <linux/moduleparam.h>
 #include <linux/smp_lock.h>
+#include <linux/bitmap.h>
 
 #include "sysctl.h"
 #include "logfile.h"
@@ -2459,7 +2458,6 @@
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
 	s64 nr_free = vol->nr_clusters;
-	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index, max_index;
@@ -2477,7 +2477,8 @@
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
 			max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2491,16 +2490,16 @@
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+					PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
@@ -2539,7 +2538,6 @@
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		s64 nr_free, const pgoff_t max_index)
 {
-	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index;
@@ -2548,7 +2548,8 @@
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2562,16 +2561,16 @@
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+					PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}