ntfs: use bitmap_weight

Use bitmap_weight() instead of doing hweight32() for each u32 element in
the page.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Akinobu Mita and committed by Linus Torvalds.
Commit: c4af9644 (related hash: bcc54e2a — likely the parent or tree ID; the rendering does not label it)

Diffstat: +13 -12 (13 lines added, 12 lines removed)
fs/ntfs/super.c
··· 31 #include <linux/vfs.h> 32 #include <linux/moduleparam.h> 33 #include <linux/smp_lock.h> 34 35 #include "sysctl.h" 36 #include "logfile.h" ··· 2459 static s64 get_nr_free_clusters(ntfs_volume *vol) 2460 { 2461 s64 nr_free = vol->nr_clusters; 2462 - u32 *kaddr; 2463 struct address_space *mapping = vol->lcnbmp_ino->i_mapping; 2464 struct page *page; 2465 pgoff_t index, max_index; ··· 2477 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2478 max_index, PAGE_CACHE_SIZE / 4); 2479 for (index = 0; index < max_index; index++) { 2480 - unsigned int i; 2481 /* 2482 * Read the page from page cache, getting it from backing store 2483 * if necessary, and increment the use count. ··· 2491 nr_free -= PAGE_CACHE_SIZE * 8; 2492 continue; 2493 } 2494 - kaddr = (u32*)kmap_atomic(page, KM_USER0); 2495 /* 2496 - * For each 4 bytes, subtract the number of set bits. If this 2497 * is the last page and it is partial we don't really care as 2498 * it just means we do a little extra work but it won't affect 2499 * the result as all out of range bytes are set to zero by 2500 * ntfs_readpage(). 2501 */ 2502 - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) 2503 - nr_free -= (s64)hweight32(kaddr[i]); 2504 kunmap_atomic(kaddr, KM_USER0); 2505 page_cache_release(page); 2506 } ··· 2539 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, 2540 s64 nr_free, const pgoff_t max_index) 2541 { 2542 - u32 *kaddr; 2543 struct address_space *mapping = vol->mftbmp_ino->i_mapping; 2544 struct page *page; 2545 pgoff_t index; ··· 2548 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2549 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); 2550 for (index = 0; index < max_index; index++) { 2551 - unsigned int i; 2552 /* 2553 * Read the page from page cache, getting it from backing store 2554 * if necessary, and increment the use count. 
··· 2562 nr_free -= PAGE_CACHE_SIZE * 8; 2563 continue; 2564 } 2565 - kaddr = (u32*)kmap_atomic(page, KM_USER0); 2566 /* 2567 - * For each 4 bytes, subtract the number of set bits. If this 2568 * is the last page and it is partial we don't really care as 2569 * it just means we do a little extra work but it won't affect 2570 * the result as all out of range bytes are set to zero by 2571 * ntfs_readpage(). 2572 */ 2573 - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) 2574 - nr_free -= (s64)hweight32(kaddr[i]); 2575 kunmap_atomic(kaddr, KM_USER0); 2576 page_cache_release(page); 2577 }
··· 31 #include <linux/vfs.h> 32 #include <linux/moduleparam.h> 33 #include <linux/smp_lock.h> 34 + #include <linux/bitmap.h> 35 36 #include "sysctl.h" 37 #include "logfile.h" ··· 2458 static s64 get_nr_free_clusters(ntfs_volume *vol) 2459 { 2460 s64 nr_free = vol->nr_clusters; 2461 struct address_space *mapping = vol->lcnbmp_ino->i_mapping; 2462 struct page *page; 2463 pgoff_t index, max_index; ··· 2477 ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", 2478 max_index, PAGE_CACHE_SIZE / 4); 2479 for (index = 0; index < max_index; index++) { 2480 + unsigned long *kaddr; 2481 + 2482 /* 2483 * Read the page from page cache, getting it from backing store 2484 * if necessary, and increment the use count. ··· 2490 nr_free -= PAGE_CACHE_SIZE * 8; 2491 continue; 2492 } 2493 + kaddr = kmap_atomic(page, KM_USER0); 2494 /* 2495 + * Subtract the number of set bits. If this 2496 * is the last page and it is partial we don't really care as 2497 * it just means we do a little extra work but it won't affect 2498 * the result as all out of range bytes are set to zero by 2499 * ntfs_readpage(). 2500 */ 2501 + nr_free -= bitmap_weight(kaddr, 2502 + PAGE_CACHE_SIZE * BITS_PER_BYTE); 2503 kunmap_atomic(kaddr, KM_USER0); 2504 page_cache_release(page); 2505 } ··· 2538 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, 2539 s64 nr_free, const pgoff_t max_index) 2540 { 2541 struct address_space *mapping = vol->mftbmp_ino->i_mapping; 2542 struct page *page; 2543 pgoff_t index; ··· 2548 ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " 2549 "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); 2550 for (index = 0; index < max_index; index++) { 2551 + unsigned long *kaddr; 2552 + 2553 /* 2554 * Read the page from page cache, getting it from backing store 2555 * if necessary, and increment the use count. 
··· 2561 nr_free -= PAGE_CACHE_SIZE * 8; 2562 continue; 2563 } 2564 + kaddr = kmap_atomic(page, KM_USER0); 2565 /* 2566 + * Subtract the number of set bits. If this 2567 * is the last page and it is partial we don't really care as 2568 * it just means we do a little extra work but it won't affect 2569 * the result as all out of range bytes are set to zero by 2570 * ntfs_readpage(). 2571 */ 2572 + nr_free -= bitmap_weight(kaddr, 2573 + PAGE_CACHE_SIZE * BITS_PER_BYTE); 2574 kunmap_atomic(kaddr, KM_USER0); 2575 page_cache_release(page); 2576 }