Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

ntfs: remove the second argument of k[un]map_atomic()

Signed-off-by: Cong Wang <amwang@redhat.com>
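
Background for the change (not part of the commit message): kmap_atomic() used to take an explicit enum km_type slot such as KM_USER0 or KM_BIO_SRC_IRQ. With the stack-based implementation of atomic kmaps the slot is chosen implicitly, so the second argument of kmap_atomic() and kunmap_atomic() is dropped and every call site is converted mechanically. The sketch below shows the pattern; the helper name and arguments are illustrative only, not taken from the patch.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative sketch of the conversion pattern (hypothetical helper,
 * not part of this patch): zero the tail of a page via an atomic kmap.
 */
static void example_zero_tail(struct page *page, unsigned int from)
{
	void *kaddr;

	/*
	 * Old API:  kaddr = kmap_atomic(page, KM_USER0);
	 *           ...
	 *           kunmap_atomic(kaddr, KM_USER0);
	 * New API:  no slot argument; atomic mappings stack per CPU.
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + from, 0, PAGE_SIZE - from);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
}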

Authored and committed by Cong Wang
a3ac1414 7b9c0976

4 files changed: +32 -32
fs/ntfs/aops.c: +10 -10
@@ -94,11 +94,11 @@
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			kaddr = kmap_atomic(page);
 			memset(kaddr + bh_offset(bh) + ofs, 0,
 					bh->b_size - ofs);
 			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+			kunmap_atomic(kaddr);
 			local_irq_restore(flags);
 		}
 	} else {
@@ -147,11 +147,11 @@
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
 		local_irq_save(flags);
-		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		kaddr = kmap_atomic(page);
 		for (i = 0; i < recs; i++)
 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 					i * rec_size), rec_size);
-		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+		kunmap_atomic(kaddr);
 		local_irq_restore(flags);
 		flush_dcache_page(page);
 		if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@
 		/* Race with shrinking truncate. */
 		attr_len = i_size;
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data to the page. */
 	memcpy(addr, (u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@
 	/* Zero the remainder of the page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
 	flush_dcache_page(page);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 put_unm_err_out:
 	ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -746,14 +746,14 @@
 			unsigned long *bpos, *bend;
 
 			/* Check if the buffer is zero. */
-			kaddr = kmap_atomic(page, KM_USER0);
+			kaddr = kmap_atomic(page);
 			bpos = (unsigned long *)(kaddr + bh_offset(bh));
 			bend = (unsigned long *)((u8*)bpos + blocksize);
 			do {
 				if (unlikely(*bpos))
 					break;
 			} while (likely(++bpos < bend));
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			if (bpos == bend) {
 				/*
 				 * Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@
 		/* Shrinking cannot fail. */
 		BUG_ON(err);
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	/* We are done with the page. */
fs/ntfs/attrib.c: +10 -10
@@ -1656,12 +1656,12 @@
 	attr_size = le32_to_cpu(a->data.resident.value_length);
 	BUG_ON(attr_size != data_size);
 	if (page && !PageUptodate(page)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy(kaddr, (u8*)a +
 				le16_to_cpu(a->data.resident.value_offset),
 				attr_size);
 		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -1806,9 +1806,9 @@
 			sizeof(a->data.resident.reserved));
 	/* Copy the data from the page back to the attribute value. */
 	if (page) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	/* Setup the allocated size in the ntfs inode in case it changed. */
 	write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@
 		size = PAGE_CACHE_SIZE;
 		if (idx == end)
 			size = end_ofs;
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr + start_ofs, val, size - start_ofs);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		set_page_dirty(page);
 		page_cache_release(page);
 		balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@
 					"page (index 0x%lx).", idx);
 			return -ENOMEM;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr, val, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		/*
 		 * If the page has buffers, mark them uptodate since buffer
 		 * state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@
 				"(error, index 0x%lx).", idx);
 		return PTR_ERR(page);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, val, end_ofs);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	set_page_dirty(page);
 	page_cache_release(page);
 	balance_dirty_pages_ratelimited(mapping);
fs/ntfs/file.c: +8 -8
@@ -704,7 +704,7 @@
 				u8 *kaddr;
 				unsigned pofs;
 
-				kaddr = kmap_atomic(page, KM_USER0);
+				kaddr = kmap_atomic(page);
 				if (bh_pos < pos) {
 					pofs = bh_pos & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@
 					pofs = end & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, bh_end - end);
 				}
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				flush_dcache_page(page);
 			}
 			continue;
@@ -1287,9 +1287,9 @@
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		left = __copy_from_user_inatomic(addr + ofs, buf, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(left)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1401,10 +1401,10 @@
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
 				*iov, *iov_ofs, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1691,7 +1691,7 @@
 	BUG_ON(end > le32_to_cpu(a->length) -
 			le16_to_cpu(a->data.resident.value_offset));
 	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	/* Copy the received data from the page to the mft record. */
 	memcpy(kattr + pos, kaddr + pos, bytes);
 	/* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	/* Update initialized_size/i_size if necessary. */
 	read_lock_irqsave(&ni->size_lock, flags);
 	initialized_size = ni->initialized_size;
fs/ntfs/super.c: +4 -4
@@ -2473,7 +2473,7 @@
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@
 		 */
 		nr_free -= bitmap_weight(kaddr,
 					PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@
 		 */
 		nr_free -= bitmap_weight(kaddr,
 					PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
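
A general property of the slotless API, noted here as background rather than something these NTFS call sites exercise (each maps a single page at a time): nested atomic mappings are still allowed, but because the mappings now live on a small per-CPU stack they must be unmapped in reverse order. A minimal illustrative sketch with a hypothetical helper name:

static void example_copy_page(struct page *dst, struct page *src)
{
	char *d = kmap_atomic(dst);
	char *s = kmap_atomic(src);

	memcpy(d, s, PAGE_SIZE);

	/* Unmap in reverse (LIFO) order of mapping. */
	kunmap_atomic(s);
	kunmap_atomic(d);
}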