Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

pagemap: introduce data structure for pagemap entry

Currently, the local variable holding a pagemap entry in pagemap_pte_range() is
named pfn and typed as u64, but this is misleading (a pfn should be an
unsigned long).

This patch introduces a special type for pagemap entries and converts the
affected code to use it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Naoya Horiguchi and committed by
Linus Torvalds
092b50ba 807f0ccf

+38 -31
+38 -31
fs/proc/task_mmu.c
··· 594 594 .llseek = noop_llseek, 595 595 }; 596 596 597 + typedef struct { 598 + u64 pme; 599 + } pagemap_entry_t; 600 + 597 601 struct pagemapread { 598 602 int pos, len; 599 - u64 *buffer; 603 + pagemap_entry_t *buffer; 600 604 }; 601 605 602 606 #define PAGEMAP_WALK_SIZE (PMD_SIZE) ··· 623 619 #define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) 624 620 #define PM_END_OF_BUFFER 1 625 621 626 - static int add_to_pagemap(unsigned long addr, u64 pfn, 622 + static inline pagemap_entry_t make_pme(u64 val) 623 + { 624 + return (pagemap_entry_t) { .pme = val }; 625 + } 626 + 627 + static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, 627 628 struct pagemapread *pm) 628 629 { 629 - pm->buffer[pm->pos++] = pfn; 630 + pm->buffer[pm->pos++] = *pme; 630 631 if (pm->pos >= pm->len) 631 632 return PM_END_OF_BUFFER; 632 633 return 0; ··· 643 634 struct pagemapread *pm = walk->private; 644 635 unsigned long addr; 645 636 int err = 0; 637 + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); 638 + 646 639 for (addr = start; addr < end; addr += PAGE_SIZE) { 647 - err = add_to_pagemap(addr, PM_NOT_PRESENT, pm); 640 + err = add_to_pagemap(addr, &pme, pm); 648 641 if (err) 649 642 break; 650 643 } ··· 659 648 return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); 660 649 } 661 650 662 - static u64 pte_to_pagemap_entry(pte_t pte) 651 + static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) 663 652 { 664 - u64 pme = 0; 665 653 if (is_swap_pte(pte)) 666 - pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte)) 667 - | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP; 654 + *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte)) 655 + | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP); 668 656 else if (pte_present(pte)) 669 - pme = PM_PFRAME(pte_pfn(pte)) 670 - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; 671 - return pme; 657 + *pme = make_pme(PM_PFRAME(pte_pfn(pte)) 658 + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 672 659 } 673 660 674 661 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 675 - static u64 
thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) 662 + static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, 663 + pmd_t pmd, int offset) 676 664 { 677 - u64 pme = 0; 678 665 /* 679 666 * Currently pmd for thp is always present because thp can not be 680 667 * swapped-out, migrated, or HWPOISONed (split in such cases instead.) 681 668 * This if-check is just to prepare for future implementation. 682 669 */ 683 670 if (pmd_present(pmd)) 684 - pme = PM_PFRAME(pmd_pfn(pmd) + offset) 685 - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; 686 - return pme; 671 + *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) 672 + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 687 673 } 688 674 #else 689 - static inline u64 thp_pmd_to_pagemap_entry(pmd_t pmd, int offset) 675 + static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, 676 + pmd_t pmd, int offset) 690 677 { 691 - return 0; 692 678 } 693 679 #endif 694 680 ··· 696 688 struct pagemapread *pm = walk->private; 697 689 pte_t *pte; 698 690 int err = 0; 699 - u64 pfn = PM_NOT_PRESENT; 691 + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); 700 692 701 693 if (pmd_trans_unstable(pmd)) 702 694 return 0; ··· 710 702 711 703 offset = (addr & ~PAGEMAP_WALK_MASK) >> 712 704 PAGE_SHIFT; 713 - pfn = thp_pmd_to_pagemap_entry(*pmd, offset); 714 - err = add_to_pagemap(addr, pfn, pm); 705 + thp_pmd_to_pagemap_entry(&pme, *pmd, offset); 706 + err = add_to_pagemap(addr, &pme, pm); 715 707 if (err) 716 708 break; 717 709 } ··· 731 723 if (vma && (vma->vm_start <= addr) && 732 724 !is_vm_hugetlb_page(vma)) { 733 725 pte = pte_offset_map(pmd, addr); 734 - pfn = pte_to_pagemap_entry(*pte); 726 + pte_to_pagemap_entry(&pme, *pte); 735 727 /* unmap before userspace copy */ 736 728 pte_unmap(pte); 737 729 } 738 - err = add_to_pagemap(addr, pfn, pm); 730 + err = add_to_pagemap(addr, &pme, pm); 739 731 if (err) 740 732 return err; 741 733 } ··· 746 738 } 747 739 748 740 #ifdef CONFIG_HUGETLB_PAGE 749 - static u64 huge_pte_to_pagemap_entry(pte_t pte, int 
offset) 741 + static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, 742 + pte_t pte, int offset) 750 743 { 751 - u64 pme = 0; 752 744 if (pte_present(pte)) 753 - pme = PM_PFRAME(pte_pfn(pte) + offset) 754 - | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT; 755 - return pme; 745 + *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) 746 + | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 756 747 } 757 748 758 749 /* This function walks within one hugetlb entry in the single call */ ··· 761 754 { 762 755 struct pagemapread *pm = walk->private; 763 756 int err = 0; 764 - u64 pfn; 757 + pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); 765 758 766 759 for (; addr != end; addr += PAGE_SIZE) { 767 760 int offset = (addr & ~hmask) >> PAGE_SHIFT; 768 - pfn = huge_pte_to_pagemap_entry(*pte, offset); 769 - err = add_to_pagemap(addr, pfn, pm); 761 + huge_pte_to_pagemap_entry(&pme, *pte, offset); 762 + err = add_to_pagemap(addr, &pme, pm); 770 763 if (err) 771 764 return err; 772 765 }