Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/grant-table: Refactor gnttab_[un]map_refs to avoid m2p_override

The grant mapping API does m2p_override unnecessarily: only gntdev needs it,
for blkback and future netback patches it just causes lock contention, as
those pages never go to userspace. Therefore this series does the following:
- the bulk of the original function (everything after the mapping hypercall)
is moved to arch-dependent set/clear_foreign_p2m_mapping
- the "if (xen_feature(XENFEAT_auto_translated_physmap))" branch goes to ARM
- therefore the ARM function could be much smaller, and the m2p_override stubs
could also be removed
- on x86 the set_phys_to_machine calls were moved up to this new function
from m2p_override functions
- and m2p_override functions are only called when there is a kmap_ops param

It also removes a stray space from arch/x86/include/asm/xen/page.h.

Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Suggested-by: Anthony Liguori <aliguori@amazon.com>
Suggested-by: David Vrabel <david.vrabel@citrix.com>
Suggested-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

authored by

Zoltan Kiss and committed by
David Vrabel
1429d46d 395edbb8

+156 -96
+6 -9
arch/arm/include/asm/xen/page.h
··· 97 97 return NULL; 98 98 } 99 99 100 - static inline int m2p_add_override(unsigned long mfn, struct page *page, 101 - struct gnttab_map_grant_ref *kmap_op) 102 - { 103 - return 0; 104 - } 100 + extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 101 + struct gnttab_map_grant_ref *kmap_ops, 102 + struct page **pages, unsigned int count); 105 103 106 - static inline int m2p_remove_override(struct page *page, bool clear_pte) 107 - { 108 - return 0; 109 - } 104 + extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 105 + struct gnttab_map_grant_ref *kmap_ops, 106 + struct page **pages, unsigned int count); 110 107 111 108 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 112 109 bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+32
arch/arm/xen/p2m.c
··· 146 146 } 147 147 EXPORT_SYMBOL_GPL(__mfn_to_pfn); 148 148 149 + int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 150 + struct gnttab_map_grant_ref *kmap_ops, 151 + struct page **pages, unsigned int count) 152 + { 153 + int i; 154 + 155 + for (i = 0; i < count; i++) { 156 + if (map_ops[i].status) 157 + continue; 158 + set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, 159 + map_ops[i].dev_bus_addr >> PAGE_SHIFT); 160 + } 161 + 162 + return 0; 163 + } 164 + EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 165 + 166 + int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 167 + struct gnttab_map_grant_ref *kmap_ops, 168 + struct page **pages, unsigned int count) 169 + { 170 + int i; 171 + 172 + for (i = 0; i < count; i++) { 173 + set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, 174 + INVALID_P2M_ENTRY); 175 + } 176 + 177 + return 0; 178 + } 179 + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); 180 + 149 181 bool __set_phys_to_machine_multi(unsigned long pfn, 150 182 unsigned long mfn, unsigned long nr_pages) 151 183 {
+9 -2
arch/x86/include/asm/xen/page.h
··· 49 49 extern unsigned long set_phys_range_identity(unsigned long pfn_s, 50 50 unsigned long pfn_e); 51 51 52 + extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 53 + struct gnttab_map_grant_ref *kmap_ops, 54 + struct page **pages, unsigned int count); 52 55 extern int m2p_add_override(unsigned long mfn, struct page *page, 53 56 struct gnttab_map_grant_ref *kmap_op); 57 + extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 58 + struct gnttab_map_grant_ref *kmap_ops, 59 + struct page **pages, unsigned int count); 54 60 extern int m2p_remove_override(struct page *page, 55 - struct gnttab_map_grant_ref *kmap_op); 61 + struct gnttab_map_grant_ref *kmap_op, 62 + unsigned long mfn); 56 63 extern struct page *m2p_find_override(unsigned long mfn); 57 64 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); 58 65 ··· 128 121 pfn = m2p_find_override_pfn(mfn, ~0); 129 122 } 130 123 131 - /* 124 + /* 132 125 * pfn is ~0 if there are no entries in the m2p for mfn or if the 133 126 * entry doesn't map back to the mfn and m2p_override doesn't have a 134 127 * valid entry for it.
+106 -15
arch/x86/xen/p2m.c
··· 881 881 return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT); 882 882 } 883 883 884 + int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 885 + struct gnttab_map_grant_ref *kmap_ops, 886 + struct page **pages, unsigned int count) 887 + { 888 + int i, ret = 0; 889 + bool lazy = false; 890 + pte_t *pte; 891 + 892 + if (xen_feature(XENFEAT_auto_translated_physmap)) 893 + return 0; 894 + 895 + if (kmap_ops && 896 + !in_interrupt() && 897 + paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 898 + arch_enter_lazy_mmu_mode(); 899 + lazy = true; 900 + } 901 + 902 + for (i = 0; i < count; i++) { 903 + unsigned long mfn, pfn; 904 + 905 + /* Do not add to override if the map failed. */ 906 + if (map_ops[i].status) 907 + continue; 908 + 909 + if (map_ops[i].flags & GNTMAP_contains_pte) { 910 + pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + 911 + (map_ops[i].host_addr & ~PAGE_MASK)); 912 + mfn = pte_mfn(*pte); 913 + } else { 914 + mfn = PFN_DOWN(map_ops[i].dev_bus_addr); 915 + } 916 + pfn = page_to_pfn(pages[i]); 917 + 918 + WARN_ON(PagePrivate(pages[i])); 919 + SetPagePrivate(pages[i]); 920 + set_page_private(pages[i], mfn); 921 + pages[i]->index = pfn_to_mfn(pfn); 922 + 923 + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { 924 + ret = -ENOMEM; 925 + goto out; 926 + } 927 + 928 + if (kmap_ops) { 929 + ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]); 930 + if (ret) 931 + goto out; 932 + } 933 + } 934 + 935 + out: 936 + if (lazy) 937 + arch_leave_lazy_mmu_mode(); 938 + 939 + return ret; 940 + } 941 + EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); 942 + 884 943 /* Add an MFN override for a particular page */ 885 944 int m2p_add_override(unsigned long mfn, struct page *page, 886 945 struct gnttab_map_grant_ref *kmap_op) ··· 958 899 "m2p_add_override: pfn %lx not mapped", pfn)) 959 900 return -EINVAL; 960 901 } 961 - WARN_ON(PagePrivate(page)); 962 - SetPagePrivate(page); 963 - set_page_private(page, mfn); 964 - page->index = 
pfn_to_mfn(pfn); 965 - 966 - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) 967 - return -ENOMEM; 968 902 969 903 if (kmap_op != NULL) { 970 904 if (!PageHighMem(page)) { ··· 995 943 return 0; 996 944 } 997 945 EXPORT_SYMBOL_GPL(m2p_add_override); 946 + 947 + int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 948 + struct gnttab_map_grant_ref *kmap_ops, 949 + struct page **pages, unsigned int count) 950 + { 951 + int i, ret = 0; 952 + bool lazy = false; 953 + 954 + if (xen_feature(XENFEAT_auto_translated_physmap)) 955 + return 0; 956 + 957 + if (kmap_ops && 958 + !in_interrupt() && 959 + paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 960 + arch_enter_lazy_mmu_mode(); 961 + lazy = true; 962 + } 963 + 964 + for (i = 0; i < count; i++) { 965 + unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i])); 966 + unsigned long pfn = page_to_pfn(pages[i]); 967 + 968 + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { 969 + ret = -EINVAL; 970 + goto out; 971 + } 972 + 973 + set_page_private(pages[i], INVALID_P2M_ENTRY); 974 + WARN_ON(!PagePrivate(pages[i])); 975 + ClearPagePrivate(pages[i]); 976 + set_phys_to_machine(pfn, pages[i]->index); 977 + 978 + if (kmap_ops) 979 + ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn); 980 + if (ret) 981 + goto out; 982 + } 983 + 984 + out: 985 + if (lazy) 986 + arch_leave_lazy_mmu_mode(); 987 + return ret; 988 + } 989 + EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); 990 + 998 991 int m2p_remove_override(struct page *page, 999 - struct gnttab_map_grant_ref *kmap_op) 992 + struct gnttab_map_grant_ref *kmap_op, 993 + unsigned long mfn) 1000 994 { 1001 995 unsigned long flags; 1002 - unsigned long mfn; 1003 996 unsigned long pfn; 1004 997 unsigned long uninitialized_var(address); 1005 998 unsigned level; 1006 999 pte_t *ptep = NULL; 1007 1000 1008 1001 pfn = page_to_pfn(page); 1009 - mfn = get_phys_to_machine(pfn); 1010 - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) 1011 
- return -EINVAL; 1012 1002 1013 1003 if (!PageHighMem(page)) { 1014 1004 address = (unsigned long)__va(pfn << PAGE_SHIFT); ··· 1064 970 spin_lock_irqsave(&m2p_override_lock, flags); 1065 971 list_del(&page->lru); 1066 972 spin_unlock_irqrestore(&m2p_override_lock, flags); 1067 - WARN_ON(!PagePrivate(page)); 1068 - ClearPagePrivate(page); 1069 973 1070 - set_phys_to_machine(pfn, page->index); 1071 974 if (kmap_op != NULL) { 1072 975 if (!PageHighMem(page)) { 1073 976 struct multicall_space mcs;
+3 -70
drivers/xen/grant-table.c
··· 933 933 struct page **pages, unsigned int count) 934 934 { 935 935 int i, ret; 936 - bool lazy = false; 937 - pte_t *pte; 938 - unsigned long mfn; 939 936 940 937 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); 941 938 if (ret) ··· 944 947 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, 945 948 &map_ops[i].status, __func__); 946 949 947 - /* this is basically a nop on x86 */ 948 - if (xen_feature(XENFEAT_auto_translated_physmap)) { 949 - for (i = 0; i < count; i++) { 950 - if (map_ops[i].status) 951 - continue; 952 - set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, 953 - map_ops[i].dev_bus_addr >> PAGE_SHIFT); 954 - } 955 - return ret; 956 - } 957 - 958 - if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 959 - arch_enter_lazy_mmu_mode(); 960 - lazy = true; 961 - } 962 - 963 - for (i = 0; i < count; i++) { 964 - /* Do not add to override if the map failed. */ 965 - if (map_ops[i].status) 966 - continue; 967 - 968 - if (map_ops[i].flags & GNTMAP_contains_pte) { 969 - pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + 970 - (map_ops[i].host_addr & ~PAGE_MASK)); 971 - mfn = pte_mfn(*pte); 972 - } else { 973 - mfn = PFN_DOWN(map_ops[i].dev_bus_addr); 974 - } 975 - ret = m2p_add_override(mfn, pages[i], kmap_ops ? 
976 - &kmap_ops[i] : NULL); 977 - if (ret) 978 - goto out; 979 - } 980 - 981 - out: 982 - if (lazy) 983 - arch_leave_lazy_mmu_mode(); 984 - 985 - return ret; 950 + return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); 986 951 } 987 952 EXPORT_SYMBOL_GPL(gnttab_map_refs); 988 953 ··· 952 993 struct gnttab_map_grant_ref *kmap_ops, 953 994 struct page **pages, unsigned int count) 954 995 { 955 - int i, ret; 956 - bool lazy = false; 996 + int ret; 957 997 958 998 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 959 999 if (ret) 960 1000 return ret; 961 1001 962 - /* this is basically a nop on x86 */ 963 - if (xen_feature(XENFEAT_auto_translated_physmap)) { 964 - for (i = 0; i < count; i++) { 965 - set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, 966 - INVALID_P2M_ENTRY); 967 - } 968 - return ret; 969 - } 970 - 971 - if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 972 - arch_enter_lazy_mmu_mode(); 973 - lazy = true; 974 - } 975 - 976 - for (i = 0; i < count; i++) { 977 - ret = m2p_remove_override(pages[i], kmap_ops ? 978 - &kmap_ops[i] : NULL); 979 - if (ret) 980 - goto out; 981 - } 982 - 983 - out: 984 - if (lazy) 985 - arch_leave_lazy_mmu_mode(); 986 - 987 - return ret; 1002 + return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count); 988 1003 } 989 1004 EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 990 1005