Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen fixes from Konrad Rzeszutek Wilk:
"Bug-fixes:
- Revert "xen/grant-table: Avoid m2p_override during mapping" as it
broke Xen ARM build.
- Fix CR4 not being set on AP processors in Xen PVH mode"

* tag 'stable/for-linus-3.14-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/pvh: set CR4 flags for APs
Revert "xen/grant-table: Avoid m2p_override during mapping"

+58 -101
+2 -3
arch/x86/include/asm/xen/page.h
··· 52 52 extern int m2p_add_override(unsigned long mfn, struct page *page, 53 53 struct gnttab_map_grant_ref *kmap_op); 54 54 extern int m2p_remove_override(struct page *page, 55 - struct gnttab_map_grant_ref *kmap_op, 56 - unsigned long mfn); 55 + struct gnttab_map_grant_ref *kmap_op); 57 56 extern struct page *m2p_find_override(unsigned long mfn); 58 57 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); 59 58 ··· 121 122 pfn = m2p_find_override_pfn(mfn, ~0); 122 123 } 123 124 124 - /* 125 + /* 125 126 * pfn is ~0 if there are no entries in the m2p for mfn or if the 126 127 * entry doesn't map back to the mfn and m2p_override doesn't have a 127 128 * valid entry for it.
+12
arch/x86/xen/enlighten.c
··· 1473 1473 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests 1474 1474 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */ 1475 1475 write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM); 1476 + 1477 + if (!cpu) 1478 + return; 1479 + /* 1480 + * For BSP, PSE PGE are set in probe_page_size_mask(), for APs 1481 + * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init. 1482 + */ 1483 + if (cpu_has_pse) 1484 + set_in_cr4(X86_CR4_PSE); 1485 + 1486 + if (cpu_has_pge) 1487 + set_in_cr4(X86_CR4_PGE); 1476 1488 } 1477 1489 1478 1490 /*
+15 -2
arch/x86/xen/p2m.c
··· 899 899 "m2p_add_override: pfn %lx not mapped", pfn)) 900 900 return -EINVAL; 901 901 } 902 + WARN_ON(PagePrivate(page)); 903 + SetPagePrivate(page); 904 + set_page_private(page, mfn); 905 + page->index = pfn_to_mfn(pfn); 906 + 907 + if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) 908 + return -ENOMEM; 902 909 903 910 if (kmap_op != NULL) { 904 911 if (!PageHighMem(page)) { ··· 944 937 } 945 938 EXPORT_SYMBOL_GPL(m2p_add_override); 946 939 int m2p_remove_override(struct page *page, 947 - struct gnttab_map_grant_ref *kmap_op, 948 - unsigned long mfn) 940 + struct gnttab_map_grant_ref *kmap_op) 949 941 { 950 942 unsigned long flags; 943 + unsigned long mfn; 951 944 unsigned long pfn; 952 945 unsigned long uninitialized_var(address); 953 946 unsigned level; 954 947 pte_t *ptep = NULL; 955 948 956 949 pfn = page_to_pfn(page); 950 + mfn = get_phys_to_machine(pfn); 951 + if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) 952 + return -EINVAL; 957 953 958 954 if (!PageHighMem(page)) { 959 955 address = (unsigned long)__va(pfn << PAGE_SHIFT); ··· 970 960 spin_lock_irqsave(&m2p_override_lock, flags); 971 961 list_del(&page->lru); 972 962 spin_unlock_irqrestore(&m2p_override_lock, flags); 963 + WARN_ON(!PagePrivate(page)); 964 + ClearPagePrivate(page); 973 965 966 + set_phys_to_machine(pfn, page->index); 974 967 if (kmap_op != NULL) { 975 968 if (!PageHighMem(page)) { 976 969 struct multicall_space mcs;
+9 -6
drivers/block/xen-blkback/blkback.c
··· 285 285 286 286 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || 287 287 !rb_next(&persistent_gnt->node)) { 288 - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 288 + ret = gnttab_unmap_refs(unmap, NULL, pages, 289 + segs_to_unmap); 289 290 BUG_ON(ret); 290 291 put_free_pages(blkif, pages, segs_to_unmap); 291 292 segs_to_unmap = 0; ··· 321 320 pages[segs_to_unmap] = persistent_gnt->page; 322 321 323 322 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 324 - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 323 + ret = gnttab_unmap_refs(unmap, NULL, pages, 324 + segs_to_unmap); 325 325 BUG_ON(ret); 326 326 put_free_pages(blkif, pages, segs_to_unmap); 327 327 segs_to_unmap = 0; ··· 330 328 kfree(persistent_gnt); 331 329 } 332 330 if (segs_to_unmap > 0) { 333 - ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap); 331 + ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); 334 332 BUG_ON(ret); 335 333 put_free_pages(blkif, pages, segs_to_unmap); 336 334 } ··· 670 668 GNTMAP_host_map, pages[i]->handle); 671 669 pages[i]->handle = BLKBACK_INVALID_HANDLE; 672 670 if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 673 - ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); 671 + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, 672 + invcount); 674 673 BUG_ON(ret); 675 674 put_free_pages(blkif, unmap_pages, invcount); 676 675 invcount = 0; 677 676 } 678 677 } 679 678 if (invcount) { 680 - ret = gnttab_unmap_refs(unmap, unmap_pages, invcount); 679 + ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); 681 680 BUG_ON(ret); 682 681 put_free_pages(blkif, unmap_pages, invcount); 683 682 } ··· 740 737 } 741 738 742 739 if (segs_to_map) { 743 - ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map); 740 + ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); 744 741 BUG_ON(ret); 745 742 } 746 743
+5 -8
drivers/xen/gntdev.c
··· 284 284 } 285 285 286 286 pr_debug("map %d+%d\n", map->index, map->count); 287 - err = gnttab_map_refs_userspace(map->map_ops, 288 - use_ptemod ? map->kmap_ops : NULL, 289 - map->pages, 290 - map->count); 287 + err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, 288 + map->pages, map->count); 291 289 if (err) 292 290 return err; 293 291 ··· 315 317 } 316 318 } 317 319 318 - err = gnttab_unmap_refs_userspace(map->unmap_ops + offset, 319 - use_ptemod ? map->kmap_ops + offset : NULL, 320 - map->pages + offset, 321 - pages); 320 + err = gnttab_unmap_refs(map->unmap_ops + offset, 321 + use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset, 322 + pages); 322 323 if (err) 323 324 return err; 324 325
+13 -76
drivers/xen/grant-table.c
··· 928 928 } 929 929 EXPORT_SYMBOL_GPL(gnttab_batch_copy); 930 930 931 - int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 931 + int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 932 932 struct gnttab_map_grant_ref *kmap_ops, 933 - struct page **pages, unsigned int count, 934 - bool m2p_override) 933 + struct page **pages, unsigned int count) 935 934 { 936 935 int i, ret; 937 936 bool lazy = false; 938 937 pte_t *pte; 939 - unsigned long mfn, pfn; 938 + unsigned long mfn; 940 939 941 - BUG_ON(kmap_ops && !m2p_override); 942 940 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count); 943 941 if (ret) 944 942 return ret; ··· 955 957 set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT, 956 958 map_ops[i].dev_bus_addr >> PAGE_SHIFT); 957 959 } 958 - return 0; 960 + return ret; 959 961 } 960 962 961 - if (m2p_override && 962 - !in_interrupt() && 963 - paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 963 + if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 964 964 arch_enter_lazy_mmu_mode(); 965 965 lazy = true; 966 966 } ··· 975 979 } else { 976 980 mfn = PFN_DOWN(map_ops[i].dev_bus_addr); 977 981 } 978 - pfn = page_to_pfn(pages[i]); 979 - 980 - WARN_ON(PagePrivate(pages[i])); 981 - SetPagePrivate(pages[i]); 982 - set_page_private(pages[i], mfn); 983 - 984 - pages[i]->index = pfn_to_mfn(pfn); 985 - if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { 986 - ret = -ENOMEM; 987 - goto out; 988 - } 989 - if (m2p_override) 990 - ret = m2p_add_override(mfn, pages[i], kmap_ops ? 991 - &kmap_ops[i] : NULL); 982 + ret = m2p_add_override(mfn, pages[i], kmap_ops ? 
983 + &kmap_ops[i] : NULL); 992 984 if (ret) 993 985 goto out; 994 986 } ··· 987 1003 988 1004 return ret; 989 1005 } 990 - 991 - int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 992 - struct page **pages, unsigned int count) 993 - { 994 - return __gnttab_map_refs(map_ops, NULL, pages, count, false); 995 - } 996 1006 EXPORT_SYMBOL_GPL(gnttab_map_refs); 997 1007 998 - int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, 999 - struct gnttab_map_grant_ref *kmap_ops, 1000 - struct page **pages, unsigned int count) 1001 - { 1002 - return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true); 1003 - } 1004 - EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace); 1005 - 1006 - int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 1008 + int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 1007 1009 struct gnttab_map_grant_ref *kmap_ops, 1008 - struct page **pages, unsigned int count, 1009 - bool m2p_override) 1010 + struct page **pages, unsigned int count) 1010 1011 { 1011 1012 int i, ret; 1012 1013 bool lazy = false; 1013 - unsigned long pfn, mfn; 1014 1014 1015 - BUG_ON(kmap_ops && !m2p_override); 1016 1015 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 1017 1016 if (ret) 1018 1017 return ret; ··· 1006 1039 set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT, 1007 1040 INVALID_P2M_ENTRY); 1008 1041 } 1009 - return 0; 1042 + return ret; 1010 1043 } 1011 1044 1012 - if (m2p_override && 1013 - !in_interrupt() && 1014 - paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 1045 + if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { 1015 1046 arch_enter_lazy_mmu_mode(); 1016 1047 lazy = true; 1017 1048 } 1018 1049 1019 1050 for (i = 0; i < count; i++) { 1020 - pfn = page_to_pfn(pages[i]); 1021 - mfn = get_phys_to_machine(pfn); 1022 - if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) { 1023 - ret = -EINVAL; 1024 - goto out; 1025 - } 1026 - 1027 - set_page_private(pages[i], 
INVALID_P2M_ENTRY); 1028 - WARN_ON(!PagePrivate(pages[i])); 1029 - ClearPagePrivate(pages[i]); 1030 - set_phys_to_machine(pfn, pages[i]->index); 1031 - if (m2p_override) 1032 - ret = m2p_remove_override(pages[i], 1033 - kmap_ops ? 1034 - &kmap_ops[i] : NULL, 1035 - mfn); 1051 + ret = m2p_remove_override(pages[i], kmap_ops ? 1052 + &kmap_ops[i] : NULL); 1036 1053 if (ret) 1037 1054 goto out; 1038 1055 } ··· 1027 1076 1028 1077 return ret; 1029 1078 } 1030 - 1031 - int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops, 1032 - struct page **pages, unsigned int count) 1033 - { 1034 - return __gnttab_unmap_refs(map_ops, NULL, pages, count, false); 1035 - } 1036 1079 EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 1037 - 1038 - int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops, 1039 - struct gnttab_map_grant_ref *kmap_ops, 1040 - struct page **pages, unsigned int count) 1041 - { 1042 - return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true); 1043 - } 1044 - EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace); 1045 1080 1046 1081 static unsigned nr_status_frames(unsigned nr_grant_frames) 1047 1082 {
+2 -6
include/xen/grant_table.h
··· 191 191 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 192 192 193 193 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, 194 + struct gnttab_map_grant_ref *kmap_ops, 194 195 struct page **pages, unsigned int count); 195 - int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops, 196 - struct gnttab_map_grant_ref *kmap_ops, 197 - struct page **pages, unsigned int count); 198 196 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, 197 + struct gnttab_map_grant_ref *kunmap_ops, 199 198 struct page **pages, unsigned int count); 200 - int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops, 201 - struct gnttab_map_grant_ref *kunmap_ops, 202 - struct page **pages, unsigned int count); 203 199 204 200 /* Perform a batch of grant map/copy operations. Retry every batch slot 205 201 * for which the hypervisor returns GNTST_eagain. This is typically due