xen: don't drop NX bit

The Xen pte/pgd/pmd conversion routines rebuilt the value from the translated
machine address, which lost the NX bit; replace them with pte_mfn_to_pfn()/
pte_pfn_to_mfn() helpers that translate only the frame number and preserve the
flag bits.

Because NX is now enforced properly, we must put the hypercall page
into the .text segment so that it is executable.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stable Kernel <stable@kernel.org>
Cc: the arch/x86 maintainers <x86@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Jeremy Fitzhardinge, committed by Ingo Molnar (ebb9cfe2 05345b0f)

2 files changed, 32 insertions(+), 26 deletions(-)

arch/x86/xen/mmu.c (+31 -25)
@@ -179,46 +179,54 @@
         preempt_enable();
 }
 
+/* Assume pteval_t is equivalent to all the other *val_t types. */
+static pteval_t pte_mfn_to_pfn(pteval_t val)
+{
+        if (val & _PAGE_PRESENT) {
+                unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
+                pteval_t flags = val & ~PTE_MASK;
+                val = (mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+        }
+
+        return val;
+}
+
+static pteval_t pte_pfn_to_mfn(pteval_t val)
+{
+        if (val & _PAGE_PRESENT) {
+                unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
+                pteval_t flags = val & ~PTE_MASK;
+                val = (pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+        }
+
+        return val;
+}
+
 pteval_t xen_pte_val(pte_t pte)
 {
-        pteval_t ret = pte.pte;
-
-        if (ret & _PAGE_PRESENT)
-                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-
-        return ret;
+        return pte_mfn_to_pfn(pte.pte);
 }
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
-        pgdval_t ret = pgd.pgd;
-        if (ret & _PAGE_PRESENT)
-                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-        return ret;
+        return pte_mfn_to_pfn(pgd.pgd);
 }
 
 pte_t xen_make_pte(pteval_t pte)
 {
-        if (pte & _PAGE_PRESENT)
-                pte = phys_to_machine(XPADDR(pte)).maddr;
-
-        return (pte_t){ .pte = pte };
+        pte = pte_pfn_to_mfn(pte);
+        return native_make_pte(pte);
 }
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
-        if (pgd & _PAGE_PRESENT)
-                pgd = phys_to_machine(XPADDR(pgd)).maddr;
-
-        return (pgd_t){ pgd };
+        pgd = pte_pfn_to_mfn(pgd);
+        return native_make_pgd(pgd);
 }
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
-        pmdval_t ret = native_pmd_val(pmd);
-        if (ret & _PAGE_PRESENT)
-                ret = machine_to_phys(XMADDR(ret)).paddr | _PAGE_PRESENT;
-        return ret;
+        return pte_mfn_to_pfn(pmd.pmd);
 }
 #ifdef CONFIG_X86_PAE
 void xen_set_pud(pud_t *ptr, pud_t val)
@@ -273,9 +265,7 @@
 
 pmd_t xen_make_pmd(pmdval_t pmd)
 {
-        if (pmd & _PAGE_PRESENT)
-                pmd = phys_to_machine(XPADDR(pmd)).maddr;
-
+        pmd = pte_pfn_to_mfn(pmd);
         return native_make_pmd(pmd);
 }
 #else /* !PAE */
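
For illustration only (not part of the patch): a small userspace program that
models the behaviour above. The constants, the mfn_to_pfn() stub and
old_conversion() are simplified stand-ins rather than the kernel definitions;
only the bit manipulation mirrors pte_mfn_to_pfn() from the hunk above. It
shows that the new helper carries the NX flag through the conversion, while a
conversion that rebuilds the value from the translated address loses it
(prints 1 and 0 respectively).

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define PAGE_SHIFT      12
#define PTE_MASK        0x000ffffffffff000ULL   /* frame-number bits (stand-in) */
#define _PAGE_PRESENT   (1ULL << 0)
#define _PAGE_RW        (1ULL << 1)
#define _PAGE_NX        (1ULL << 63)

/* Stand-in for the real Xen machine-to-physical lookup. */
static unsigned long mfn_to_pfn(unsigned long mfn)
{
        return mfn + 0x100;     /* arbitrary translation for the demo */
}

/* Mirrors the patched helper: translate the frame number, keep the flags. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & ~PTE_MASK;
                val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
        }
        return val;
}

/*
 * Simplified model of the replaced code: the value is rebuilt from the
 * translated address plus _PAGE_PRESENT, so flag bits above the frame
 * number (notably _PAGE_NX) do not survive the round trip.
 */
static pteval_t old_conversion(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
                pteval_t offset = val & ((1ULL << PAGE_SHIFT) - 1);
                val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT)
                      | offset | _PAGE_PRESENT;
        }
        return val;
}

int main(void)
{
        pteval_t pte = (0x1234ULL << PAGE_SHIFT)
                       | _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;

        printf("new helper keeps NX: %d\n", !!(pte_mfn_to_pfn(pte) & _PAGE_NX));
        printf("old conversion keeps NX: %d\n", !!(old_conversion(pte) & _PAGE_NX));
        return 0;
}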
arch/x86/xen/xen-head.S (+1 -1)
@@ -17,7 +17,7 @@
 
         __FINIT
 
-.pushsection .bss.page_aligned
+.pushsection .text
         .align PAGE_SIZE_asm
 ENTRY(hypercall_page)
         .skip 0x1000
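
As a rough userspace analogy (again not part of the patch, and assuming an
x86-64 Linux system that allows anonymous PROT_EXEC mappings), the sketch
below shows the rule the hypercall page now has to obey: with NX enforced the
CPU only runs code from pages mapped executable, which is what placing
hypercall_page in .text guarantees and what .bss.page_aligned no longer does.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        unsigned char ret_insn = 0xc3;  /* x86 `ret` instruction */

        /* An executable mapping: jumping into it works. */
        void *exec_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (exec_page == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        memcpy(exec_page, &ret_insn, 1);
        ((void (*)(void))exec_page)();
        puts("returned from code in an executable mapping");

        /*
         * The same byte in a PROT_READ|PROT_WRITE mapping (no PROT_EXEC)
         * would fault on execution once NX is enforced -- the situation the
         * hypercall page was in while it lived in a non-executable section.
         */
        munmap(exec_page, 4096);
        return 0;
}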