Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen bugfixes from Konrad Rzeszutek Wilk:
- Fix balloon driver for auto-translate guests (PVHVM, ARM) to not use
scratch pages.
- Fix block API header for ARM32 and ARM64 to have proper layout
- On ARM, when mapping guest memory, mark the foreign-mapping PTEs with
PTE_SPECIAL
- When using SWIOTLB under ARM, don't call swiotlb functions twice
- When unmapping guest memory, if the unmap fails, don't return the pages
that failed to be unmapped.
- Grant driver was using the wrong address on ARM.

* tag 'stable/for-linus-3.13-rc4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/balloon: Seperate the auto-translate logic properly (v2)
xen/block: Correctly define structures in public headers on ARM32 and ARM64
arm: xen: foreign mapping PTEs are special.
xen/arm64: do not call the swiotlb functions twice
xen: privcmd: do not return pages which we have failed to unmap
XEN: Grant table address, xen_hvm_resume_frames, is a phys_addr not a pfn

+51 -44
+3 -3
arch/arm/xen/enlighten.c
··· 96 96 struct remap_data *info = data; 97 97 struct page *page = info->pages[info->index++]; 98 98 unsigned long pfn = page_to_pfn(page); 99 - pte_t pte = pfn_pte(pfn, info->prot); 99 + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 100 100 101 101 if (map_foreign_page(pfn, info->fgmfn, info->domid)) 102 102 return -EFAULT; ··· 224 224 } 225 225 if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res)) 226 226 return 0; 227 - xen_hvm_resume_frames = res.start >> PAGE_SHIFT; 227 + xen_hvm_resume_frames = res.start; 228 228 xen_events_irq = irq_of_parse_and_map(node, 0); 229 229 pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n", 230 - version, xen_events_irq, xen_hvm_resume_frames); 230 + version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT)); 231 231 xen_domain_type = XEN_HVM_DOMAIN; 232 232 233 233 xen_setup_features();
-4
arch/arm64/include/asm/xen/page-coherent.h
··· 23 23 unsigned long offset, size_t size, enum dma_data_direction dir, 24 24 struct dma_attrs *attrs) 25 25 { 26 - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 27 26 } 28 27 29 28 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 30 29 size_t size, enum dma_data_direction dir, 31 30 struct dma_attrs *attrs) 32 31 { 33 - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 34 32 } 35 33 36 34 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 37 35 dma_addr_t handle, size_t size, enum dma_data_direction dir) 38 36 { 39 - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 40 37 } 41 38 42 39 static inline void xen_dma_sync_single_for_device(struct device *hwdev, 43 40 dma_addr_t handle, size_t size, enum dma_data_direction dir) 44 41 { 45 - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 46 42 } 47 43 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
+34 -29
drivers/xen/balloon.c
··· 350 350 351 351 pfn = page_to_pfn(page); 352 352 353 - set_phys_to_machine(pfn, frame_list[i]); 354 - 355 353 #ifdef CONFIG_XEN_HAVE_PVMMU 356 - /* Link back into the page tables if not highmem. */ 357 - if (xen_pv_domain() && !PageHighMem(page)) { 358 - int ret; 359 - ret = HYPERVISOR_update_va_mapping( 360 - (unsigned long)__va(pfn << PAGE_SHIFT), 361 - mfn_pte(frame_list[i], PAGE_KERNEL), 362 - 0); 363 - BUG_ON(ret); 354 + if (!xen_feature(XENFEAT_auto_translated_physmap)) { 355 + set_phys_to_machine(pfn, frame_list[i]); 356 + 357 + /* Link back into the page tables if not highmem. */ 358 + if (!PageHighMem(page)) { 359 + int ret; 360 + ret = HYPERVISOR_update_va_mapping( 361 + (unsigned long)__va(pfn << PAGE_SHIFT), 362 + mfn_pte(frame_list[i], PAGE_KERNEL), 363 + 0); 364 + BUG_ON(ret); 365 + } 364 366 } 365 367 #endif 366 368 ··· 380 378 enum bp_state state = BP_DONE; 381 379 unsigned long pfn, i; 382 380 struct page *page; 383 - struct page *scratch_page; 384 381 int ret; 385 382 struct xen_memory_reservation reservation = { 386 383 .address_bits = 0, ··· 412 411 413 412 scrub_page(page); 414 413 414 + #ifdef CONFIG_XEN_HAVE_PVMMU 415 415 /* 416 416 * Ballooned out frames are effectively replaced with 417 417 * a scratch frame. Ensure direct mappings and the 418 418 * p2m are consistent. 
419 419 */ 420 - scratch_page = get_balloon_scratch_page(); 421 - #ifdef CONFIG_XEN_HAVE_PVMMU 422 - if (xen_pv_domain() && !PageHighMem(page)) { 423 - ret = HYPERVISOR_update_va_mapping( 424 - (unsigned long)__va(pfn << PAGE_SHIFT), 425 - pfn_pte(page_to_pfn(scratch_page), 426 - PAGE_KERNEL_RO), 0); 427 - BUG_ON(ret); 428 - } 429 - #endif 430 420 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 431 421 unsigned long p; 422 + struct page *scratch_page = get_balloon_scratch_page(); 423 + 424 + if (!PageHighMem(page)) { 425 + ret = HYPERVISOR_update_va_mapping( 426 + (unsigned long)__va(pfn << PAGE_SHIFT), 427 + pfn_pte(page_to_pfn(scratch_page), 428 + PAGE_KERNEL_RO), 0); 429 + BUG_ON(ret); 430 + } 432 431 p = page_to_pfn(scratch_page); 433 432 __set_phys_to_machine(pfn, pfn_to_mfn(p)); 433 + 434 + put_balloon_scratch_page(); 434 435 } 435 - put_balloon_scratch_page(); 436 + #endif 436 437 437 438 balloon_append(pfn_to_page(pfn)); 438 439 } ··· 630 627 if (!xen_domain()) 631 628 return -ENODEV; 632 629 633 - for_each_online_cpu(cpu) 634 - { 635 - per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 636 - if (per_cpu(balloon_scratch_page, cpu) == NULL) { 637 - pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 638 - return -ENOMEM; 630 + if (!xen_feature(XENFEAT_auto_translated_physmap)) { 631 + for_each_online_cpu(cpu) 632 + { 633 + per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL); 634 + if (per_cpu(balloon_scratch_page, cpu) == NULL) { 635 + pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu); 636 + return -ENOMEM; 637 + } 639 638 } 639 + register_cpu_notifier(&balloon_cpu_notifier); 640 640 } 641 - register_cpu_notifier(&balloon_cpu_notifier); 642 641 643 642 pr_info("Initialising balloon driver\n"); 644 643
+2 -1
drivers/xen/grant-table.c
··· 1176 1176 gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, 1177 1177 PAGE_SIZE * max_nr_gframes); 1178 1178 if (gnttab_shared.addr == NULL) { 1179 - pr_warn("Failed to ioremap gnttab share frames!\n"); 1179 + pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n", 1180 + xen_hvm_resume_frames); 1180 1181 return -ENOMEM; 1181 1182 } 1182 1183 }
+7 -2
drivers/xen/privcmd.c
··· 533 533 { 534 534 struct page **pages = vma->vm_private_data; 535 535 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 536 + int rc; 536 537 537 538 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 538 539 return; 539 540 540 - xen_unmap_domain_mfn_range(vma, numpgs, pages); 541 - free_xenballooned_pages(numpgs, pages); 541 + rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); 542 + if (rc == 0) 543 + free_xenballooned_pages(numpgs, pages); 544 + else 545 + pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n", 546 + numpgs, rc); 542 547 kfree(pages); 543 548 } 544 549
+5 -5
include/xen/interface/io/blkif.h
··· 146 146 struct blkif_request_rw { 147 147 uint8_t nr_segments; /* number of segments */ 148 148 blkif_vdev_t handle; /* only for read/write requests */ 149 - #ifdef CONFIG_X86_64 149 + #ifndef CONFIG_X86_32 150 150 uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */ 151 151 #endif 152 152 uint64_t id; /* private guest value, echoed in resp */ ··· 163 163 uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */ 164 164 #define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */ 165 165 blkif_vdev_t _pad1; /* only for read/write requests */ 166 - #ifdef CONFIG_X86_64 166 + #ifndef CONFIG_X86_32 167 167 uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/ 168 168 #endif 169 169 uint64_t id; /* private guest value, echoed in resp */ ··· 175 175 struct blkif_request_other { 176 176 uint8_t _pad1; 177 177 blkif_vdev_t _pad2; /* only for read/write requests */ 178 - #ifdef CONFIG_X86_64 178 + #ifndef CONFIG_X86_32 179 179 uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ 180 180 #endif 181 181 uint64_t id; /* private guest value, echoed in resp */ ··· 184 184 struct blkif_request_indirect { 185 185 uint8_t indirect_op; 186 186 uint16_t nr_segments; 187 - #ifdef CONFIG_X86_64 187 + #ifndef CONFIG_X86_32 188 188 uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */ 189 189 #endif 190 190 uint64_t id; ··· 192 192 blkif_vdev_t handle; 193 193 uint16_t _pad2; 194 194 grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST]; 195 - #ifdef CONFIG_X86_64 195 + #ifndef CONFIG_X86_32 196 196 uint32_t _pad3; /* make it 64 byte aligned */ 197 197 #else 198 198 uint64_t _pad3; /* make it 64 byte aligned */