Merge tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- A bunch of minor cleanups

- A fix for kdump (kexec) of a Xen HVM guest when the crash kernel
  boots on a high cpu number (vcpu >= 32)

- A fix for resuming a Xen guest with assigned PCI devices after
  suspend

- A fix for a crash caused by preemption not being disabled when
  resuming as Xen dom0

* tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen: fix is_xen_pmu()
xen: don't hang when resuming PCI device
arch:x86:xen: Remove unnecessary assignment in xen_apic_read()
xen/grant-table: remove readonly parameter from functions
xen/grant-table: remove gnttab_*transfer*() functions
drivers/xen: use helper macro __ATTR_RW
x86/xen: Fix kerneldoc warning
xen: delay xen_hvm_init_time_ops() if kdump is boot on vcpu>=32
xen: use time_is_before_eq_jiffies() instead of open coding it

+94 -193
+1 -1
arch/x86/xen/apic.c
··· 51 .interface_version = XENPF_INTERFACE_VERSION, 52 .u.pcpu_info.xen_cpuid = 0, 53 }; 54 - int ret = 0; 55 56 /* Shouldn't need this as APIC is turned off for PV, and we only 57 * get called on the bootup processor. But just in case. */
··· 51 .interface_version = XENPF_INTERFACE_VERSION, 52 .u.pcpu_info.xen_cpuid = 0, 53 }; 54 + int ret; 55 56 /* Shouldn't need this as APIC is turned off for PV, and we only 57 * get called on the bootup processor. But just in case. */
+4 -6
arch/x86/xen/pmu.c
··· 506 return ret; 507 } 508 509 - bool is_xen_pmu(int cpu) 510 - { 511 - return (get_xenpmu_data() != NULL); 512 - } 513 514 void xen_pmu_init(int cpu) 515 { ··· 517 518 BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); 519 520 - if (xen_hvm_domain()) 521 return; 522 523 xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); ··· 538 per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; 539 per_cpu(xenpmu_shared, cpu).flags = 0; 540 541 - if (cpu == 0) { 542 perf_register_guest_info_callbacks(&xen_guest_cbs); 543 xen_pmu_arch_init(); 544 }
··· 506 return ret; 507 } 508 509 + bool is_xen_pmu; 510 511 void xen_pmu_init(int cpu) 512 { ··· 520 521 BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); 522 523 + if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) 524 return; 525 526 xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); ··· 541 per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; 542 per_cpu(xenpmu_shared, cpu).flags = 0; 543 544 + if (!is_xen_pmu) { 545 + is_xen_pmu = true; 546 perf_register_guest_info_callbacks(&xen_guest_cbs); 547 xen_pmu_arch_init(); 548 }
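Per the summary above, the crash being fixed came from testing a per-CPU pointer while preemption was still enabled during dom0 resume. A minimal sketch (illustrative only, not code from this series) of the conventional alternative, disabling preemption around the per-CPU read; the patch instead settles on a plain global flag, since the answer is the same on every CPU once the boot CPU has initialized the PMU:

    /* Sketch: making a per-CPU presence test safe in preemptible context. */
    int cpu = get_cpu();    /* disables preemption, returns current CPU */
    bool pmu_present = per_cpu(xenpmu_shared, cpu).xenpmu_data != NULL;
    put_cpu();              /* re-enables preemption */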
+2 -1
arch/x86/xen/pmu.h
··· 4 5 #include <xen/interface/xenpmu.h> 6 7 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); 8 #ifdef CONFIG_XEN_HAVE_VPMU 9 void xen_pmu_init(int cpu); ··· 14 static inline void xen_pmu_init(int cpu) {} 15 static inline void xen_pmu_finish(int cpu) {} 16 #endif 17 - bool is_xen_pmu(int cpu); 18 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); 19 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); 20 int pmu_apic_update(uint32_t reg);
··· 4 5 #include <xen/interface/xenpmu.h> 6 7 + extern bool is_xen_pmu; 8 + 9 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); 10 #ifdef CONFIG_XEN_HAVE_VPMU 11 void xen_pmu_init(int cpu); ··· 12 static inline void xen_pmu_init(int cpu) {} 13 static inline void xen_pmu_finish(int cpu) {} 14 #endif 15 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); 16 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); 17 int pmu_apic_update(uint32_t reg);
+1 -1
arch/x86/xen/setup.c
··· 719 } 720 721 /** 722 - * machine_specific_memory_setup - Hook for machine specific memory setup. 723 **/ 724 char * __init xen_memory_setup(void) 725 {
··· 719 } 720 721 /** 722 + * xen_memory_setup - Hook for machine specific memory setup. 723 **/ 724 char * __init xen_memory_setup(void) 725 {
+6
arch/x86/xen/smp_hvm.c
··· 20 xen_vcpu_setup(0); 21 22 /* 23 * The alternative logic (which patches the unlock/lock) runs before 24 * the smp bootup code is activated. Hence we need to set this up before 25 * the core kernel is being patched. Otherwise we will have only
··· 20 xen_vcpu_setup(0); 21 22 /* 23 + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. 24 + * Refer to comments in xen_hvm_init_time_ops(). 25 + */ 26 + xen_hvm_init_time_ops(); 27 + 28 /* 29 * The alternative logic (which patches the unlock/lock) runs before 30 * the smp bootup code is activated. Hence we need to set this up before 31 * the core kernel is being patched. Otherwise we will have only
+1 -1
arch/x86/xen/smp_pv.c
··· 129 per_cpu(xen_irq_work, cpu).irq = rc; 130 per_cpu(xen_irq_work, cpu).name = callfunc_name; 131 132 - if (is_xen_pmu(cpu)) { 133 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); 134 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, 135 xen_pmu_irq_handler,
··· 129 per_cpu(xen_irq_work, cpu).irq = rc; 130 per_cpu(xen_irq_work, cpu).name = callfunc_name; 131 132 + if (is_xen_pmu) { 133 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); 134 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, 135 xen_pmu_irq_handler,
+23 -1
arch/x86/xen/time.c
··· 558 559 void __init xen_hvm_init_time_ops(void) 560 { 561 /* 562 * vector callback is needed otherwise we cannot receive interrupts 563 * on cpu > 0 and at this point we don't know how many cpus are ··· 572 return; 573 574 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 575 - pr_info("Xen doesn't support pvclock on HVM, disable pv timer"); 576 return; 577 } 578 ··· 597 x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; 598 599 x86_platform.set_wallclock = xen_set_wallclock; 600 } 601 #endif 602
··· 558 559 void __init xen_hvm_init_time_ops(void) 560 { 561 + static bool hvm_time_initialized; 562 + 563 + if (hvm_time_initialized) 564 + return; 565 + 566 /* 567 * vector callback is needed otherwise we cannot receive interrupts 568 * on cpu > 0 and at this point we don't know how many cpus are ··· 567 return; 568 569 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 570 + pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); 571 + return; 572 + } 573 + 574 + /* 575 + * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. 576 + * The __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest 577 + * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec). Accessing 578 + * __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic. 579 + * 580 + * xen_hvm_init_time_ops() should be called again later, after 581 + * __this_cpu_read(xen_vcpu) is available. 582 + */ 583 + if (!__this_cpu_read(xen_vcpu)) { 584 + pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", 585 + xen_vcpu_nr(0)); 586 + return; 587 + } 588 ··· 577 x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; 578 579 x86_platform.set_wallclock = xen_set_wallclock; 580 + 581 + hvm_time_initialized = true; 582 } 583 #endif 584
+4 -4
drivers/block/xen-blkfront.c
··· 1223 list_del(&persistent_gnt->node); 1224 if (persistent_gnt->gref != GRANT_INVALID_REF) { 1225 gnttab_end_foreign_access(persistent_gnt->gref, 1226 - 0, 0UL); 1227 rinfo->persistent_gnts_c--; 1228 } 1229 if (info->feature_persistent) ··· 1246 rinfo->shadow[i].req.u.rw.nr_segments; 1247 for (j = 0; j < segs; j++) { 1248 persistent_gnt = rinfo->shadow[i].grants_used[j]; 1249 - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 1250 if (info->feature_persistent) 1251 __free_page(persistent_gnt->page); 1252 kfree(persistent_gnt); ··· 1261 1262 for (j = 0; j < INDIRECT_GREFS(segs); j++) { 1263 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; 1264 - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 1265 __free_page(persistent_gnt->page); 1266 kfree(persistent_gnt); 1267 } ··· 1284 /* Free resources associated with old device channel. */ 1285 for (i = 0; i < info->nr_ring_pages; i++) { 1286 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { 1287 - gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0); 1288 rinfo->ring_ref[i] = GRANT_INVALID_REF; 1289 } 1290 }
··· 1223 list_del(&persistent_gnt->node); 1224 if (persistent_gnt->gref != GRANT_INVALID_REF) { 1225 gnttab_end_foreign_access(persistent_gnt->gref, 1226 + 0UL); 1227 rinfo->persistent_gnts_c--; 1228 } 1229 if (info->feature_persistent) ··· 1246 rinfo->shadow[i].req.u.rw.nr_segments; 1247 for (j = 0; j < segs; j++) { 1248 persistent_gnt = rinfo->shadow[i].grants_used[j]; 1249 + gnttab_end_foreign_access(persistent_gnt->gref, 0UL); 1250 if (info->feature_persistent) 1251 __free_page(persistent_gnt->page); 1252 kfree(persistent_gnt); ··· 1261 1262 for (j = 0; j < INDIRECT_GREFS(segs); j++) { 1263 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; 1264 + gnttab_end_foreign_access(persistent_gnt->gref, 0UL); 1265 __free_page(persistent_gnt->page); 1266 kfree(persistent_gnt); 1267 } ··· 1284 /* Free resources associated with old device channel. */ 1285 for (i = 0; i < info->nr_ring_pages; i++) { 1286 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { 1287 + gnttab_end_foreign_access(rinfo->ring_ref[i], 0); 1288 rinfo->ring_ref[i] = GRANT_INVALID_REF; 1289 } 1290 }
+1 -1
drivers/char/tpm/xen-tpmfront.c
··· 332 return; 333 334 if (priv->ring_ref) 335 - gnttab_end_foreign_access(priv->ring_ref, 0, 336 (unsigned long)priv->shr); 337 else 338 free_page((unsigned long)priv->shr);
··· 332 return; 333 334 if (priv->ring_ref) 335 + gnttab_end_foreign_access(priv->ring_ref, 336 (unsigned long)priv->shr); 337 else 338 free_page((unsigned long)priv->shr);
+1 -1
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
··· 148 149 /* end access and free the page */ 150 if (evtchnl->gref != GRANT_INVALID_REF) 151 - gnttab_end_foreign_access(evtchnl->gref, 0, page); 152 153 memset(evtchnl, 0, sizeof(*evtchnl)); 154 }
··· 148 149 /* end access and free the page */ 150 if (evtchnl->gref != GRANT_INVALID_REF) 151 + gnttab_end_foreign_access(evtchnl->gref, page); 152 153 memset(evtchnl, 0, sizeof(*evtchnl)); 154 }
+2 -2
drivers/input/misc/xen-kbdfront.c
··· 481 error_evtchan: 482 xenbus_free_evtchn(dev, evtchn); 483 error_grant: 484 - gnttab_end_foreign_access(info->gref, 0, 0UL); 485 info->gref = -1; 486 return ret; 487 } ··· 492 unbind_from_irqhandler(info->irq, info); 493 info->irq = -1; 494 if (info->gref >= 0) 495 - gnttab_end_foreign_access(info->gref, 0, 0UL); 496 info->gref = -1; 497 } 498
··· 481 error_evtchan: 482 xenbus_free_evtchn(dev, evtchn); 483 error_grant: 484 + gnttab_end_foreign_access(info->gref, 0UL); 485 info->gref = -1; 486 return ret; 487 } ··· 492 unbind_from_irqhandler(info->irq, info); 493 info->irq = -1; 494 if (info->gref >= 0) 495 + gnttab_end_foreign_access(info->gref, 0UL); 496 info->gref = -1; 497 } 498
+6 -7
drivers/net/xen-netfront.c
··· 425 skb = queue->tx_skbs[id]; 426 queue->tx_skbs[id] = NULL; 427 if (unlikely(!gnttab_end_foreign_access_ref( 428 - queue->grant_tx_ref[id], GNTMAP_readonly))) { 429 dev_alert(dev, 430 "Grant still in use by backend domain\n"); 431 goto err; ··· 1029 goto next; 1030 } 1031 1032 - if (!gnttab_end_foreign_access_ref(ref, 0)) { 1033 dev_alert(dev, 1034 "Grant still in use by backend domain\n"); 1035 queue->info->broken = true; ··· 1388 queue->tx_skbs[i] = NULL; 1389 get_page(queue->grant_tx_page[i]); 1390 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1391 - GNTMAP_readonly, 1392 (unsigned long)page_address(queue->grant_tx_page[i])); 1393 queue->grant_tx_page[i] = NULL; 1394 queue->grant_tx_ref[i] = GRANT_INVALID_REF; ··· 1420 * foreign access is ended (which may be deferred). 1421 */ 1422 get_page(page); 1423 - gnttab_end_foreign_access(ref, 0, 1424 (unsigned long)page_address(page)); 1425 queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1426 ··· 1762 { 1763 /* This frees the page as a side-effect */ 1764 if (ref != GRANT_INVALID_REF) 1765 - gnttab_end_foreign_access(ref, 0, (unsigned long)page); 1766 } 1767 1768 static void xennet_disconnect_backend(struct netfront_info *info) ··· 1979 */ 1980 fail: 1981 if (queue->rx_ring_ref != GRANT_INVALID_REF) { 1982 - gnttab_end_foreign_access(queue->rx_ring_ref, 0, 1983 (unsigned long)rxs); 1984 queue->rx_ring_ref = GRANT_INVALID_REF; 1985 } else { 1986 free_page((unsigned long)rxs); 1987 } 1988 if (queue->tx_ring_ref != GRANT_INVALID_REF) { 1989 - gnttab_end_foreign_access(queue->tx_ring_ref, 0, 1990 (unsigned long)txs); 1991 queue->tx_ring_ref = GRANT_INVALID_REF; 1992 } else {
··· 425 skb = queue->tx_skbs[id]; 426 queue->tx_skbs[id] = NULL; 427 if (unlikely(!gnttab_end_foreign_access_ref( 428 + queue->grant_tx_ref[id]))) { 429 dev_alert(dev, 430 "Grant still in use by backend domain\n"); 431 goto err; ··· 1029 goto next; 1030 } 1031 1032 + if (!gnttab_end_foreign_access_ref(ref)) { 1033 dev_alert(dev, 1034 "Grant still in use by backend domain\n"); 1035 queue->info->broken = true; ··· 1388 queue->tx_skbs[i] = NULL; 1389 get_page(queue->grant_tx_page[i]); 1390 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1391 (unsigned long)page_address(queue->grant_tx_page[i])); 1392 queue->grant_tx_page[i] = NULL; 1393 queue->grant_tx_ref[i] = GRANT_INVALID_REF; ··· 1421 * foreign access is ended (which may be deferred). 1422 */ 1423 get_page(page); 1424 + gnttab_end_foreign_access(ref, 1425 (unsigned long)page_address(page)); 1426 queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1427 ··· 1763 { 1764 /* This frees the page as a side-effect */ 1765 if (ref != GRANT_INVALID_REF) 1766 + gnttab_end_foreign_access(ref, (unsigned long)page); 1767 } 1768 1769 static void xennet_disconnect_backend(struct netfront_info *info) ··· 1980 */ 1981 fail: 1982 if (queue->rx_ring_ref != GRANT_INVALID_REF) { 1983 + gnttab_end_foreign_access(queue->rx_ring_ref, 1984 (unsigned long)rxs); 1985 queue->rx_ring_ref = GRANT_INVALID_REF; 1986 } else { 1987 free_page((unsigned long)rxs); 1988 } 1989 if (queue->tx_ring_ref != GRANT_INVALID_REF) { 1990 + gnttab_end_foreign_access(queue->tx_ring_ref, 1991 (unsigned long)txs); 1992 queue->tx_ring_ref = GRANT_INVALID_REF; 1993 } else {
+1 -1
drivers/pci/xen-pcifront.c
··· 755 xenbus_free_evtchn(pdev->xdev, pdev->evtchn); 756 757 if (pdev->gnt_ref != INVALID_GRANT_REF) 758 - gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */, 759 (unsigned long)pdev->sh_info); 760 else 761 free_page((unsigned long)pdev->sh_info);
··· 755 xenbus_free_evtchn(pdev->xdev, pdev->evtchn); 756 757 if (pdev->gnt_ref != INVALID_GRANT_REF) 758 + gnttab_end_foreign_access(pdev->gnt_ref, 759 (unsigned long)pdev->sh_info); 760 else 761 free_page((unsigned long)pdev->sh_info);
+2 -2
drivers/scsi/xen-scsifront.c
··· 757 free_irq: 758 unbind_from_irqhandler(info->irq, info); 759 free_gnttab: 760 - gnttab_end_foreign_access(info->ring_ref, 0, 761 (unsigned long)info->ring.sring); 762 763 return err; ··· 766 static void scsifront_free_ring(struct vscsifrnt_info *info) 767 { 768 unbind_from_irqhandler(info->irq, info); 769 - gnttab_end_foreign_access(info->ring_ref, 0, 770 (unsigned long)info->ring.sring); 771 } 772
··· 757 free_irq: 758 unbind_from_irqhandler(info->irq, info); 759 free_gnttab: 760 + gnttab_end_foreign_access(info->ring_ref, 761 (unsigned long)info->ring.sring); 762 763 return err; ··· 766 static void scsifront_free_ring(struct vscsifrnt_info *info) 767 { 768 unbind_from_irqhandler(info->irq, info); 769 + gnttab_end_foreign_access(info->ring_ref, 770 (unsigned long)info->ring.sring); 771 } 772
+2 -2
drivers/usb/host/xen-hcd.c
··· 1101 info->irq = 0; 1102 1103 if (info->urb_ring_ref != GRANT_INVALID_REF) { 1104 - gnttab_end_foreign_access(info->urb_ring_ref, 0, 1105 (unsigned long)info->urb_ring.sring); 1106 info->urb_ring_ref = GRANT_INVALID_REF; 1107 } 1108 info->urb_ring.sring = NULL; 1109 1110 if (info->conn_ring_ref != GRANT_INVALID_REF) { 1111 - gnttab_end_foreign_access(info->conn_ring_ref, 0, 1112 (unsigned long)info->conn_ring.sring); 1113 info->conn_ring_ref = GRANT_INVALID_REF; 1114 }
··· 1101 info->irq = 0; 1102 1103 if (info->urb_ring_ref != GRANT_INVALID_REF) { 1104 + gnttab_end_foreign_access(info->urb_ring_ref, 1105 (unsigned long)info->urb_ring.sring); 1106 info->urb_ring_ref = GRANT_INVALID_REF; 1107 } 1108 info->urb_ring.sring = NULL; 1109 1110 if (info->conn_ring_ref != GRANT_INVALID_REF) { 1111 + gnttab_end_foreign_access(info->conn_ring_ref, 1112 (unsigned long)info->conn_ring.sring); 1113 info->conn_ring_ref = GRANT_INVALID_REF; 1114 }
+2 -1
drivers/xen/balloon.c
··· 59 #include <linux/slab.h> 60 #include <linux/sysctl.h> 61 #include <linux/moduleparam.h> 62 63 #include <asm/page.h> 64 #include <asm/tlb.h> ··· 795 if (balloon_state == BP_ECANCELED) { 796 pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n", 797 -credit); 798 - if (jiffies - last_changed >= HZ * balloon_boot_timeout) 799 panic("Initial ballooning failed!\n"); 800 } 801
··· 59 #include <linux/slab.h> 60 #include <linux/sysctl.h> 61 #include <linux/moduleparam.h> 62 + #include <linux/jiffies.h> 63 64 #include <asm/page.h> 65 #include <asm/tlb.h> ··· 794 if (balloon_state == BP_ECANCELED) { 795 pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n", 796 -credit); 797 + if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout)) 798 panic("Initial ballooning failed!\n"); 799 } 800
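For reference, time_is_before_eq_jiffies() lives in include/linux/jiffies.h and, as best I recall, is defined via the wrap-safe time_after_eq() helper, so for timeouts far below the jiffies wrap window the open-coded test and the macro agree and the change is purely about readability:

    /* include/linux/jiffies.h (assumed definition): */
    #define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)

    /* the two forms of the balloon timeout check: */
    /* before */ if (jiffies - last_changed >= HZ * balloon_boot_timeout)
    /* after  */ if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))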
+1 -1
drivers/xen/gntalloc.c
··· 192 if (gref->gref_id) { 193 if (gref->page) { 194 addr = (unsigned long)page_to_virt(gref->page); 195 - gnttab_end_foreign_access(gref->gref_id, 0, addr); 196 } else 197 gnttab_free_grant_reference(gref->gref_id); 198 }
··· 192 if (gref->gref_id) { 193 if (gref->page) { 194 addr = (unsigned long)page_to_virt(gref->page); 195 + gnttab_end_foreign_access(gref->gref_id, addr); 196 } else 197 gnttab_free_grant_reference(gref->gref_id); 198 }
+1 -1
drivers/xen/gntdev-dmabuf.c
··· 533 534 for (i = 0; i < count; i++) 535 if (refs[i] != GRANT_INVALID_REF) 536 - gnttab_end_foreign_access(refs[i], 0, 0UL); 537 } 538 539 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
··· 533 534 for (i = 0; i < count; i++) 535 if (refs[i] != GRANT_INVALID_REF) 536 + gnttab_end_foreign_access(refs[i], 0UL); 537 } 538 539 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+18 -133
drivers/xen/grant-table.c
··· 109 void (*unmap_frames)(void); 110 /* 111 * Introducing a valid entry into the grant table, granting the frame of 112 - * this grant entry to domain for accessing or transfering. Ref 113 * parameter is reference of this introduced grant entry, domid is id of 114 * granted domain, frame is the page frame to be granted, and flags is 115 * status of the grant entry to be updated. ··· 118 unsigned long frame, unsigned flags); 119 /* 120 * Stop granting a grant entry to domain for accessing. Ref parameter is 121 - * reference of a grant entry whose grant access will be stopped, 122 - * readonly is not in use in this function. If the grant entry is 123 - * currently mapped for reading or writing, just return failure(==0) 124 - * directly and don't tear down the grant access. Otherwise, stop grant 125 - * access for this entry and return success(==1). 126 */ 127 - int (*end_foreign_access_ref)(grant_ref_t ref, int readonly); 128 - /* 129 - * Stop granting a grant entry to domain for transfer. Ref parameter is 130 - * reference of a grant entry whose grant transfer will be stopped. If 131 - * tranfer has not started, just reclaim the grant entry and return 132 - * failure(==0). Otherwise, wait for the transfer to complete and then 133 - * return the frame. 134 - */ 135 - unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); 136 /* 137 * Read the frame number related to a given grant reference. 138 */ ··· 221 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 222 * Introducing a valid entry into the grant table: 223 * 1. Write ent->domid. 224 - * 2. Write ent->frame: 225 - * GTF_permit_access: Frame to which access is permitted. 226 - * GTF_accept_transfer: Pseudo-phys frame slot being filled by new 227 - * frame, or zero if none. 228 * 3. Write memory barrier (WMB). 229 * 4. Write ent->flags, inc. valid type. 230 */ ··· 269 } 270 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); 271 272 - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) 273 { 274 u16 flags, nflags; 275 u16 *pflags; ··· 285 return 1; 286 } 287 288 - static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) 289 { 290 gnttab_shared.v2[ref].hdr.flags = 0; 291 mb(); /* Concurrent access by hypervisor. */ ··· 308 return 1; 309 } 310 311 - static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 312 { 313 - return gnttab_interface->end_foreign_access_ref(ref, readonly); 314 } 315 316 - int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 317 { 318 - if (_gnttab_end_foreign_access_ref(ref, readonly)) 319 return 1; 320 pr_warn("WARNING: g.e. %#x still in use!\n", ref); 321 return 0; ··· 335 struct deferred_entry { 336 struct list_head list; 337 grant_ref_t ref; 338 - bool ro; 339 uint16_t warn_delay; 340 struct page *page; 341 }; ··· 358 break; 359 list_del(&entry->list); 360 spin_unlock_irqrestore(&gnttab_list_lock, flags); 361 - if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { 362 put_free_entry(entry->ref); 363 pr_debug("freeing g.e. %#x (pfn %#lx)\n", 364 entry->ref, page_to_pfn(entry->page)); ··· 384 spin_unlock_irqrestore(&gnttab_list_lock, flags); 385 } 386 387 - static void gnttab_add_deferred(grant_ref_t ref, bool readonly, 388 - struct page *page) 389 { 390 struct deferred_entry *entry; 391 gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; ··· 402 unsigned long flags; 403 404 entry->ref = ref; 405 - entry->ro = readonly; 406 entry->page = page; 407 entry->warn_delay = 60; 408 spin_lock_irqsave(&gnttab_list_lock, flags); ··· 419 420 int gnttab_try_end_foreign_access(grant_ref_t ref) 421 { 422 - int ret = _gnttab_end_foreign_access_ref(ref, 0); 423 424 if (ret) 425 put_free_entry(ref); ··· 428 } 429 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); 430 431 - void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 432 - unsigned long page) 433 { 434 if (gnttab_try_end_foreign_access(ref)) { 435 if (page != 0) 436 put_page(virt_to_page(page)); 437 } else 438 - gnttab_add_deferred(ref, readonly, 439 - page ? virt_to_page(page) : NULL); 440 } 441 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); 442 - 443 - int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) 444 - { 445 - int ref; 446 - 447 - ref = get_free_entries(1); 448 - if (unlikely(ref < 0)) 449 - return -ENOSPC; 450 - gnttab_grant_foreign_transfer_ref(ref, domid, pfn); 451 - 452 - return ref; 453 - } 454 - EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); 455 - 456 - void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, 457 - unsigned long pfn) 458 - { 459 - gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); 460 - } 461 - EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); 462 - 463 - static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) 464 - { 465 - unsigned long frame; 466 - u16 flags; 467 - u16 *pflags; 468 - 469 - pflags = &gnttab_shared.v1[ref].flags; 470 - 471 - /* 472 - * If a transfer is not even yet started, try to reclaim the grant 473 - * reference and return failure (== 0). 474 - */ 475 - while (!((flags = *pflags) & GTF_transfer_committed)) { 476 - if (sync_cmpxchg(pflags, flags, 0) == flags) 477 - return 0; 478 - cpu_relax(); 479 - } 480 - 481 - /* If a transfer is in progress then wait until it is completed. */ 482 - while (!(flags & GTF_transfer_completed)) { 483 - flags = *pflags; 484 - cpu_relax(); 485 - } 486 - 487 - rmb(); /* Read the frame number /after/ reading completion status. */ 488 - frame = gnttab_shared.v1[ref].frame; 489 - BUG_ON(frame == 0); 490 - 491 - return frame; 492 - } 493 - 494 - static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref) 495 - { 496 - unsigned long frame; 497 - u16 flags; 498 - u16 *pflags; 499 - 500 - pflags = &gnttab_shared.v2[ref].hdr.flags; 501 - 502 - /* 503 - * If a transfer is not even yet started, try to reclaim the grant 504 - * reference and return failure (== 0). 505 - */ 506 - while (!((flags = *pflags) & GTF_transfer_committed)) { 507 - if (sync_cmpxchg(pflags, flags, 0) == flags) 508 - return 0; 509 - cpu_relax(); 510 - } 511 - 512 - /* If a transfer is in progress then wait until it is completed. */ 513 - while (!(flags & GTF_transfer_completed)) { 514 - flags = *pflags; 515 - cpu_relax(); 516 - } 517 - 518 - rmb(); /* Read the frame number /after/ reading completion status. */ 519 - frame = gnttab_shared.v2[ref].full_page.frame; 520 - BUG_ON(frame == 0); 521 - 522 - return frame; 523 - } 524 - 525 - unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) 526 - { 527 - return gnttab_interface->end_foreign_transfer_ref(ref); 528 - } 529 - EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); 530 - 531 - unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) 532 - { 533 - unsigned long frame = gnttab_end_foreign_transfer_ref(ref); 534 - put_free_entry(ref); 535 - return frame; 536 - } 537 - EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); 538 539 void gnttab_free_grant_reference(grant_ref_t ref) 540 { ··· 1310 .unmap_frames = gnttab_unmap_frames_v1, 1311 .update_entry = gnttab_update_entry_v1, 1312 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, 1313 - .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, 1314 .read_frame = gnttab_read_frame_v1, 1315 }; 1316 ··· 1321 .unmap_frames = gnttab_unmap_frames_v2, 1322 .update_entry = gnttab_update_entry_v2, 1323 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, 1324 - .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, 1325 .read_frame = gnttab_read_frame_v2, 1326 };
··· 109 void (*unmap_frames)(void); 110 /* 111 * Introducing a valid entry into the grant table, granting the frame of 112 + * this grant entry to domain for accessing. Ref 113 * parameter is reference of this introduced grant entry, domid is id of 114 * granted domain, frame is the page frame to be granted, and flags is 115 * status of the grant entry to be updated. ··· 118 unsigned long frame, unsigned flags); 119 /* 120 * Stop granting a grant entry to domain for accessing. Ref parameter is 121 + * reference of a grant entry whose grant access will be stopped. 122 + * If the grant entry is currently mapped for reading or writing, just 123 + * return failure(==0) directly and don't tear down the grant access. 124 + * Otherwise, stop grant access for this entry and return success(==1). 125 */ 126 + int (*end_foreign_access_ref)(grant_ref_t ref); 127 /* 128 * Read the frame number related to a given grant reference. 129 */ ··· 230 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 231 * Introducing a valid entry into the grant table: 232 * 1. Write ent->domid. 233 + * 2. Write ent->frame: Frame to which access is permitted. 234 * 3. Write memory barrier (WMB). 235 * 4. Write ent->flags, inc. valid type. 236 */ ··· 281 } 282 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); 283 284 + static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref) 285 { 286 u16 flags, nflags; 287 u16 *pflags; ··· 297 return 1; 298 } 299 300 + static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref) 301 { 302 gnttab_shared.v2[ref].hdr.flags = 0; 303 mb(); /* Concurrent access by hypervisor. */ ··· 320 return 1; 321 } 322 323 + static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref) 324 { 325 + return gnttab_interface->end_foreign_access_ref(ref); 326 } 327 328 + int gnttab_end_foreign_access_ref(grant_ref_t ref) 329 { 330 + if (_gnttab_end_foreign_access_ref(ref)) 331 return 1; 332 pr_warn("WARNING: g.e. %#x still in use!\n", ref); 333 return 0; ··· 347 struct deferred_entry { 348 struct list_head list; 349 grant_ref_t ref; 350 uint16_t warn_delay; 351 struct page *page; 352 }; ··· 371 break; 372 list_del(&entry->list); 373 spin_unlock_irqrestore(&gnttab_list_lock, flags); 374 + if (_gnttab_end_foreign_access_ref(entry->ref)) { 375 put_free_entry(entry->ref); 376 pr_debug("freeing g.e. %#x (pfn %#lx)\n", 377 entry->ref, page_to_pfn(entry->page)); ··· 397 spin_unlock_irqrestore(&gnttab_list_lock, flags); 398 } 399 400 + static void gnttab_add_deferred(grant_ref_t ref, struct page *page) 401 { 402 struct deferred_entry *entry; 403 gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; ··· 416 unsigned long flags; 417 418 entry->ref = ref; 419 entry->page = page; 420 entry->warn_delay = 60; 421 spin_lock_irqsave(&gnttab_list_lock, flags); ··· 434 435 int gnttab_try_end_foreign_access(grant_ref_t ref) 436 { 437 + int ret = _gnttab_end_foreign_access_ref(ref); 438 439 if (ret) 440 put_free_entry(ref); ··· 443 } 444 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); 445 446 + void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) 447 { 448 if (gnttab_try_end_foreign_access(ref)) { 449 if (page != 0) 450 put_page(virt_to_page(page)); 451 } else 452 + gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL); 453 } 454 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); 455 456 void gnttab_free_grant_reference(grant_ref_t ref) 457 { ··· 1423 .unmap_frames = gnttab_unmap_frames_v1, 1424 .update_entry = gnttab_update_entry_v1, 1425 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, 1426 .read_frame = gnttab_read_frame_v1, 1427 }; 1428 ··· 1435 .unmap_frames = gnttab_unmap_frames_v2, 1436 .update_entry = gnttab_update_entry_v2, 1437 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, 1438 .read_frame = gnttab_read_frame_v2, 1439 };
+2 -2
drivers/xen/manage.c
··· 141 142 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 143 144 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 145 146 if (err) { 147 pr_err("failed to start xen_suspend: %d\n", err); 148 si.cancelled = 1; 149 } 150 - 151 - xen_arch_resume(); 152 153 out_resume: 154 if (!si.cancelled)
··· 141 142 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 143 144 + xen_arch_resume(); 145 + 146 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 147 148 if (err) { 149 pr_err("failed to start xen_suspend: %d\n", err); 150 si.cancelled = 1; 151 } 152 153 out_resume: 154 if (!si.cancelled)
+3 -3
drivers/xen/pvcalls-front.c
··· 238 spin_unlock(&bedata->socket_lock); 239 240 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 241 - gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0); 242 - gnttab_end_foreign_access(map->active.ref, 0, 0); 243 free_page((unsigned long)map->active.ring); 244 245 kfree(map); ··· 1117 } 1118 } 1119 if (bedata->ref != -1) 1120 - gnttab_end_foreign_access(bedata->ref, 0, 0); 1121 kfree(bedata->ring.sring); 1122 kfree(bedata); 1123 xenbus_switch_state(dev, XenbusStateClosed);
··· 238 spin_unlock(&bedata->socket_lock); 239 240 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 241 + gnttab_end_foreign_access(map->active.ring->ref[i], 0); 242 + gnttab_end_foreign_access(map->active.ref, 0); 243 free_page((unsigned long)map->active.ring); 244 245 kfree(map); ··· 1117 } 1118 } 1119 if (bedata->ref != -1) 1120 + gnttab_end_foreign_access(bedata->ref, 0); 1121 kfree(bedata->ring.sring); 1122 kfree(bedata); 1123 xenbus_switch_state(dev, XenbusStateClosed);
+2 -3
drivers/xen/sys-hypervisor.c
··· 22 #endif 23 24 #define HYPERVISOR_ATTR_RO(_name) \ 25 - static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) 26 27 #define HYPERVISOR_ATTR_RW(_name) \ 28 - static struct hyp_sysfs_attr _name##_attr = \ 29 - __ATTR(_name, 0644, _name##_show, _name##_store) 30 31 struct hyp_sysfs_attr { 32 struct attribute attr;
··· 22 #endif 23 24 #define HYPERVISOR_ATTR_RO(_name) \ 25 + static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) 26 27 #define HYPERVISOR_ATTR_RW(_name) \ 28 + static struct hyp_sysfs_attr _name##_attr = __ATTR_RW(_name) 29 30 struct hyp_sysfs_attr { 31 struct attribute attr;
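The helper macro adopted here comes from include/linux/sysfs.h; assuming the usual definition, it expands to exactly what the driver used to spell out by hand:

    /* include/linux/sysfs.h (assumed definition): */
    #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)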
+1 -2
drivers/xen/xen-front-pgdir-shbuf.c
··· 143 144 for (i = 0; i < buf->num_grefs; i++) 145 if (buf->grefs[i] != GRANT_INVALID_REF) 146 - gnttab_end_foreign_access(buf->grefs[i], 147 - 0, 0UL); 148 } 149 kfree(buf->grefs); 150 kfree(buf->directory);
··· 143 144 for (i = 0; i < buf->num_grefs; i++) 145 if (buf->grefs[i] != GRANT_INVALID_REF) 146 + gnttab_end_foreign_access(buf->grefs[i], 0UL); 147 } 148 kfree(buf->grefs); 149 kfree(buf->directory);
+2 -11
include/xen/grant_table.h
··· 97 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in 98 * use. 99 */ 100 - int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); 101 102 /* 103 * Eventually end access through the given grant reference, and once that ··· 114 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing 115 * via free_pages_exact()) in order to avoid high order pages. 116 */ 117 - void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 118 - unsigned long page); 119 120 /* 121 * End access through the given grant reference, iff the grant entry is ··· 123 * Return 1 if the grant entry was freed, 0 if it is still in use. 124 */ 125 int gnttab_try_end_foreign_access(grant_ref_t ref); 126 - 127 - int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); 128 - 129 - unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); 130 - unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); 131 132 /* 133 * operations on reserved batches of grant references ··· 155 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), 156 readonly); 157 } 158 - 159 - void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, 160 - unsigned long pfn); 161 162 static inline void 163 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
··· 97 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in 98 * use. 99 */ 100 + int gnttab_end_foreign_access_ref(grant_ref_t ref); 101 102 /* 103 * Eventually end access through the given grant reference, and once that ··· 114 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing 115 * via free_pages_exact()) in order to avoid high order pages. 116 */ 117 + void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); 118 119 /* 120 * End access through the given grant reference, iff the grant entry is ··· 124 * Return 1 if the grant entry was freed, 0 if it is still in use. 125 */ 126 int gnttab_try_end_foreign_access(grant_ref_t ref); 127 128 /* 129 * operations on reserved batches of grant references ··· 161 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), 162 readonly); 163 } 164 165 static inline void 166 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
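For out-of-tree frontends tracking this API, the conversion is mechanical: drop the readonly argument, which the access-ending paths were already ignoring (per the old comment in grant-table.c above). A before/after sketch with hypothetical identifiers (my_ref, my_page):

    /* before this series: the middle argument was an unused readonly flag */
    gnttab_end_foreign_access(my_ref, 0 /* readonly */, (unsigned long)my_page);

    /* after: just the grant reference and the page to free (0 if none) */
    gnttab_end_foreign_access(my_ref, (unsigned long)my_page);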
+4 -4
net/9p/trans_xen.c
··· 279 grant_ref_t ref; 280 281 ref = priv->rings[i].intf->ref[j]; 282 - gnttab_end_foreign_access(ref, 0, 0); 283 } 284 free_pages_exact(priv->rings[i].data.in, 285 1UL << (priv->rings[i].intf->ring_order + 286 XEN_PAGE_SHIFT)); 287 } 288 - gnttab_end_foreign_access(priv->rings[i].ref, 0, 0); 289 free_page((unsigned long)priv->rings[i].intf); 290 } 291 kfree(priv->rings); ··· 353 out: 354 if (bytes) { 355 for (i--; i >= 0; i--) 356 - gnttab_end_foreign_access(ring->intf->ref[i], 0, 0); 357 free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT)); 358 } 359 - gnttab_end_foreign_access(ring->ref, 0, 0); 360 free_page((unsigned long)ring->intf); 361 return ret; 362 }
··· 279 grant_ref_t ref; 280 281 ref = priv->rings[i].intf->ref[j]; 282 + gnttab_end_foreign_access(ref, 0); 283 } 284 free_pages_exact(priv->rings[i].data.in, 285 1UL << (priv->rings[i].intf->ring_order + 286 XEN_PAGE_SHIFT)); 287 } 288 + gnttab_end_foreign_access(priv->rings[i].ref, 0); 289 free_page((unsigned long)priv->rings[i].intf); 290 } 291 kfree(priv->rings); ··· 353 out: 354 if (bytes) { 355 for (i--; i >= 0; i--) 356 + gnttab_end_foreign_access(ring->intf->ref[i], 0); 357 free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT)); 358 } 359 + gnttab_end_foreign_access(ring->ref, 0); 360 free_page((unsigned long)ring->intf); 361 return ret; 362 }
+1 -1
sound/xen/xen_snd_front_evtchnl.c
··· 168 169 /* End access and free the page. */ 170 if (channel->gref != GRANT_INVALID_REF) 171 - gnttab_end_foreign_access(channel->gref, 0, page); 172 else 173 free_page(page); 174
··· 168 169 /* End access and free the page. */ 170 if (channel->gref != GRANT_INVALID_REF) 171 + gnttab_end_foreign_access(channel->gref, page); 172 else 173 free_page(page); 174