Merge tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- A bunch of minor cleanups

- A fix for kexec in Xen dom0 when executed on a vcpu with a high number

- A fix for resuming after suspend of a Xen guest with assigned PCI
devices

- A fix for a crash due to preemption not being disabled when resuming
  as Xen dom0

* tag 'for-linus-5.18-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen: fix is_xen_pmu()
xen: don't hang when resuming PCI device
arch:x86:xen: Remove unnecessary assignment in xen_apic_read()
xen/grant-table: remove readonly parameter from functions
xen/grant-table: remove gnttab_*transfer*() functions
drivers/xen: use helper macro __ATTR_RW
x86/xen: Fix kerneldoc warning
xen: delay xen_hvm_init_time_ops() if kdump is boot on vcpu>=32
xen: use time_is_before_eq_jiffies() instead of open coding it

+94 -193
+1 -1
arch/x86/xen/apic.c
··· 51 51 .interface_version = XENPF_INTERFACE_VERSION, 52 52 .u.pcpu_info.xen_cpuid = 0, 53 53 }; 54 - int ret = 0; 54 + int ret; 55 55 56 56 /* Shouldn't need this as APIC is turned off for PV, and we only 57 57 * get called on the bootup processor. But just in case. */
+4 -6
arch/x86/xen/pmu.c
··· 506 506 return ret; 507 507 } 508 508 509 - bool is_xen_pmu(int cpu) 510 - { 511 - return (get_xenpmu_data() != NULL); 512 - } 509 + bool is_xen_pmu; 513 510 514 511 void xen_pmu_init(int cpu) 515 512 { ··· 517 520 518 521 BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); 519 522 520 - if (xen_hvm_domain()) 523 + if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) 521 524 return; 522 525 523 526 xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); ··· 538 541 per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; 539 542 per_cpu(xenpmu_shared, cpu).flags = 0; 540 543 541 - if (cpu == 0) { 544 + if (!is_xen_pmu) { 545 + is_xen_pmu = true; 542 546 perf_register_guest_info_callbacks(&xen_guest_cbs); 543 547 xen_pmu_arch_init(); 544 548 }
+2 -1
arch/x86/xen/pmu.h
··· 4 4 5 5 #include <xen/interface/xenpmu.h> 6 6 7 + extern bool is_xen_pmu; 8 + 7 9 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); 8 10 #ifdef CONFIG_XEN_HAVE_VPMU 9 11 void xen_pmu_init(int cpu); ··· 14 12 static inline void xen_pmu_init(int cpu) {} 15 13 static inline void xen_pmu_finish(int cpu) {} 16 14 #endif 17 - bool is_xen_pmu(int cpu); 18 15 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); 19 16 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); 20 17 int pmu_apic_update(uint32_t reg);
+1 -1
arch/x86/xen/setup.c
··· 719 719 } 720 720 721 721 /** 722 - * machine_specific_memory_setup - Hook for machine specific memory setup. 722 + * xen_memory_setup - Hook for machine specific memory setup. 723 723 **/ 724 724 char * __init xen_memory_setup(void) 725 725 {
+6
arch/x86/xen/smp_hvm.c
··· 20 20 xen_vcpu_setup(0); 21 21 22 22 /* 23 + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. 24 + * Refer to comments in xen_hvm_init_time_ops(). 25 + */ 26 + xen_hvm_init_time_ops(); 27 + 28 + /* 23 29 * The alternative logic (which patches the unlock/lock) runs before 24 30 * the smp bootup up code is activated. Hence we need to set this up 25 31 * the core kernel is being patched. Otherwise we will have only
+1 -1
arch/x86/xen/smp_pv.c
··· 129 129 per_cpu(xen_irq_work, cpu).irq = rc; 130 130 per_cpu(xen_irq_work, cpu).name = callfunc_name; 131 131 132 - if (is_xen_pmu(cpu)) { 132 + if (is_xen_pmu) { 133 133 pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); 134 134 rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, 135 135 xen_pmu_irq_handler,
+23 -1
arch/x86/xen/time.c
··· 558 558 559 559 void __init xen_hvm_init_time_ops(void) 560 560 { 561 + static bool hvm_time_initialized; 562 + 563 + if (hvm_time_initialized) 564 + return; 565 + 561 566 /* 562 567 * vector callback is needed otherwise we cannot receive interrupts 563 568 * on cpu > 0 and at this point we don't know how many cpus are ··· 572 567 return; 573 568 574 569 if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { 575 - pr_info("Xen doesn't support pvclock on HVM, disable pv timer"); 570 + pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); 571 + return; 572 + } 573 + 574 + /* 575 + * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. 576 + * The __this_cpu_read(xen_vcpu) is still NULL when Xen HVM guest 577 + * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec), To access 578 + * __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic. 579 + * 580 + * The xen_hvm_init_time_ops() should be called again later after 581 + * __this_cpu_read(xen_vcpu) is available. 582 + */ 583 + if (!__this_cpu_read(xen_vcpu)) { 584 + pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", 585 + xen_vcpu_nr(0)); 576 586 return; 577 587 } 578 588 ··· 597 577 x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; 598 578 599 579 x86_platform.set_wallclock = xen_set_wallclock; 580 + 581 + hvm_time_initialized = true; 600 582 } 601 583 #endif 602 584
+4 -4
drivers/block/xen-blkfront.c
··· 1223 1223 list_del(&persistent_gnt->node); 1224 1224 if (persistent_gnt->gref != GRANT_INVALID_REF) { 1225 1225 gnttab_end_foreign_access(persistent_gnt->gref, 1226 - 0, 0UL); 1226 + 0UL); 1227 1227 rinfo->persistent_gnts_c--; 1228 1228 } 1229 1229 if (info->feature_persistent) ··· 1246 1246 rinfo->shadow[i].req.u.rw.nr_segments; 1247 1247 for (j = 0; j < segs; j++) { 1248 1248 persistent_gnt = rinfo->shadow[i].grants_used[j]; 1249 - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 1249 + gnttab_end_foreign_access(persistent_gnt->gref, 0UL); 1250 1250 if (info->feature_persistent) 1251 1251 __free_page(persistent_gnt->page); 1252 1252 kfree(persistent_gnt); ··· 1261 1261 1262 1262 for (j = 0; j < INDIRECT_GREFS(segs); j++) { 1263 1263 persistent_gnt = rinfo->shadow[i].indirect_grants[j]; 1264 - gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); 1264 + gnttab_end_foreign_access(persistent_gnt->gref, 0UL); 1265 1265 __free_page(persistent_gnt->page); 1266 1266 kfree(persistent_gnt); 1267 1267 } ··· 1284 1284 /* Free resources associated with old device channel. */ 1285 1285 for (i = 0; i < info->nr_ring_pages; i++) { 1286 1286 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) { 1287 - gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0); 1287 + gnttab_end_foreign_access(rinfo->ring_ref[i], 0); 1288 1288 rinfo->ring_ref[i] = GRANT_INVALID_REF; 1289 1289 } 1290 1290 }
+1 -1
drivers/char/tpm/xen-tpmfront.c
··· 332 332 return; 333 333 334 334 if (priv->ring_ref) 335 - gnttab_end_foreign_access(priv->ring_ref, 0, 335 + gnttab_end_foreign_access(priv->ring_ref, 336 336 (unsigned long)priv->shr); 337 337 else 338 338 free_page((unsigned long)priv->shr);
+1 -1
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
··· 148 148 149 149 /* end access and free the page */ 150 150 if (evtchnl->gref != GRANT_INVALID_REF) 151 - gnttab_end_foreign_access(evtchnl->gref, 0, page); 151 + gnttab_end_foreign_access(evtchnl->gref, page); 152 152 153 153 memset(evtchnl, 0, sizeof(*evtchnl)); 154 154 }
+2 -2
drivers/input/misc/xen-kbdfront.c
··· 481 481 error_evtchan: 482 482 xenbus_free_evtchn(dev, evtchn); 483 483 error_grant: 484 - gnttab_end_foreign_access(info->gref, 0, 0UL); 484 + gnttab_end_foreign_access(info->gref, 0UL); 485 485 info->gref = -1; 486 486 return ret; 487 487 } ··· 492 492 unbind_from_irqhandler(info->irq, info); 493 493 info->irq = -1; 494 494 if (info->gref >= 0) 495 - gnttab_end_foreign_access(info->gref, 0, 0UL); 495 + gnttab_end_foreign_access(info->gref, 0UL); 496 496 info->gref = -1; 497 497 } 498 498
+6 -7
drivers/net/xen-netfront.c
··· 425 425 skb = queue->tx_skbs[id]; 426 426 queue->tx_skbs[id] = NULL; 427 427 if (unlikely(!gnttab_end_foreign_access_ref( 428 - queue->grant_tx_ref[id], GNTMAP_readonly))) { 428 + queue->grant_tx_ref[id]))) { 429 429 dev_alert(dev, 430 430 "Grant still in use by backend domain\n"); 431 431 goto err; ··· 1029 1029 goto next; 1030 1030 } 1031 1031 1032 - if (!gnttab_end_foreign_access_ref(ref, 0)) { 1032 + if (!gnttab_end_foreign_access_ref(ref)) { 1033 1033 dev_alert(dev, 1034 1034 "Grant still in use by backend domain\n"); 1035 1035 queue->info->broken = true; ··· 1388 1388 queue->tx_skbs[i] = NULL; 1389 1389 get_page(queue->grant_tx_page[i]); 1390 1390 gnttab_end_foreign_access(queue->grant_tx_ref[i], 1391 - GNTMAP_readonly, 1392 1391 (unsigned long)page_address(queue->grant_tx_page[i])); 1393 1392 queue->grant_tx_page[i] = NULL; 1394 1393 queue->grant_tx_ref[i] = GRANT_INVALID_REF; ··· 1420 1421 * foreign access is ended (which may be deferred). 1421 1422 */ 1422 1423 get_page(page); 1423 - gnttab_end_foreign_access(ref, 0, 1424 + gnttab_end_foreign_access(ref, 1424 1425 (unsigned long)page_address(page)); 1425 1426 queue->grant_rx_ref[id] = GRANT_INVALID_REF; 1426 1427 ··· 1762 1763 { 1763 1764 /* This frees the page as a side-effect */ 1764 1765 if (ref != GRANT_INVALID_REF) 1765 - gnttab_end_foreign_access(ref, 0, (unsigned long)page); 1766 + gnttab_end_foreign_access(ref, (unsigned long)page); 1766 1767 } 1767 1768 1768 1769 static void xennet_disconnect_backend(struct netfront_info *info) ··· 1979 1980 */ 1980 1981 fail: 1981 1982 if (queue->rx_ring_ref != GRANT_INVALID_REF) { 1982 - gnttab_end_foreign_access(queue->rx_ring_ref, 0, 1983 + gnttab_end_foreign_access(queue->rx_ring_ref, 1983 1984 (unsigned long)rxs); 1984 1985 queue->rx_ring_ref = GRANT_INVALID_REF; 1985 1986 } else { 1986 1987 free_page((unsigned long)rxs); 1987 1988 } 1988 1989 if (queue->tx_ring_ref != GRANT_INVALID_REF) { 1989 - gnttab_end_foreign_access(queue->tx_ring_ref, 0, 1990 + 
gnttab_end_foreign_access(queue->tx_ring_ref, 1990 1991 (unsigned long)txs); 1991 1992 queue->tx_ring_ref = GRANT_INVALID_REF; 1992 1993 } else {
+1 -1
drivers/pci/xen-pcifront.c
··· 755 755 xenbus_free_evtchn(pdev->xdev, pdev->evtchn); 756 756 757 757 if (pdev->gnt_ref != INVALID_GRANT_REF) 758 - gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */, 758 + gnttab_end_foreign_access(pdev->gnt_ref, 759 759 (unsigned long)pdev->sh_info); 760 760 else 761 761 free_page((unsigned long)pdev->sh_info);
+2 -2
drivers/scsi/xen-scsifront.c
··· 757 757 free_irq: 758 758 unbind_from_irqhandler(info->irq, info); 759 759 free_gnttab: 760 - gnttab_end_foreign_access(info->ring_ref, 0, 760 + gnttab_end_foreign_access(info->ring_ref, 761 761 (unsigned long)info->ring.sring); 762 762 763 763 return err; ··· 766 766 static void scsifront_free_ring(struct vscsifrnt_info *info) 767 767 { 768 768 unbind_from_irqhandler(info->irq, info); 769 - gnttab_end_foreign_access(info->ring_ref, 0, 769 + gnttab_end_foreign_access(info->ring_ref, 770 770 (unsigned long)info->ring.sring); 771 771 } 772 772
+2 -2
drivers/usb/host/xen-hcd.c
··· 1101 1101 info->irq = 0; 1102 1102 1103 1103 if (info->urb_ring_ref != GRANT_INVALID_REF) { 1104 - gnttab_end_foreign_access(info->urb_ring_ref, 0, 1104 + gnttab_end_foreign_access(info->urb_ring_ref, 1105 1105 (unsigned long)info->urb_ring.sring); 1106 1106 info->urb_ring_ref = GRANT_INVALID_REF; 1107 1107 } 1108 1108 info->urb_ring.sring = NULL; 1109 1109 1110 1110 if (info->conn_ring_ref != GRANT_INVALID_REF) { 1111 - gnttab_end_foreign_access(info->conn_ring_ref, 0, 1111 + gnttab_end_foreign_access(info->conn_ring_ref, 1112 1112 (unsigned long)info->conn_ring.sring); 1113 1113 info->conn_ring_ref = GRANT_INVALID_REF; 1114 1114 }
+2 -1
drivers/xen/balloon.c
··· 59 59 #include <linux/slab.h> 60 60 #include <linux/sysctl.h> 61 61 #include <linux/moduleparam.h> 62 + #include <linux/jiffies.h> 62 63 63 64 #include <asm/page.h> 64 65 #include <asm/tlb.h> ··· 795 794 if (balloon_state == BP_ECANCELED) { 796 795 pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n", 797 796 -credit); 798 - if (jiffies - last_changed >= HZ * balloon_boot_timeout) 797 + if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout)) 799 798 panic("Initial ballooning failed!\n"); 800 799 } 801 800
+1 -1
drivers/xen/gntalloc.c
··· 192 192 if (gref->gref_id) { 193 193 if (gref->page) { 194 194 addr = (unsigned long)page_to_virt(gref->page); 195 - gnttab_end_foreign_access(gref->gref_id, 0, addr); 195 + gnttab_end_foreign_access(gref->gref_id, addr); 196 196 } else 197 197 gnttab_free_grant_reference(gref->gref_id); 198 198 }
+1 -1
drivers/xen/gntdev-dmabuf.c
··· 533 533 534 534 for (i = 0; i < count; i++) 535 535 if (refs[i] != GRANT_INVALID_REF) 536 - gnttab_end_foreign_access(refs[i], 0, 0UL); 536 + gnttab_end_foreign_access(refs[i], 0UL); 537 537 } 538 538 539 539 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
+18 -133
drivers/xen/grant-table.c
··· 109 109 void (*unmap_frames)(void); 110 110 /* 111 111 * Introducing a valid entry into the grant table, granting the frame of 112 - * this grant entry to domain for accessing or transfering. Ref 112 + * this grant entry to domain for accessing. Ref 113 113 * parameter is reference of this introduced grant entry, domid is id of 114 114 * granted domain, frame is the page frame to be granted, and flags is 115 115 * status of the grant entry to be updated. ··· 118 118 unsigned long frame, unsigned flags); 119 119 /* 120 120 * Stop granting a grant entry to domain for accessing. Ref parameter is 121 - * reference of a grant entry whose grant access will be stopped, 122 - * readonly is not in use in this function. If the grant entry is 123 - * currently mapped for reading or writing, just return failure(==0) 124 - * directly and don't tear down the grant access. Otherwise, stop grant 125 - * access for this entry and return success(==1). 121 + * reference of a grant entry whose grant access will be stopped. 122 + * If the grant entry is currently mapped for reading or writing, just 123 + * return failure(==0) directly and don't tear down the grant access. 124 + * Otherwise, stop grant access for this entry and return success(==1). 126 125 */ 127 - int (*end_foreign_access_ref)(grant_ref_t ref, int readonly); 128 - /* 129 - * Stop granting a grant entry to domain for transfer. Ref parameter is 130 - * reference of a grant entry whose grant transfer will be stopped. If 131 - * tranfer has not started, just reclaim the grant entry and return 132 - * failure(==0). Otherwise, wait for the transfer to complete and then 133 - * return the frame. 134 - */ 135 - unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); 126 + int (*end_foreign_access_ref)(grant_ref_t ref); 136 127 /* 137 128 * Read the frame number related to a given grant reference. 138 129 */ ··· 221 230 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 
222 231 * Introducing a valid entry into the grant table: 223 232 * 1. Write ent->domid. 224 - * 2. Write ent->frame: 225 - * GTF_permit_access: Frame to which access is permitted. 226 - * GTF_accept_transfer: Pseudo-phys frame slot being filled by new 227 - * frame, or zero if none. 233 + * 2. Write ent->frame: Frame to which access is permitted. 228 234 * 3. Write memory barrier (WMB). 229 235 * 4. Write ent->flags, inc. valid type. 230 236 */ ··· 269 281 } 270 282 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); 271 283 272 - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) 284 + static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref) 273 285 { 274 286 u16 flags, nflags; 275 287 u16 *pflags; ··· 285 297 return 1; 286 298 } 287 299 288 - static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly) 300 + static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref) 289 301 { 290 302 gnttab_shared.v2[ref].hdr.flags = 0; 291 303 mb(); /* Concurrent access by hypervisor. */ ··· 308 320 return 1; 309 321 } 310 322 311 - static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 323 + static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref) 312 324 { 313 - return gnttab_interface->end_foreign_access_ref(ref, readonly); 325 + return gnttab_interface->end_foreign_access_ref(ref); 314 326 } 315 327 316 - int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 328 + int gnttab_end_foreign_access_ref(grant_ref_t ref) 317 329 { 318 - if (_gnttab_end_foreign_access_ref(ref, readonly)) 330 + if (_gnttab_end_foreign_access_ref(ref)) 319 331 return 1; 320 332 pr_warn("WARNING: g.e. 
%#x still in use!\n", ref); 321 333 return 0; ··· 335 347 struct deferred_entry { 336 348 struct list_head list; 337 349 grant_ref_t ref; 338 - bool ro; 339 350 uint16_t warn_delay; 340 351 struct page *page; 341 352 }; ··· 358 371 break; 359 372 list_del(&entry->list); 360 373 spin_unlock_irqrestore(&gnttab_list_lock, flags); 361 - if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { 374 + if (_gnttab_end_foreign_access_ref(entry->ref)) { 362 375 put_free_entry(entry->ref); 363 376 pr_debug("freeing g.e. %#x (pfn %#lx)\n", 364 377 entry->ref, page_to_pfn(entry->page)); ··· 384 397 spin_unlock_irqrestore(&gnttab_list_lock, flags); 385 398 } 386 399 387 - static void gnttab_add_deferred(grant_ref_t ref, bool readonly, 388 - struct page *page) 400 + static void gnttab_add_deferred(grant_ref_t ref, struct page *page) 389 401 { 390 402 struct deferred_entry *entry; 391 403 gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; ··· 402 416 unsigned long flags; 403 417 404 418 entry->ref = ref; 405 - entry->ro = readonly; 406 419 entry->page = page; 407 420 entry->warn_delay = 60; 408 421 spin_lock_irqsave(&gnttab_list_lock, flags); ··· 419 434 420 435 int gnttab_try_end_foreign_access(grant_ref_t ref) 421 436 { 422 - int ret = _gnttab_end_foreign_access_ref(ref, 0); 437 + int ret = _gnttab_end_foreign_access_ref(ref); 423 438 424 439 if (ret) 425 440 put_free_entry(ref); ··· 428 443 } 429 444 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); 430 445 431 - void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 432 - unsigned long page) 446 + void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page) 433 447 { 434 448 if (gnttab_try_end_foreign_access(ref)) { 435 449 if (page != 0) 436 450 put_page(virt_to_page(page)); 437 451 } else 438 - gnttab_add_deferred(ref, readonly, 439 - page ? virt_to_page(page) : NULL); 452 + gnttab_add_deferred(ref, page ? 
virt_to_page(page) : NULL); 440 453 } 441 454 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); 442 - 443 - int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) 444 - { 445 - int ref; 446 - 447 - ref = get_free_entries(1); 448 - if (unlikely(ref < 0)) 449 - return -ENOSPC; 450 - gnttab_grant_foreign_transfer_ref(ref, domid, pfn); 451 - 452 - return ref; 453 - } 454 - EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); 455 - 456 - void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, 457 - unsigned long pfn) 458 - { 459 - gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer); 460 - } 461 - EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); 462 - 463 - static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref) 464 - { 465 - unsigned long frame; 466 - u16 flags; 467 - u16 *pflags; 468 - 469 - pflags = &gnttab_shared.v1[ref].flags; 470 - 471 - /* 472 - * If a transfer is not even yet started, try to reclaim the grant 473 - * reference and return failure (== 0). 474 - */ 475 - while (!((flags = *pflags) & GTF_transfer_committed)) { 476 - if (sync_cmpxchg(pflags, flags, 0) == flags) 477 - return 0; 478 - cpu_relax(); 479 - } 480 - 481 - /* If a transfer is in progress then wait until it is completed. */ 482 - while (!(flags & GTF_transfer_completed)) { 483 - flags = *pflags; 484 - cpu_relax(); 485 - } 486 - 487 - rmb(); /* Read the frame number /after/ reading completion status. */ 488 - frame = gnttab_shared.v1[ref].frame; 489 - BUG_ON(frame == 0); 490 - 491 - return frame; 492 - } 493 - 494 - static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref) 495 - { 496 - unsigned long frame; 497 - u16 flags; 498 - u16 *pflags; 499 - 500 - pflags = &gnttab_shared.v2[ref].hdr.flags; 501 - 502 - /* 503 - * If a transfer is not even yet started, try to reclaim the grant 504 - * reference and return failure (== 0). 
505 - */ 506 - while (!((flags = *pflags) & GTF_transfer_committed)) { 507 - if (sync_cmpxchg(pflags, flags, 0) == flags) 508 - return 0; 509 - cpu_relax(); 510 - } 511 - 512 - /* If a transfer is in progress then wait until it is completed. */ 513 - while (!(flags & GTF_transfer_completed)) { 514 - flags = *pflags; 515 - cpu_relax(); 516 - } 517 - 518 - rmb(); /* Read the frame number /after/ reading completion status. */ 519 - frame = gnttab_shared.v2[ref].full_page.frame; 520 - BUG_ON(frame == 0); 521 - 522 - return frame; 523 - } 524 - 525 - unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref) 526 - { 527 - return gnttab_interface->end_foreign_transfer_ref(ref); 528 - } 529 - EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref); 530 - 531 - unsigned long gnttab_end_foreign_transfer(grant_ref_t ref) 532 - { 533 - unsigned long frame = gnttab_end_foreign_transfer_ref(ref); 534 - put_free_entry(ref); 535 - return frame; 536 - } 537 - EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer); 538 455 539 456 void gnttab_free_grant_reference(grant_ref_t ref) 540 457 { ··· 1310 1423 .unmap_frames = gnttab_unmap_frames_v1, 1311 1424 .update_entry = gnttab_update_entry_v1, 1312 1425 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, 1313 - .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, 1314 1426 .read_frame = gnttab_read_frame_v1, 1315 1427 }; 1316 1428 ··· 1321 1435 .unmap_frames = gnttab_unmap_frames_v2, 1322 1436 .update_entry = gnttab_update_entry_v2, 1323 1437 .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, 1324 - .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, 1325 1438 .read_frame = gnttab_read_frame_v2, 1326 1439 }; 1327 1440
+2 -2
drivers/xen/manage.c
··· 141 141 142 142 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 143 143 144 + xen_arch_resume(); 145 + 144 146 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 145 147 146 148 if (err) { 147 149 pr_err("failed to start xen_suspend: %d\n", err); 148 150 si.cancelled = 1; 149 151 } 150 - 151 - xen_arch_resume(); 152 152 153 153 out_resume: 154 154 if (!si.cancelled)
+3 -3
drivers/xen/pvcalls-front.c
··· 238 238 spin_unlock(&bedata->socket_lock); 239 239 240 240 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 241 - gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0); 242 - gnttab_end_foreign_access(map->active.ref, 0, 0); 241 + gnttab_end_foreign_access(map->active.ring->ref[i], 0); 242 + gnttab_end_foreign_access(map->active.ref, 0); 243 243 free_page((unsigned long)map->active.ring); 244 244 245 245 kfree(map); ··· 1117 1117 } 1118 1118 } 1119 1119 if (bedata->ref != -1) 1120 - gnttab_end_foreign_access(bedata->ref, 0, 0); 1120 + gnttab_end_foreign_access(bedata->ref, 0); 1121 1121 kfree(bedata->ring.sring); 1122 1122 kfree(bedata); 1123 1123 xenbus_switch_state(dev, XenbusStateClosed);
+2 -3
drivers/xen/sys-hypervisor.c
··· 22 22 #endif 23 23 24 24 #define HYPERVISOR_ATTR_RO(_name) \ 25 - static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) 25 + static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name) 26 26 27 27 #define HYPERVISOR_ATTR_RW(_name) \ 28 - static struct hyp_sysfs_attr _name##_attr = \ 29 - __ATTR(_name, 0644, _name##_show, _name##_store) 28 + static struct hyp_sysfs_attr _name##_attr = __ATTR_RW(_name) 30 29 31 30 struct hyp_sysfs_attr { 32 31 struct attribute attr;
+1 -2
drivers/xen/xen-front-pgdir-shbuf.c
··· 143 143 144 144 for (i = 0; i < buf->num_grefs; i++) 145 145 if (buf->grefs[i] != GRANT_INVALID_REF) 146 - gnttab_end_foreign_access(buf->grefs[i], 147 - 0, 0UL); 146 + gnttab_end_foreign_access(buf->grefs[i], 0UL); 148 147 } 149 148 kfree(buf->grefs); 150 149 kfree(buf->directory);
+2 -11
include/xen/grant_table.h
··· 97 97 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in 98 98 * use. 99 99 */ 100 - int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); 100 + int gnttab_end_foreign_access_ref(grant_ref_t ref); 101 101 102 102 /* 103 103 * Eventually end access through the given grant reference, and once that ··· 114 114 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing 115 115 * via free_pages_exact()) in order to avoid high order pages. 116 116 */ 117 - void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 118 - unsigned long page); 117 + void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page); 119 118 120 119 /* 121 120 * End access through the given grant reference, iff the grant entry is ··· 123 124 * Return 1 if the grant entry was freed, 0 if it is still in use. 124 125 */ 125 126 int gnttab_try_end_foreign_access(grant_ref_t ref); 126 - 127 - int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); 128 - 129 - unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); 130 - unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); 131 127 132 128 /* 133 129 * operations on reserved batches of grant references ··· 155 161 gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page), 156 162 readonly); 157 163 } 158 - 159 - void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, 160 - unsigned long pfn); 161 164 162 165 static inline void 163 166 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
+4 -4
net/9p/trans_xen.c
··· 279 279 grant_ref_t ref; 280 280 281 281 ref = priv->rings[i].intf->ref[j]; 282 - gnttab_end_foreign_access(ref, 0, 0); 282 + gnttab_end_foreign_access(ref, 0); 283 283 } 284 284 free_pages_exact(priv->rings[i].data.in, 285 285 1UL << (priv->rings[i].intf->ring_order + 286 286 XEN_PAGE_SHIFT)); 287 287 } 288 - gnttab_end_foreign_access(priv->rings[i].ref, 0, 0); 288 + gnttab_end_foreign_access(priv->rings[i].ref, 0); 289 289 free_page((unsigned long)priv->rings[i].intf); 290 290 } 291 291 kfree(priv->rings); ··· 353 353 out: 354 354 if (bytes) { 355 355 for (i--; i >= 0; i--) 356 - gnttab_end_foreign_access(ring->intf->ref[i], 0, 0); 356 + gnttab_end_foreign_access(ring->intf->ref[i], 0); 357 357 free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT)); 358 358 } 359 - gnttab_end_foreign_access(ring->ref, 0, 0); 359 + gnttab_end_foreign_access(ring->ref, 0); 360 360 free_page((unsigned long)ring->intf); 361 361 return ret; 362 362 }
+1 -1
sound/xen/xen_snd_front_evtchnl.c
··· 168 168 169 169 /* End access and free the page. */ 170 170 if (channel->gref != GRANT_INVALID_REF) 171 - gnttab_end_foreign_access(channel->gref, 0, page); 171 + gnttab_end_foreign_access(channel->gref, page); 172 172 else 173 173 free_page(page); 174 174