Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen bug fixes from David Vrabel:

- fix blkback regression if using persistent grants

- fix various event channel related suspend/resume bugs

- fix AMD x86 regression with X86_BUG_SYSRET_SS_ATTRS

- SWIOTLB on ARM now uses frames <4 GiB (if available) so devices only
capable of 32-bit DMA work.

* tag 'for-linus-4.1b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen: Add __GFP_DMA flag when xen_swiotlb_init gets free pages on ARM
hypervisor/x86/xen: Unset X86_BUG_SYSRET_SS_ATTRS on Xen PV guests
xen/events: Set irq_info->evtchn before binding the channel to CPU in __startup_pirq()
xen/console: Update console event channel on resume
xen/xenbus: Update xenbus event channel on resume
xen/events: Clear cpu_evtchn_mask before resuming
xen-pciback: Add name prefix to global 'permissive' variable
xen: Suspend ticks on all CPUs during suspend
xen/grant: introduce func gnttab_unmap_refs_sync()
xen/blkback: safely unmap purge persistent grants

+169 -75
+1
arch/arm/include/asm/xen/page.h
··· 110 110 bool xen_arch_need_swiotlb(struct device *dev, 111 111 unsigned long pfn, 112 112 unsigned long mfn); 113 + unsigned long xen_get_swiotlb_free_pages(unsigned int order); 113 114 114 115 #endif /* _ASM_ARM_XEN_PAGE_H */
+15
arch/arm/xen/mm.c
··· 4 4 #include <linux/gfp.h> 5 5 #include <linux/highmem.h> 6 6 #include <linux/export.h> 7 + #include <linux/memblock.h> 7 8 #include <linux/of_address.h> 8 9 #include <linux/slab.h> 9 10 #include <linux/types.h> ··· 21 20 #include <asm/xen/page.h> 22 21 #include <asm/xen/hypercall.h> 23 22 #include <asm/xen/interface.h> 23 + 24 + unsigned long xen_get_swiotlb_free_pages(unsigned int order) 25 + { 26 + struct memblock_region *reg; 27 + gfp_t flags = __GFP_NOWARN; 28 + 29 + for_each_memblock(memory, reg) { 30 + if (reg->base < (phys_addr_t)0xffffffff) { 31 + flags |= __GFP_DMA; 32 + break; 33 + } 34 + } 35 + return __get_free_pages(flags, order); 36 + } 24 37 25 38 enum dma_cache_op { 26 39 DMA_UNMAP,
+1 -1
arch/x86/include/asm/hypervisor.h
··· 50 50 /* Recognized hypervisors */ 51 51 extern const struct hypervisor_x86 x86_hyper_vmware; 52 52 extern const struct hypervisor_x86 x86_hyper_ms_hyperv; 53 - extern const struct hypervisor_x86 x86_hyper_xen_hvm; 53 + extern const struct hypervisor_x86 x86_hyper_xen; 54 54 extern const struct hypervisor_x86 x86_hyper_kvm; 55 55 56 56 extern void init_hypervisor(struct cpuinfo_x86 *c);
+5
arch/x86/include/asm/xen/page.h
··· 269 269 return false; 270 270 } 271 271 272 + static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order) 273 + { 274 + return __get_free_pages(__GFP_NOWARN, order); 275 + } 276 + 272 277 #endif /* _ASM_X86_XEN_PAGE_H */
+2 -2
arch/x86/kernel/cpu/hypervisor.c
··· 27 27 28 28 static const __initconst struct hypervisor_x86 * const hypervisors[] = 29 29 { 30 - #ifdef CONFIG_XEN_PVHVM 31 - &x86_hyper_xen_hvm, 30 + #ifdef CONFIG_XEN 31 + &x86_hyper_xen, 32 32 #endif 33 33 &x86_hyper_vmware, 34 34 &x86_hyper_ms_hyperv,
+19 -10
arch/x86/xen/enlighten.c
··· 1760 1760 1761 1761 static void __init xen_hvm_guest_init(void) 1762 1762 { 1763 + if (xen_pv_domain()) 1764 + return; 1765 + 1763 1766 init_hvm_pv_info(); 1764 1767 1765 1768 xen_hvm_init_shared_info(); ··· 1778 1775 xen_hvm_init_time_ops(); 1779 1776 xen_hvm_init_mmu_ops(); 1780 1777 } 1778 + #endif 1781 1779 1782 1780 static bool xen_nopv = false; 1783 1781 static __init int xen_parse_nopv(char *arg) ··· 1788 1784 } 1789 1785 early_param("xen_nopv", xen_parse_nopv); 1790 1786 1791 - static uint32_t __init xen_hvm_platform(void) 1787 + static uint32_t __init xen_platform(void) 1792 1788 { 1793 1789 if (xen_nopv) 1794 - return 0; 1795 - 1796 - if (xen_pv_domain()) 1797 1790 return 0; 1798 1791 1799 1792 return xen_cpuid_base(); ··· 1810 1809 } 1811 1810 EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); 1812 1811 1813 - const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { 1814 - .name = "Xen HVM", 1815 - .detect = xen_hvm_platform, 1812 + static void xen_set_cpu_features(struct cpuinfo_x86 *c) 1813 + { 1814 + if (xen_pv_domain()) 1815 + clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); 1816 + } 1817 + 1818 + const struct hypervisor_x86 x86_hyper_xen = { 1819 + .name = "Xen", 1820 + .detect = xen_platform, 1821 + #ifdef CONFIG_XEN_PVHVM 1816 1822 .init_platform = xen_hvm_guest_init, 1817 - .x2apic_available = xen_x2apic_para_available, 1818 - }; 1819 - EXPORT_SYMBOL(x86_hyper_xen_hvm); 1820 1823 #endif 1824 + .x2apic_available = xen_x2apic_para_available, 1825 + .set_cpu_features = xen_set_cpu_features, 1826 + }; 1827 + EXPORT_SYMBOL(x86_hyper_xen);
+10
arch/x86/xen/suspend.c
··· 88 88 tick_resume_local(); 89 89 } 90 90 91 + static void xen_vcpu_notify_suspend(void *data) 92 + { 93 + tick_suspend_local(); 94 + } 95 + 91 96 void xen_arch_resume(void) 92 97 { 93 98 on_each_cpu(xen_vcpu_notify_restore, NULL, 1); 99 + } 100 + 101 + void xen_arch_suspend(void) 102 + { 103 + on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); 94 104 }
+11 -24
drivers/block/xen-blkback/blkback.c
··· 265 265 atomic_dec(&blkif->persistent_gnt_in_use); 266 266 } 267 267 268 - static void free_persistent_gnts_unmap_callback(int result, 269 - struct gntab_unmap_queue_data *data) 270 - { 271 - struct completion *c = data->data; 272 - 273 - /* BUG_ON used to reproduce existing behaviour, 274 - but is this the best way to deal with this? */ 275 - BUG_ON(result); 276 - complete(c); 277 - } 278 - 279 268 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, 280 269 unsigned int num) 281 270 { ··· 274 285 struct rb_node *n; 275 286 int segs_to_unmap = 0; 276 287 struct gntab_unmap_queue_data unmap_data; 277 - struct completion unmap_completion; 278 288 279 - init_completion(&unmap_completion); 280 - 281 - unmap_data.data = &unmap_completion; 282 - unmap_data.done = &free_persistent_gnts_unmap_callback; 283 289 unmap_data.pages = pages; 284 290 unmap_data.unmap_ops = unmap; 285 291 unmap_data.kunmap_ops = NULL; ··· 294 310 !rb_next(&persistent_gnt->node)) { 295 311 296 312 unmap_data.count = segs_to_unmap; 297 - gnttab_unmap_refs_async(&unmap_data); 298 - wait_for_completion(&unmap_completion); 313 + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); 299 314 300 315 put_free_pages(blkif, pages, segs_to_unmap); 301 316 segs_to_unmap = 0; ··· 312 329 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 313 330 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 314 331 struct persistent_gnt *persistent_gnt; 315 - int ret, segs_to_unmap = 0; 332 + int segs_to_unmap = 0; 316 333 struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); 334 + struct gntab_unmap_queue_data unmap_data; 335 + 336 + unmap_data.pages = pages; 337 + unmap_data.unmap_ops = unmap; 338 + unmap_data.kunmap_ops = NULL; 317 339 318 340 while(!list_empty(&blkif->persistent_purge_list)) { 319 341 persistent_gnt = list_first_entry(&blkif->persistent_purge_list, ··· 334 346 pages[segs_to_unmap] = persistent_gnt->page; 335 347 336 348 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { 337 - ret = gnttab_unmap_refs(unmap, NULL, pages, 338 - segs_to_unmap); 339 - BUG_ON(ret); 349 + unmap_data.count = segs_to_unmap; 350 + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); 340 351 put_free_pages(blkif, pages, segs_to_unmap); 341 352 segs_to_unmap = 0; 342 353 } 343 354 kfree(persistent_gnt); 344 355 } 345 356 if (segs_to_unmap > 0) { 346 - ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); 347 - BUG_ON(ret); 357 + unmap_data.count = segs_to_unmap; 358 + BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); 348 359 put_free_pages(blkif, pages, segs_to_unmap); 349 360 }
+17 -1
drivers/tty/hvc/hvc_xen.c
··· 299 299 return 0; 300 300 } 301 301 302 + static void xen_console_update_evtchn(struct xencons_info *info) 303 + { 304 + if (xen_hvm_domain()) { 305 + uint64_t v; 306 + int err; 307 + 308 + err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); 309 + if (!err && v) 310 + info->evtchn = v; 311 + } else 312 + info->evtchn = xen_start_info->console.domU.evtchn; 313 + } 314 + 302 315 void xen_console_resume(void) 303 316 { 304 317 struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); 305 - if (info != NULL && info->irq) 318 + if (info != NULL && info->irq) { 319 + if (!xen_initial_domain()) 320 + xen_console_update_evtchn(info); 306 321 rebind_evtchn_irq(info->evtchn, info->irq); 322 + } 307 323 } 308 324 309 325 static void xencons_disconnect_backend(struct xencons_info *info)
+10
drivers/xen/events/events_2l.c
··· 345 345 return IRQ_HANDLED; 346 346 } 347 347 348 + static void evtchn_2l_resume(void) 349 + { 350 + int i; 351 + 352 + for_each_online_cpu(i) 353 + memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * 354 + EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); 355 + } 356 + 348 357 static const struct evtchn_ops evtchn_ops_2l = { 349 358 .max_channels = evtchn_2l_max_channels, 350 359 .nr_channels = evtchn_2l_max_channels, ··· 365 356 .mask = evtchn_2l_mask, 366 357 .unmask = evtchn_2l_unmask, 367 358 .handle_events = evtchn_2l_handle_events, 359 + .resume = evtchn_2l_resume, 368 360 }; 369 361 370 362 void __init xen_evtchn_2l_init(void)
+4 -3
drivers/xen/events/events_base.c
··· 529 529 if (rc) 530 530 goto err; 531 531 532 - bind_evtchn_to_cpu(evtchn, 0); 533 532 info->evtchn = evtchn; 533 + bind_evtchn_to_cpu(evtchn, 0); 534 534 535 535 rc = xen_evtchn_port_setup(info); 536 536 if (rc) ··· 1279 1279 1280 1280 mutex_unlock(&irq_mapping_update_lock); 1281 1281 1282 - /* new event channels are always bound to cpu 0 */ 1283 - irq_set_affinity(irq, cpumask_of(0)); 1282 + bind_evtchn_to_cpu(evtchn, info->cpu); 1283 + /* This will be deferred until interrupt is processed */ 1284 + irq_set_affinity(irq, cpumask_of(info->cpu)); 1284 1285 1285 1286 /* Unmask the event channel. */ 1286 1287 enable_irq(irq);
+3 -25
drivers/xen/gntdev.c
··· 327 327 return err; 328 328 } 329 329 330 - struct unmap_grant_pages_callback_data 331 - { 332 - struct completion completion; 333 - int result; 334 - }; 335 - 336 - static void unmap_grant_callback(int result, 337 - struct gntab_unmap_queue_data *data) 338 - { 339 - struct unmap_grant_pages_callback_data* d = data->data; 340 - 341 - d->result = result; 342 - complete(&d->completion); 343 - } 344 - 345 330 static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) 346 331 { 347 332 int i, err = 0; 348 333 struct gntab_unmap_queue_data unmap_data; 349 - struct unmap_grant_pages_callback_data data; 350 - 351 - init_completion(&data.completion); 352 - unmap_data.data = &data; 353 - unmap_data.done= &unmap_grant_callback; 354 334 355 335 if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { 356 336 int pgno = (map->notify.addr >> PAGE_SHIFT); ··· 347 367 unmap_data.pages = map->pages + offset; 348 368 unmap_data.count = pages; 349 369 350 - gnttab_unmap_refs_async(&unmap_data); 351 - 352 - wait_for_completion(&data.completion); 353 - if (data.result) 354 - return data.result; 370 + err = gnttab_unmap_refs_sync(&unmap_data); 371 + if (err) 372 + return err; 355 373 356 374 for (i = 0; i < pages; i++) { 357 375 if (map->unmap_ops[offset+i].status)
+28
drivers/xen/grant-table.c
··· 123 123 int (*query_foreign_access)(grant_ref_t ref); 124 124 }; 125 125 126 + struct unmap_refs_callback_data { 127 + struct completion completion; 128 + int result; 129 + }; 130 + 126 131 static struct gnttab_ops *gnttab_interface; 127 132 128 133 static int grant_table_version; ··· 867 862 __gnttab_unmap_refs_async(item); 868 863 } 869 864 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); 865 + 866 + static void unmap_refs_callback(int result, 867 + struct gntab_unmap_queue_data *data) 868 + { 869 + struct unmap_refs_callback_data *d = data->data; 870 + 871 + d->result = result; 872 + complete(&d->completion); 873 + } 874 + 875 + int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item) 876 + { 877 + struct unmap_refs_callback_data data; 878 + 879 + init_completion(&data.completion); 880 + item->data = &data; 881 + item->done = &unmap_refs_callback; 882 + gnttab_unmap_refs_async(item); 883 + wait_for_completion(&data.completion); 884 + 885 + return data.result; 886 + } 887 + EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); 870 888 871 889 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) 872 890 {
+6 -3
drivers/xen/manage.c
··· 131 131 goto out_resume; 132 132 } 133 133 134 + xen_arch_suspend(); 135 + 134 136 si.cancelled = 1; 135 137 136 138 err = stop_machine(xen_suspend, &si, cpumask_of(0)); ··· 150 148 si.cancelled = 1; 151 149 } 152 150 151 + xen_arch_resume(); 152 + 153 153 out_resume: 154 - if (!si.cancelled) { 155 - xen_arch_resume(); 154 + if (!si.cancelled) 156 155 xs_resume(); 157 - } else 156 + else 158 157 xs_suspend_cancel(); 159 158 160 159 dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
+1 -1
drivers/xen/swiotlb-xen.c
··· 235 235 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) 236 236 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) 237 237 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 238 - xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order); 238 + xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); 239 239 if (xen_io_tlb_start) 240 240 break; 241 241 order--;
+3 -3
drivers/xen/xen-pciback/conf_space.c
··· 16 16 #include "conf_space.h" 17 17 #include "conf_space_quirks.h" 18 18 19 - bool permissive; 20 - module_param(permissive, bool, 0644); 19 + bool xen_pcibk_permissive; 20 + module_param_named(permissive, xen_pcibk_permissive, bool, 0644); 21 21 22 22 /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, 23 23 * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ ··· 262 262 * This means that some fields may still be read-only because 263 263 * they have entries in the config_field list that intercept 264 264 * the write and do nothing. */ 265 - if (dev_data->permissive || permissive) { 265 + if (dev_data->permissive || xen_pcibk_permissive) { 266 266 switch (size) { 267 267 case 1: 268 268 err = pci_write_config_byte(dev, offset,
+1 -1
drivers/xen/xen-pciback/conf_space.h
··· 64 64 void *data; 65 65 }; 66 66 67 - extern bool permissive; 67 + extern bool xen_pcibk_permissive; 68 68 69 69 #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) 70 70
+1 -1
drivers/xen/xen-pciback/conf_space_header.c
··· 118 118 119 119 cmd->val = value; 120 120 121 - if (!permissive && (!dev_data || !dev_data->permissive)) 121 + if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive)) 122 122 return 0; 123 123 124 124 /* Only allow the guest to control certain bits. */
+29
drivers/xen/xenbus/xenbus_probe.c
··· 57 57 #include <xen/xen.h> 58 58 #include <xen/xenbus.h> 59 59 #include <xen/events.h> 60 + #include <xen/xen-ops.h> 60 61 #include <xen/page.h> 61 62 62 63 #include <xen/hvm.h> ··· 736 735 return err; 737 736 } 738 737 738 + static int xenbus_resume_cb(struct notifier_block *nb, 739 + unsigned long action, void *data) 740 + { 741 + int err = 0; 742 + 743 + if (xen_hvm_domain()) { 744 + uint64_t v; 745 + 746 + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 747 + if (!err && v) 748 + xen_store_evtchn = v; 749 + else 750 + pr_warn("Cannot update xenstore event channel: %d\n", 751 + err); 752 + } else 753 + xen_store_evtchn = xen_start_info->store_evtchn; 754 + 755 + return err; 756 + } 757 + 758 + static struct notifier_block xenbus_resume_nb = { 759 + .notifier_call = xenbus_resume_cb, 760 + }; 761 + 739 762 static int __init xenbus_init(void) 740 763 { 741 764 int err = 0; ··· 817 792 pr_warn("Error initializing xenstore comms: %i\n", err); 818 793 goto out_error; 819 794 } 795 + 796 + if ((xen_store_domain_type != XS_LOCAL) && 797 + (xen_store_domain_type != XS_UNKNOWN)) 798 + xen_resume_notifier_register(&xenbus_resume_nb); 820 799 821 800 #ifdef CONFIG_XEN_COMPAT_XENFS 822 801 /*
+1
include/xen/grant_table.h
··· 191 191 struct gnttab_unmap_grant_ref *kunmap_ops, 192 192 struct page **pages, unsigned int count); 193 193 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); 194 + int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item); 194 195 195 196 196 197 /* Perform a batch of grant map/copy operations. Retry every batch slot
+1
include/xen/xen-ops.h
··· 13 13 14 14 void xen_timer_resume(void); 15 15 void xen_arch_resume(void); 16 + void xen_arch_suspend(void); 16 17 17 18 void xen_resume_notifier_register(struct notifier_block *nb); 18 19 void xen_resume_notifier_unregister(struct notifier_block *nb);