Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

- a fix for the Xen gntdev driver

- a fix for running as Xen dom0 booted via EFI and the EFI framebuffer
being located above 4GB

- a series to support mapping other guests' memory by using ZONE_DEVICE
  when running as a Xen guest on Arm

* tag 'for-linus-5.17-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
dt-bindings: xen: Clarify "reg" purpose
arm/xen: Read extended regions from DT and init Xen resource
xen/unpopulated-alloc: Add mechanism to use Xen resource
xen/balloon: Bring alloc(free)_xenballooned_pages helpers back
arm/xen: Switch to use gnttab_setup_auto_xlat_frames() for DT
xen/unpopulated-alloc: Drop check for virt_addr_valid() in fill_list()
xen/x86: obtain upper 32 bits of video frame buffer address for Dom0
xen/gntdev: fix unmap notification order

+259 -36
+8 -6
Documentation/devicetree/bindings/arm/xen.txt
··· 7 7 compatible = "xen,xen-<version>", "xen,xen"; 8 8 where <version> is the version of the Xen ABI of the platform. 9 9 10 - - reg: specifies the base physical address and size of a region in 11 - memory where the grant table should be mapped to, using an 12 - HYPERVISOR_memory_op hypercall. The memory region is large enough to map 13 - the whole grant table (it is larger or equal to gnttab_max_grant_frames()). 14 - This property is unnecessary when booting Dom0 using ACPI. 10 + - reg: specifies the base physical address and size of the regions in memory 11 + where the special resources should be mapped to, using an HYPERVISOR_memory_op 12 + hypercall. 13 + Region 0 is reserved for mapping grant table, it must be always present. 14 + The memory region is large enough to map the whole grant table (it is larger 15 + or equal to gnttab_max_grant_frames()). 16 + Regions 1...N are extended regions (unused address space) for mapping foreign 17 + GFNs and grants, they might be absent if there is nothing to expose. 15 18 16 19 - interrupts: the interrupt used by Xen to inject event notifications. 17 20 A GIC node is also required. 18 - This property is unnecessary when booting Dom0 using ACPI. 19 21 20 22 To support UEFI on Xen ARM virtual platforms, Xen populates the FDT "uefi" node 21 23 under /hypervisor with following parameters:
+126 -6
arch/arm/xen/enlighten.c
··· 59 59 struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 60 60 61 61 static __read_mostly unsigned int xen_events_irq; 62 + static __read_mostly phys_addr_t xen_grant_frames; 63 + 64 + #define GRANT_TABLE_INDEX 0 65 + #define EXT_REGION_INDEX 1 62 66 63 67 uint32_t xen_start_flags; 64 68 EXPORT_SYMBOL(xen_start_flags); ··· 304 300 #endif 305 301 } 306 302 303 + #ifdef CONFIG_XEN_UNPOPULATED_ALLOC 304 + /* 305 + * A type-less specific Xen resource which contains extended regions 306 + * (unused regions of guest physical address space provided by the hypervisor). 307 + */ 308 + static struct resource xen_resource = { 309 + .name = "Xen unused space", 310 + }; 311 + 312 + int __init arch_xen_unpopulated_init(struct resource **res) 313 + { 314 + struct device_node *np; 315 + struct resource *regs, *tmp_res; 316 + uint64_t min_gpaddr = -1, max_gpaddr = 0; 317 + unsigned int i, nr_reg = 0; 318 + int rc; 319 + 320 + if (!xen_domain()) 321 + return -ENODEV; 322 + 323 + if (!acpi_disabled) 324 + return -ENODEV; 325 + 326 + np = of_find_compatible_node(NULL, NULL, "xen,xen"); 327 + if (WARN_ON(!np)) 328 + return -ENODEV; 329 + 330 + /* Skip region 0 which is reserved for grant table space */ 331 + while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL)) 332 + nr_reg++; 333 + 334 + if (!nr_reg) { 335 + pr_err("No extended regions are found\n"); 336 + return -EINVAL; 337 + } 338 + 339 + regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL); 340 + if (!regs) 341 + return -ENOMEM; 342 + 343 + /* 344 + * Create resource from extended regions provided by the hypervisor to be 345 + * used as unused address space for Xen scratch pages. 
346 + */ 347 + for (i = 0; i < nr_reg; i++) { 348 + rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]); 349 + if (rc) 350 + goto err; 351 + 352 + if (max_gpaddr < regs[i].end) 353 + max_gpaddr = regs[i].end; 354 + if (min_gpaddr > regs[i].start) 355 + min_gpaddr = regs[i].start; 356 + } 357 + 358 + xen_resource.start = min_gpaddr; 359 + xen_resource.end = max_gpaddr; 360 + 361 + /* 362 + * Mark holes between extended regions as unavailable. The rest of that 363 + * address space will be available for the allocation. 364 + */ 365 + for (i = 1; i < nr_reg; i++) { 366 + resource_size_t start, end; 367 + 368 + /* There is an overlap between regions */ 369 + if (regs[i - 1].end + 1 > regs[i].start) { 370 + rc = -EINVAL; 371 + goto err; 372 + } 373 + 374 + /* There is no hole between regions */ 375 + if (regs[i - 1].end + 1 == regs[i].start) 376 + continue; 377 + 378 + start = regs[i - 1].end + 1; 379 + end = regs[i].start - 1; 380 + 381 + tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL); 382 + if (!tmp_res) { 383 + rc = -ENOMEM; 384 + goto err; 385 + } 386 + 387 + tmp_res->name = "Unavailable space"; 388 + tmp_res->start = start; 389 + tmp_res->end = end; 390 + 391 + rc = insert_resource(&xen_resource, tmp_res); 392 + if (rc) { 393 + pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc); 394 + kfree(tmp_res); 395 + goto err; 396 + } 397 + } 398 + 399 + *res = &xen_resource; 400 + 401 + err: 402 + kfree(regs); 403 + 404 + return rc; 405 + } 406 + #endif 407 + 307 408 static void __init xen_dt_guest_init(void) 308 409 { 309 410 struct device_node *xen_node; 411 + struct resource res; 310 412 311 413 xen_node = of_find_compatible_node(NULL, NULL, "xen,xen"); 312 414 if (!xen_node) { ··· 421 311 } 422 312 423 313 xen_events_irq = irq_of_parse_and_map(xen_node, 0); 314 + 315 + if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) { 316 + pr_err("Xen grant table region is not found\n"); 317 + return; 318 + } 319 + xen_grant_frames = res.start; 424 320 
} 425 321 426 322 static int __init xen_guest_init(void) 427 323 { 428 324 struct xen_add_to_physmap xatp; 429 325 struct shared_info *shared_info_page = NULL; 430 - int cpu; 326 + int rc, cpu; 431 327 432 328 if (!xen_domain()) 433 329 return 0; ··· 486 370 for_each_possible_cpu(cpu) 487 371 per_cpu(xen_vcpu_id, cpu) = cpu; 488 372 489 - xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames(); 490 - if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn, 491 - &xen_auto_xlat_grant_frames.vaddr, 492 - xen_auto_xlat_grant_frames.count)) { 373 + if (!xen_grant_frames) { 374 + xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames(); 375 + rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn, 376 + &xen_auto_xlat_grant_frames.vaddr, 377 + xen_auto_xlat_grant_frames.count); 378 + } else 379 + rc = gnttab_setup_auto_xlat_frames(xen_grant_frames); 380 + if (rc) { 493 381 free_percpu(xen_vcpu_info); 494 - return -ENOMEM; 382 + return rc; 495 383 } 496 384 gnttab_init(); 497 385
+8 -4
arch/x86/xen/vga.c
··· 63 63 } 64 64 65 65 if (size >= offsetof(struct dom0_vga_console_info, 66 - u.vesa_lfb.gbl_caps) 67 - + sizeof(info->u.vesa_lfb.gbl_caps)) 68 - screen_info->capabilities = info->u.vesa_lfb.gbl_caps; 69 - if (size >= offsetof(struct dom0_vga_console_info, 70 66 u.vesa_lfb.mode_attrs) 71 67 + sizeof(info->u.vesa_lfb.mode_attrs)) 72 68 screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; 69 + 70 + if (size >= offsetof(struct dom0_vga_console_info, 71 + u.vesa_lfb.ext_lfb_base) 72 + + sizeof(info->u.vesa_lfb.ext_lfb_base) 73 + && info->u.vesa_lfb.ext_lfb_base) { 74 + screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base; 75 + screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; 76 + } 73 77 break; 74 78 } 75 79 }
+1 -1
drivers/xen/Kconfig
··· 327 327 328 328 config XEN_UNPOPULATED_ALLOC 329 329 bool "Use unpopulated memory ranges for guest mappings" 330 - depends on X86 && ZONE_DEVICE 330 + depends on ZONE_DEVICE 331 331 default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0 332 332 help 333 333 Use unpopulated memory ranges in order to create mappings for guest
+9 -11
drivers/xen/balloon.c
··· 581 581 } 582 582 EXPORT_SYMBOL_GPL(balloon_set_new_target); 583 583 584 - #ifndef CONFIG_XEN_UNPOPULATED_ALLOC 585 584 static int add_ballooned_pages(unsigned int nr_pages) 586 585 { 587 586 enum bp_state st; ··· 609 610 } 610 611 611 612 /** 612 - * xen_alloc_unpopulated_pages - get pages that have been ballooned out 613 + * xen_alloc_ballooned_pages - get pages that have been ballooned out 613 614 * @nr_pages: Number of pages to get 614 615 * @pages: pages returned 615 616 * @return 0 on success, error otherwise 616 617 */ 617 - int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) 618 + int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages) 618 619 { 619 620 unsigned int pgno = 0; 620 621 struct page *page; ··· 651 652 return 0; 652 653 out_undo: 653 654 mutex_unlock(&balloon_mutex); 654 - xen_free_unpopulated_pages(pgno, pages); 655 + xen_free_ballooned_pages(pgno, pages); 655 656 /* 656 - * NB: free_xenballooned_pages will only subtract pgno pages, but since 657 + * NB: xen_free_ballooned_pages will only subtract pgno pages, but since 657 658 * target_unpopulated is incremented with nr_pages at the start we need 658 659 * to remove the remaining ones also, or accounting will be screwed. 
659 660 */ 660 661 balloon_stats.target_unpopulated -= nr_pages - pgno; 661 662 return ret; 662 663 } 663 - EXPORT_SYMBOL(xen_alloc_unpopulated_pages); 664 + EXPORT_SYMBOL(xen_alloc_ballooned_pages); 664 665 665 666 /** 666 - * xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages 667 + * xen_free_ballooned_pages - return pages retrieved with get_ballooned_pages 667 668 * @nr_pages: Number of pages 668 669 * @pages: pages to return 669 670 */ 670 - void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages) 671 + void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages) 671 672 { 672 673 unsigned int i; 673 674 ··· 686 687 687 688 mutex_unlock(&balloon_mutex); 688 689 } 689 - EXPORT_SYMBOL(xen_free_unpopulated_pages); 690 + EXPORT_SYMBOL(xen_free_ballooned_pages); 690 691 691 - #if defined(CONFIG_XEN_PV) 692 + #if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC) 692 693 static void __init balloon_add_region(unsigned long start_pfn, 693 694 unsigned long pages) 694 695 { ··· 710 711 711 712 balloon_stats.total_pages += extra_pfn_end - start_pfn; 712 713 } 713 - #endif 714 714 #endif 715 715 716 716 static int __init balloon_init(void)
+3 -3
drivers/xen/gntdev.c
··· 250 250 if (!refcount_dec_and_test(&map->users)) 251 251 return; 252 252 253 + if (map->pages && !use_ptemod) 254 + unmap_grant_pages(map, 0, map->count); 255 + 253 256 if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { 254 257 notify_remote_via_evtchn(map->notify.event); 255 258 evtchn_put(map->notify.event); 256 259 } 257 - 258 - if (map->pages && !use_ptemod) 259 - unmap_grant_pages(map, 0, map->count); 260 260 gntdev_free_map(map); 261 261 } 262 262
+82 -5
drivers/xen/unpopulated-alloc.c
··· 8 8 9 9 #include <asm/page.h> 10 10 11 + #include <xen/balloon.h> 11 12 #include <xen/page.h> 12 13 #include <xen/xen.h> 13 14 ··· 16 15 static struct page *page_list; 17 16 static unsigned int list_count; 18 17 18 + static struct resource *target_resource; 19 + 20 + /* 21 + * If arch is not happy with system "iomem_resource" being used for 22 + * the region allocation it can provide it's own view by creating specific 23 + * Xen resource with unused regions of guest physical address space provided 24 + * by the hypervisor. 25 + */ 26 + int __weak __init arch_xen_unpopulated_init(struct resource **res) 27 + { 28 + *res = &iomem_resource; 29 + 30 + return 0; 31 + } 32 + 19 33 static int fill_list(unsigned int nr_pages) 20 34 { 21 35 struct dev_pagemap *pgmap; 22 - struct resource *res; 36 + struct resource *res, *tmp_res = NULL; 23 37 void *vaddr; 24 38 unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); 25 - int ret = -ENOMEM; 39 + struct range mhp_range; 40 + int ret; 26 41 27 42 res = kzalloc(sizeof(*res), GFP_KERNEL); 28 43 if (!res) ··· 47 30 res->name = "Xen scratch"; 48 31 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 49 32 50 - ret = allocate_resource(&iomem_resource, res, 51 - alloc_pages * PAGE_SIZE, 0, -1, 33 + mhp_range = mhp_get_pluggable_range(true); 34 + 35 + ret = allocate_resource(target_resource, res, 36 + alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end, 52 37 PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); 53 38 if (ret < 0) { 54 39 pr_err("Cannot allocate new IOMEM resource\n"); 55 40 goto err_resource; 41 + } 42 + 43 + /* 44 + * Reserve the region previously allocated from Xen resource to avoid 45 + * re-using it by someone else. 
46 + */ 47 + if (target_resource != &iomem_resource) { 48 + tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL); 49 + if (!tmp_res) { 50 + ret = -ENOMEM; 51 + goto err_insert; 52 + } 53 + 54 + tmp_res->name = res->name; 55 + tmp_res->start = res->start; 56 + tmp_res->end = res->end; 57 + tmp_res->flags = res->flags; 58 + 59 + ret = request_resource(&iomem_resource, tmp_res); 60 + if (ret < 0) { 61 + pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret); 62 + kfree(tmp_res); 63 + goto err_insert; 64 + } 56 65 } 57 66 58 67 pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); ··· 128 85 for (i = 0; i < alloc_pages; i++) { 129 86 struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); 130 87 131 - BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); 132 88 pg->zone_device_data = page_list; 133 89 page_list = pg; 134 90 list_count++; ··· 138 96 err_memremap: 139 97 kfree(pgmap); 140 98 err_pgmap: 99 + if (tmp_res) { 100 + release_resource(tmp_res); 101 + kfree(tmp_res); 102 + } 103 + err_insert: 141 104 release_resource(res); 142 105 err_resource: 143 106 kfree(res); ··· 159 112 { 160 113 unsigned int i; 161 114 int ret = 0; 115 + 116 + /* 117 + * Fallback to default behavior if we do not have any suitable resource 118 + * to allocate required region from and as the result we won't be able to 119 + * construct pages. 
120 + */ 121 + if (!target_resource) 122 + return xen_alloc_ballooned_pages(nr_pages, pages); 162 123 163 124 mutex_lock(&list_lock); 164 125 if (list_count < nr_pages) { ··· 215 160 { 216 161 unsigned int i; 217 162 163 + if (!target_resource) { 164 + xen_free_ballooned_pages(nr_pages, pages); 165 + return; 166 + } 167 + 218 168 mutex_lock(&list_lock); 219 169 for (i = 0; i < nr_pages; i++) { 220 170 pages[i]->zone_device_data = page_list; ··· 262 202 } 263 203 subsys_initcall(init); 264 204 #endif 205 + 206 + static int __init unpopulated_init(void) 207 + { 208 + int ret; 209 + 210 + if (!xen_domain()) 211 + return -ENODEV; 212 + 213 + ret = arch_xen_unpopulated_init(&target_resource); 214 + if (ret) { 215 + pr_err("xen:unpopulated: Cannot initialize target resource\n"); 216 + target_resource = NULL; 217 + } 218 + 219 + return ret; 220 + } 221 + early_initcall(unpopulated_init);
+3
include/xen/balloon.h
··· 26 26 27 27 void balloon_set_new_target(unsigned long target); 28 28 29 + int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages); 30 + void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages); 31 + 29 32 #ifdef CONFIG_XEN_BALLOON 30 33 void xen_balloon_init(void); 31 34 #else
+3
include/xen/interface/xen.h
··· 722 722 uint32_t gbl_caps; 723 723 /* Mode attributes (offset 0x0, VESA command 0x4f01). */ 724 724 uint16_t mode_attrs; 725 + uint16_t pad; 726 + /* high 32 bits of lfb_base */ 727 + uint32_t ext_lfb_base; 725 728 } vesa_lfb; 726 729 } u; 727 730 };
+16
include/xen/xen.h
··· 52 52 extern u64 xen_saved_max_mem_size; 53 53 #endif 54 54 55 + #ifdef CONFIG_XEN_UNPOPULATED_ALLOC 55 56 int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); 56 57 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); 58 + #include <linux/ioport.h> 59 + int arch_xen_unpopulated_init(struct resource **res); 60 + #else 61 + #include <xen/balloon.h> 62 + static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages, 63 + struct page **pages) 64 + { 65 + return xen_alloc_ballooned_pages(nr_pages, pages); 66 + } 67 + static inline void xen_free_unpopulated_pages(unsigned int nr_pages, 68 + struct page **pages) 69 + { 70 + xen_free_ballooned_pages(nr_pages, pages); 71 + } 72 + #endif 57 73 58 74 #endif /* _XEN_XEN_H */