Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"Not much this time around, the 5.20-rc1 development updates for arm
are:

- add KASAN support for vmalloc space on arm

- some sparse fixes from Ben Dooks

- rework amba device handling (so device addition isn't deferred)"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 9220/1: amba: Remove deferred device addition
ARM: 9219/1: fix undeclared soft_restart
ARM: 9218/1: dma-mapping: fix pointer/integer warning
ARM: 9217/1: add definition of arch_irq_work_raise()
ARM: 9203/1: kconfig: fix MODULE_PLTS for KASAN with KASAN_VMALLOC
ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC

+157 -171
+2
arch/arm/Kconfig
··· 75 75 select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL 76 76 select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU 77 77 select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL 78 + select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN 78 79 select HAVE_ARCH_MMAP_RND_BITS if MMU 79 80 select HAVE_ARCH_PFN_VALID 80 81 select HAVE_ARCH_SECCOMP ··· 1420 1419 config ARM_MODULE_PLTS 1421 1420 bool "Use PLTs to allow module memory to spill over into vmalloc area" 1422 1421 depends on MODULES 1422 + select KASAN_VMALLOC if KASAN 1423 1423 default y 1424 1424 help 1425 1425 Allocate PLTs when loading modules so that jumps and calls whose
+2
arch/arm/include/asm/irq_work.h
··· 9 9 return is_smp(); 10 10 } 11 11 12 + extern void arch_irq_work_raise(void); 13 + 12 14 #endif /* _ASM_ARM_IRQ_WORK_H */
+1
arch/arm/kernel/reboot.c
··· 10 10 #include <asm/cacheflush.h> 11 11 #include <asm/idmap.h> 12 12 #include <asm/virt.h> 13 + #include <asm/system_misc.h> 13 14 14 15 #include "reboot.h" 15 16
+1 -1
arch/arm/mm/dma-mapping.c
··· 709 709 710 710 *handle = DMA_MAPPING_ERROR; 711 711 allowblock = gfpflags_allow_blocking(gfp); 712 - cma = allowblock ? dev_get_cma_area(dev) : false; 712 + cma = allowblock ? dev_get_cma_area(dev) : NULL; 713 713 714 714 if (cma) 715 715 buf->allocator = &cma_allocator;
+5 -1
arch/arm/mm/kasan_init.c
··· 236 236 237 237 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 238 238 239 - kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), 239 + if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) 240 + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), 241 + kasan_mem_to_shadow((void *)VMALLOC_END)); 242 + 243 + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_END), 240 244 kasan_mem_to_shadow((void *)-1UL) + 1); 241 245 242 246 for_each_mem_range(i, &pa_start, &pa_end) {
+146 -169
drivers/amba/bus.c
··· 130 130 }; 131 131 ATTRIBUTE_GROUPS(amba_dev); 132 132 133 + static int amba_read_periphid(struct amba_device *dev) 134 + { 135 + struct reset_control *rstc; 136 + u32 size, pid, cid; 137 + void __iomem *tmp; 138 + int i, ret; 139 + 140 + ret = dev_pm_domain_attach(&dev->dev, true); 141 + if (ret) { 142 + dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret); 143 + goto err_out; 144 + } 145 + 146 + ret = amba_get_enable_pclk(dev); 147 + if (ret) { 148 + dev_dbg(&dev->dev, "can't get pclk: %d\n", ret); 149 + goto err_pm; 150 + } 151 + 152 + /* 153 + * Find reset control(s) of the amba bus and de-assert them. 154 + */ 155 + rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node); 156 + if (IS_ERR(rstc)) { 157 + ret = PTR_ERR(rstc); 158 + if (ret != -EPROBE_DEFER) 159 + dev_err(&dev->dev, "can't get reset: %d\n", ret); 160 + goto err_clk; 161 + } 162 + reset_control_deassert(rstc); 163 + reset_control_put(rstc); 164 + 165 + size = resource_size(&dev->res); 166 + tmp = ioremap(dev->res.start, size); 167 + if (!tmp) { 168 + ret = -ENOMEM; 169 + goto err_clk; 170 + } 171 + 172 + /* 173 + * Read pid and cid based on size of resource 174 + * they are located at end of region 175 + */ 176 + for (pid = 0, i = 0; i < 4; i++) 177 + pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8); 178 + for (cid = 0, i = 0; i < 4; i++) 179 + cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8); 180 + 181 + if (cid == CORESIGHT_CID) { 182 + /* set the base to the start of the last 4k block */ 183 + void __iomem *csbase = tmp + size - 4096; 184 + 185 + dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET); 186 + dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff; 187 + } 188 + 189 + if (cid == AMBA_CID || cid == CORESIGHT_CID) { 190 + dev->periphid = pid; 191 + dev->cid = cid; 192 + } 193 + 194 + if (!dev->periphid) 195 + ret = -ENODEV; 196 + 197 + iounmap(tmp); 198 + 199 + err_clk: 200 + amba_put_disable_pclk(dev); 201 + err_pm: 202 + dev_pm_domain_detach(&dev->dev, true); 203 + err_out: 204 + return ret; 205 + } 206 + 133 207 static int amba_match(struct device *dev, struct device_driver *drv) 134 208 { 135 209 struct amba_device *pcdev = to_amba_device(dev); 136 210 struct amba_driver *pcdrv = to_amba_driver(drv); 211 + 212 + if (!pcdev->periphid) { 213 + int ret = amba_read_periphid(pcdev); 214 + 215 + /* 216 + * Returning any error other than -EPROBE_DEFER from bus match 217 + * can cause driver registration failure. So, if there's a 218 + * permanent failure in reading pid and cid, simply map it to 219 + * -EPROBE_DEFER. 220 + */ 221 + if (ret) 222 + return -EPROBE_DEFER; 223 + dev_set_uevent_suppress(dev, false); 224 + kobject_uevent(&dev->kobj, KOBJ_ADD); 225 + } 137 226 138 227 /* When driver_override is set, only bind to the matching driver */ 139 228 if (pcdev->driver_override) ··· 457 368 458 369 postcore_initcall(amba_init); 459 370 371 + static int amba_proxy_probe(struct amba_device *adev, 372 + const struct amba_id *id) 373 + { 374 + WARN(1, "Stub driver should never match any device.\n"); 375 + return -ENODEV; 376 + } 377 + 378 + static const struct amba_id amba_stub_drv_ids[] = { 379 + { 0, 0 }, 380 + }; 381 + 382 + static struct amba_driver amba_proxy_drv = { 383 + .drv = { 384 + .name = "amba-proxy", 385 + }, 386 + .probe = amba_proxy_probe, 387 + .id_table = amba_stub_drv_ids, 388 + }; 389 + 390 + static int __init amba_stub_drv_init(void) 391 + { 392 + if (!IS_ENABLED(CONFIG_MODULES)) 393 + return 0; 394 + 395 + /* 396 + * The amba_match() function will get called only if there is at least 397 + * one amba driver registered. If all amba drivers are modules and are 398 + * only loaded based on uevents, then we'll hit a chicken-and-egg 399 + * situation where amba_match() is waiting on drivers and drivers are 400 + * waiting on amba_match(). So, register a stub driver to make sure 401 + * amba_match() is called even if no amba driver has been registered.
402 + */ 403 + return amba_driver_register(&amba_proxy_drv); 404 + } 405 + late_initcall_sync(amba_stub_drv_init); 406 + 460 407 /** 461 408 * amba_driver_register - register an AMBA device driver 462 409 * @drv: amba device driver structure ··· 535 410 kfree(d); 536 411 } 537 412 538 - static int amba_read_periphid(struct amba_device *dev) 539 - { 540 - struct reset_control *rstc; 541 - u32 size, pid, cid; 542 - void __iomem *tmp; 543 - int i, ret; 544 - 545 - ret = dev_pm_domain_attach(&dev->dev, true); 546 - if (ret) 547 - goto err_out; 548 - 549 - ret = amba_get_enable_pclk(dev); 550 - if (ret) 551 - goto err_pm; 552 - 553 - /* 554 - * Find reset control(s) of the amba bus and de-assert them. 555 - */ 556 - rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node); 557 - if (IS_ERR(rstc)) { 558 - ret = PTR_ERR(rstc); 559 - if (ret != -EPROBE_DEFER) 560 - dev_err(&dev->dev, "can't get reset: %d\n", ret); 561 - goto err_clk; 562 - } 563 - reset_control_deassert(rstc); 564 - reset_control_put(rstc); 565 - 566 - size = resource_size(&dev->res); 567 - tmp = ioremap(dev->res.start, size); 568 - if (!tmp) { 569 - ret = -ENOMEM; 570 - goto err_clk; 571 - } 572 - 573 - /* 574 - * Read pid and cid based on size of resource 575 - * they are located at end of region 576 - */ 577 - for (pid = 0, i = 0; i < 4; i++) 578 - pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8); 579 - for (cid = 0, i = 0; i < 4; i++) 580 - cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8); 581 - 582 - if (cid == CORESIGHT_CID) { 583 - /* set the base to the start of the last 4k block */ 584 - void __iomem *csbase = tmp + size - 4096; 585 - 586 - dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET); 587 - dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff; 588 - } 589 - 590 - if (cid == AMBA_CID || cid == CORESIGHT_CID) { 591 - dev->periphid = pid; 592 - dev->cid = cid; 593 - } 594 - 595 - if (!dev->periphid) 596 - ret = -ENODEV; 597 - 598 - iounmap(tmp); 599 - 600 - err_clk: 601 - amba_put_disable_pclk(dev); 602 - err_pm: 603 - dev_pm_domain_detach(&dev->dev, true); 604 - err_out: 605 - return ret; 606 - } 607 - 608 - static int amba_device_try_add(struct amba_device *dev, struct resource *parent) 609 - { 610 - int ret; 611 - 612 - ret = request_resource(parent, &dev->res); 613 - if (ret) 614 - goto err_out; 615 - 616 - /* Hard-coded primecell ID instead of plug-n-play */ 617 - if (dev->periphid != 0) 618 - goto skip_probe; 619 - 620 - ret = amba_read_periphid(dev); 621 - if (ret) 622 - goto err_release; 623 - 624 - skip_probe: 625 - ret = device_add(&dev->dev); 626 - err_release: 627 - if (ret) 628 - release_resource(&dev->res); 629 - err_out: 630 - return ret; 631 - } 632 - 633 - /* 634 - * Registration of AMBA device require reading its pid and cid registers. 635 - * To do this, the device must be turned on (if it is a part of power domain) 636 - * and have clocks enabled. However in some cases those resources might not be 637 - * yet available. Returning EPROBE_DEFER is not a solution in such case, 638 - * because callers don't handle this special error code. Instead such devices 639 - * are added to the special list and their registration is retried from 640 - * periodic worker, until all resources are available and registration succeeds.
641 - */ 642 - struct deferred_device { 643 - struct amba_device *dev; 644 - struct resource *parent; 645 - struct list_head node; 646 - }; 647 - 648 - static LIST_HEAD(deferred_devices); 649 - static DEFINE_MUTEX(deferred_devices_lock); 650 - 651 - static void amba_deferred_retry_func(struct work_struct *dummy); 652 - static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func); 653 - 654 - #define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000)) 655 - 656 - static int amba_deferred_retry(void) 657 - { 658 - struct deferred_device *ddev, *tmp; 659 - 660 - mutex_lock(&deferred_devices_lock); 661 - 662 - list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) { 663 - int ret = amba_device_try_add(ddev->dev, ddev->parent); 664 - 665 - if (ret == -EPROBE_DEFER) 666 - continue; 667 - 668 - list_del_init(&ddev->node); 669 - amba_device_put(ddev->dev); 670 - kfree(ddev); 671 - } 672 - 673 - mutex_unlock(&deferred_devices_lock); 674 - 675 - return 0; 676 - } 677 - late_initcall(amba_deferred_retry); 678 - 679 - static void amba_deferred_retry_func(struct work_struct *dummy) 680 - { 681 - amba_deferred_retry(); 682 - 683 - if (!list_empty(&deferred_devices)) 684 - schedule_delayed_work(&deferred_retry_work, 685 - DEFERRED_DEVICE_TIMEOUT); 686 - } 687 - 688 413 /** 689 414 * amba_device_add - add a previously allocated AMBA device structure 690 415 * @dev: AMBA device allocated by amba_device_alloc ··· 546 571 */ 547 572 int amba_device_add(struct amba_device *dev, struct resource *parent) 548 573 { 549 - int ret = amba_device_try_add(dev, parent); 574 + int ret; 550 575 551 - if (ret == -EPROBE_DEFER) { 552 - struct deferred_device *ddev; 576 + ret = request_resource(parent, &dev->res); 577 + if (ret) 578 + return ret; 553 579 554 - ddev = kmalloc(sizeof(*ddev), GFP_KERNEL); 555 - if (!ddev) 556 - return -ENOMEM; 557 - 558 - ddev->dev = dev; 559 - ddev->parent = parent; 560 - ret = 0; 561 - 562 - mutex_lock(&deferred_devices_lock); 563 - 564 - if (list_empty(&deferred_devices)) 565 - schedule_delayed_work(&deferred_retry_work, 566 - DEFERRED_DEVICE_TIMEOUT); 567 - list_add_tail(&ddev->node, &deferred_devices); 568 - 569 - mutex_unlock(&deferred_devices_lock); 580 + /* If primecell ID isn't hard-coded, figure it out */ 581 + if (!dev->periphid) { 582 + /* 583 + * AMBA device uevents require reading its pid and cid 584 + * registers. To do this, the device must be on, clocked and 585 + * out of reset. However in some cases those resources might 586 + * not yet be available. If that's the case, we suppress the 587 + * generation of uevents until we can read the pid and cid 588 + * registers. See also amba_match(). 589 + */ 590 + if (amba_read_periphid(dev)) 591 + dev_set_uevent_suppress(&dev->dev, true); 570 592 } 593 + 594 + ret = device_add(&dev->dev); 595 + if (ret) 596 + release_resource(&dev->res); 597 + 571 598 return ret; 572 599 } 573 600 EXPORT_SYMBOL_GPL(amba_device_add);