Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"Changes included in this pull request:

- revert pxa2xx-flash back to using ioremap_cached() and switch
memremap() to use arch_memremap_wb()

- remove pci=firmware command line argument handling

- remove unnecessary arm_dma_set_mask() implementation, the generic
implementation will do for ARM

- removal of the ARM kallsyms "hack" to work around mode switching
veneers and vectors located below PAGE_OFFSET

- tidy up build system output a little

- add L2 cache power management DT bindings

- remove duplicated local_irq_disable() in reboot paths

- handle AMBA primecell devices better at registration time with PM
domains (needed for Samsung SoCs)

- ARM specific preparation to support Keystone II kexec"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8567/1: cache-uniphier: activate ways for secondary CPUs
ARM: 8570/2: Documentation: devicetree: Add PL310 PM bindings
ARM: 8569/1: pl2x0: Add OF control of cache power management
ARM: 8568/1: reboot: remove duplicated local_irq_disable()
ARM: 8566/1: drivers: amba: properly handle devices with power domains
ARM: provide arm_has_idmap_alias() helper
ARM: kexec: remove 512MB restriction on kexec crashdump
ARM: provide improved virt_to_idmap() functionality
ARM: kexec: fix crashkernel= handling
ARM: 8557/1: specify install, zinstall, and uinstall as PHONY targets
ARM: 8562/1: suppress "include/generated/mach-types.h is up to date."
ARM: 8553/1: kallsyms: remove --page-offset command line option
ARM: 8552/1: kallsyms: remove special lower address limit for CONFIG_ARM
ARM: 8555/1: kallsyms: ignore ARM mode switching veneers
ARM: 8548/1: dma-mapping: remove arm_dma_set_mask()
ARM: 8554/1: kernel: pci: remove pci=firmware command line parameter handling
ARM: memremap: implement arch_memremap_wb()
memremap: add arch specific hook for MEMREMAP_WB mappings
mtd: pxa2xx-flash: switch back from memremap to ioremap_cached
ARM: reintroduce ioremap_cached() for creating cached I/O mappings

+260 -94
+6
Documentation/devicetree/bindings/arm/l2c2x0.txt
··· 84 84 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable), 85 85 <1> (forcibly enable), property absent (retain settings set by 86 86 firmware) 87 + - arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly 88 + disable), <1> (forcibly enable), property absent (OS specific behavior, 89 + preferably retain firmware settings) 90 + - arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable), 91 + <1> (forcibly enable), property absent (OS specific behavior, 92 + preferably retain firmware settings) 87 93 88 94 Example: 89 95
+3 -10
Documentation/kdump/kdump.txt
··· 263 263 crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset] 264 264 range=start-[end] 265 265 266 - Please note, on arm, the offset is required. 267 - crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset 268 - range=start-[end] 269 - 270 - 'start' is inclusive and 'end' is exclusive. 271 - 272 266 For example: 273 267 274 268 crashkernel=512M-2G:64M,2G-:128M ··· 301 307 on the memory consumption of the kdump system. In general this is not 302 308 dependent on the memory size of the production system. 303 309 304 - On arm, use "crashkernel=Y@X". Note that the start address of the kernel 305 - will be aligned to 128MiB (0x08000000), so if the start address is not then 306 - any space below the alignment point may be overwritten by the dump-capture kernel, 307 - which means it is possible that the vmcore is not that precise as expected. 310 + On arm, the use of "crashkernel=Y@X" is no longer necessary; the 311 + kernel will automatically locate the crash kernel image within the 312 + first 512MB of RAM if X is not given. 308 313 309 314 310 315 Load the Dump-capture Kernel
-5
Documentation/kernel-parameters.txt
··· 2959 2959 for broken drivers that don't call it. 2960 2960 skip_isa_align [X86] do not align io start addr, so can 2961 2961 handle more pci cards 2962 - firmware [ARM] Do not re-enumerate the bus but instead 2963 - just use the configuration from the 2964 - bootloader. This is currently used on 2965 - IXP2000 systems where the bus has to be 2966 - configured a certain way for adjunct CPUs. 2967 2962 noearly [X86] Don't do any early type 1 scanning. 2968 2963 This might help on some broken boards which 2969 2964 machine check when some devices' config space
+1 -1
arch/arm/boot/Makefile
··· 88 88 $(call if_changed,objcopy) 89 89 @$(kecho) ' Kernel: $@ is ready' 90 90 91 - PHONY += initrd 91 + PHONY += initrd install zinstall uinstall 92 92 initrd: 93 93 @test "$(INITRD_PHYS)" != "" || \ 94 94 (echo This machine does not support INITRD; exit -1)
-2
arch/arm/include/asm/dma-mapping.h
··· 162 162 163 163 static inline void dma_mark_clean(void *addr, size_t size) { } 164 164 165 - extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); 166 - 167 165 /** 168 166 * arm_dma_alloc - allocate consistent memory for DMA 169 167 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+12
arch/arm/include/asm/io.h
··· 392 392 #define ioremap ioremap 393 393 #define ioremap_nocache ioremap 394 394 395 + /* 396 + * Do not use ioremap_cache for mapping memory. Use memremap instead. 397 + */ 395 398 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size); 396 399 #define ioremap_cache ioremap_cache 400 + 401 + /* 402 + * Do not use ioremap_cached in new code. Provided for the benefit of 403 + * the pxa2xx-flash MTD driver only. 404 + */ 405 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size); 397 406 398 407 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size); 399 408 #define ioremap_wc ioremap_wc ··· 410 401 411 402 void iounmap(volatile void __iomem *iomem_cookie); 412 403 #define iounmap iounmap 404 + 405 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size); 406 + #define arch_memremap_wb arch_memremap_wb 413 407 414 408 /* 415 409 * io{read,write}{16,32}be() macros
+31 -7
arch/arm/include/asm/memory.h
··· 288 288 #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 289 289 #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) 290 290 291 - extern unsigned long (*arch_virt_to_idmap)(unsigned long x); 291 + extern long long arch_phys_to_idmap_offset; 292 292 293 293 /* 294 - * These are for systems that have a hardware interconnect supported alias of 295 - * physical memory for idmap purposes. Most cases should leave these 294 + * These are for systems that have a hardware interconnect supported alias 295 + * of physical memory for idmap purposes. Most cases should leave these 296 296 * untouched. Note: this can only return addresses less than 4GiB. 297 297 */ 298 + static inline bool arm_has_idmap_alias(void) 299 + { 300 + return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0; 301 + } 302 + 303 + #define IDMAP_INVALID_ADDR ((u32)~0) 304 + 305 + static inline unsigned long phys_to_idmap(phys_addr_t addr) 306 + { 307 + if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) { 308 + addr += arch_phys_to_idmap_offset; 309 + if (addr > (u32)~0) 310 + addr = IDMAP_INVALID_ADDR; 311 + } 312 + return addr; 313 + } 314 + 315 + static inline phys_addr_t idmap_to_phys(unsigned long idmap) 316 + { 317 + phys_addr_t addr = idmap; 318 + 319 + if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) 320 + addr -= arch_phys_to_idmap_offset; 321 + 322 + return addr; 323 + } 324 + 298 325 static inline unsigned long __virt_to_idmap(unsigned long x) 299 326 { 300 - if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap) 301 - return arch_virt_to_idmap(x); 302 - else 303 - return __virt_to_phys(x); 327 + return phys_to_idmap(__virt_to_phys(x)); 304 328 } 305 329 306 330 #define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
-3
arch/arm/kernel/bios32.c
··· 550 550 if (!strcmp(str, "debug")) { 551 551 debug_pci = 1; 552 552 return NULL; 553 - } else if (!strcmp(str, "firmware")) { 554 - pci_add_flags(PCI_PROBE_ONLY); 555 - return NULL; 556 553 } 557 554 return str; 558 555 }
-3
arch/arm/kernel/reboot.c
··· 104 104 { 105 105 local_irq_disable(); 106 106 smp_send_stop(); 107 - 108 - local_irq_disable(); 109 107 while (1); 110 108 } 111 109 ··· 148 150 149 151 /* Whoops - the platform was unable to reboot. Tell the user! */ 150 152 printk("Reboot failed -- System halted\n"); 151 - local_irq_disable(); 152 153 while (1); 153 154 }
+26
arch/arm/kernel/setup.c
··· 941 941 late_initcall(init_machine_late); 942 942 943 943 #ifdef CONFIG_KEXEC 944 + /* 945 + * The crash region must be aligned to 128MB to avoid 946 + * zImage relocating below the reserved region. 947 + */ 948 + #define CRASH_ALIGN (128 << 20) 949 + 944 950 static inline unsigned long long get_total_mem(void) 945 951 { 946 952 unsigned long total; ··· 973 967 &crash_size, &crash_base); 974 968 if (ret) 975 969 return; 970 + 971 + if (crash_base <= 0) { 972 + unsigned long long crash_max = idmap_to_phys((u32)~0); 973 + crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max, 974 + crash_size, CRASH_ALIGN); 975 + if (!crash_base) { 976 + pr_err("crashkernel reservation failed - No suitable area found.\n"); 977 + return; 978 + } 979 + } else { 980 + unsigned long long start; 981 + 982 + start = memblock_find_in_range(crash_base, 983 + crash_base + crash_size, 984 + crash_size, SECTION_SIZE); 985 + if (start != crash_base) { 986 + pr_err("crashkernel reservation failed - memory is in use.\n"); 987 + return; 988 + } 989 + } 976 990 977 991 ret = memblock_reserve(crash_base, crash_size); 978 992 if (ret < 0) {
+1 -6
arch/arm/mach-keystone/keystone.c
··· 63 63 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 64 64 } 65 65 66 - static unsigned long keystone_virt_to_idmap(unsigned long x) 67 - { 68 - return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START; 69 - } 70 - 71 66 static long long __init keystone_pv_fixup(void) 72 67 { 73 68 long long offset; ··· 86 91 offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START; 87 92 88 93 /* Populate the arch idmap hook */ 89 - arch_virt_to_idmap = keystone_virt_to_idmap; 94 + arch_phys_to_idmap_offset = -offset; 90 95 91 96 return offset; 92 97 }
+21 -5
arch/arm/mm/cache-l2x0.c
··· 647 647 aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); 648 648 } 649 649 650 - /* r3p0 or later has power control register */ 651 - if (rev >= L310_CACHE_ID_RTL_R3P0) 652 - l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN | 653 - L310_STNDBY_MODE_EN; 654 - 655 650 /* 656 651 * Always enable non-secure access to the lockdown registers - 657 652 * we write to them as part of the L2C enable sequence so they ··· 1136 1141 u32 filter[2] = { 0, 0 }; 1137 1142 u32 assoc; 1138 1143 u32 prefetch; 1144 + u32 power; 1139 1145 u32 val; 1140 1146 int ret; 1141 1147 ··· 1267 1271 } 1268 1272 1269 1273 l2x0_saved_regs.prefetch_ctrl = prefetch; 1274 + 1275 + power = l2x0_saved_regs.pwr_ctrl | 1276 + L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN; 1277 + 1278 + ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val); 1279 + if (!ret) { 1280 + if (!val) 1281 + power &= ~L310_DYNAMIC_CLK_GATING_EN; 1282 + } else if (ret != -EINVAL) { 1283 + pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n"); 1284 + } 1285 + ret = of_property_read_u32(np, "arm,standby-mode", &val); 1286 + if (!ret) { 1287 + if (!val) 1288 + power &= ~L310_STNDBY_MODE_EN; 1289 + } else if (ret != -EINVAL) { 1290 + pr_err("L2C-310 OF standby-mode property value is missing or invalid\n"); 1291 + } 1292 + 1293 + l2x0_saved_regs.pwr_ctrl = power; 1270 1294 } 1271 1295 1272 1296 static const struct l2c_init_data of_l2c310_data __initconst = {
+24 -2
arch/arm/mm/cache-uniphier.c
··· 96 96 void __iomem *ctrl_base; 97 97 void __iomem *rev_base; 98 98 void __iomem *op_base; 99 + void __iomem *way_ctrl_base; 99 100 u32 way_present_mask; 100 101 u32 way_locked_mask; 101 102 u32 nsets; ··· 257 256 struct uniphier_cache_data *data, 258 257 u32 way_mask) 259 258 { 259 + unsigned int cpu; 260 + 260 261 data->way_locked_mask = way_mask & data->way_present_mask; 261 262 262 - writel_relaxed(~data->way_locked_mask & data->way_present_mask, 263 - data->ctrl_base + UNIPHIER_SSCLPDAWCR); 263 + for_each_possible_cpu(cpu) 264 + writel_relaxed(~data->way_locked_mask & data->way_present_mask, 265 + data->way_ctrl_base + 4 * cpu); 264 266 } 265 267 266 268 static void uniphier_cache_maint_range(unsigned long start, unsigned long end, ··· 463 459 goto err; 464 460 } 465 461 462 + data->way_ctrl_base = data->ctrl_base + 0xc00; 463 + 466 464 if (*cache_level == 2) { 467 465 u32 revision = readl(data->rev_base + UNIPHIER_SSCID); 468 466 /* ··· 473 467 */ 474 468 if (revision <= 0x16) 475 469 data->range_op_max_size = (u32)1 << 22; 470 + 471 + /* 472 + * Unfortunately, the offset address of active way control base 473 + * varies from SoC to SoC. 474 + */ 475 + switch (revision) { 476 + case 0x11: /* sLD3 */ 477 + data->way_ctrl_base = data->ctrl_base + 0x870; 478 + break; 479 + case 0x12: /* LD4 */ 480 + case 0x16: /* sld8 */ 481 + data->way_ctrl_base = data->ctrl_base + 0x840; 482 + break; 483 + default: 484 + break; 485 + } 476 486 }
-16
arch/arm/mm/dma-mapping.c
··· 190 190 .sync_single_for_device = arm_dma_sync_single_for_device, 191 191 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, 192 192 .sync_sg_for_device = arm_dma_sync_sg_for_device, 193 - .set_dma_mask = arm_dma_set_mask, 194 193 }; 195 194 EXPORT_SYMBOL(arm_dma_ops); 196 195 ··· 208 209 .get_sgtable = arm_dma_get_sgtable, 209 210 .map_page = arm_coherent_dma_map_page, 210 211 .map_sg = arm_dma_map_sg, 211 - .set_dma_mask = arm_dma_set_mask, 212 212 }; 213 213 EXPORT_SYMBOL(arm_coherent_dma_ops); 214 214 ··· 1141 1143 } 1142 1144 EXPORT_SYMBOL(dma_supported); 1143 1145 1144 - int arm_dma_set_mask(struct device *dev, u64 dma_mask) 1145 - { 1146 - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 1147 - return -EIO; 1148 - 1149 - *dev->dma_mask = dma_mask; 1150 - 1151 - return 0; 1152 - } 1153 - 1154 1146 #define PREALLOC_DMA_DEBUG_ENTRIES 4096 1155 1147 1156 1148 static int __init dma_debug_do_init(void) ··· 1994 2006 .unmap_sg = arm_iommu_unmap_sg, 1995 2007 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 1996 2008 .sync_sg_for_device = arm_iommu_sync_sg_for_device, 1997 - 1998 - .set_dma_mask = arm_dma_set_mask, 1999 2009 }; 2000 2010 2001 2011 struct dma_map_ops iommu_coherent_ops = { ··· 2007 2021 2008 2022 .map_sg = arm_coherent_iommu_map_sg, 2009 2023 .unmap_sg = arm_coherent_iommu_unmap_sg, 2010 - 2011 - .set_dma_mask = arm_dma_set_mask, 2012 2024 }; 2013 2025 2014 2026 /**
+1 -1
arch/arm/mm/idmap.c
··· 15 15 * page tables. 16 16 */ 17 17 pgd_t *idmap_pgd; 18 - unsigned long (*arch_virt_to_idmap)(unsigned long x); 18 + long long arch_phys_to_idmap_offset; 19 19 20 20 #ifdef CONFIG_ARM_LPAE 21 21 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+14 -2
arch/arm/mm/ioremap.c
··· 297 297 } 298 298 299 299 /* 300 - * Don't allow RAM to be mapped - this causes problems with ARMv6+ 300 + * Don't allow RAM to be mapped with mismatched attributes - this 301 + * causes problems with ARMv6+ 301 302 */ 302 - if (WARN_ON(pfn_valid(pfn))) 303 + if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW)) 303 304 return NULL; 304 305 305 306 area = get_vm_area_caller(size, VM_IOREMAP, caller); ··· 381 380 EXPORT_SYMBOL(ioremap); 382 381 383 382 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) 383 + __alias(ioremap_cached); 384 + 385 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) 384 386 { 385 387 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, 386 388 __builtin_return_address(0)); 387 389 } 388 390 EXPORT_SYMBOL(ioremap_cache); 391 + EXPORT_SYMBOL(ioremap_cached); 389 392 390 393 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) 391 394 { ··· 417 412 418 413 return __arm_ioremap_caller(phys_addr, size, mtype, 419 414 __builtin_return_address(0)); 415 + } 416 + 417 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) 418 + { 419 + return (__force void *)arch_ioremap_caller(phys_addr, size, 420 + MT_MEMORY_RW, 421 + __builtin_return_address(0)); 420 422 } 421 423 422 424 void __iounmap(volatile void __iomem *io_addr)
+9
arch/arm/mm/nommu.c
··· 368 368 EXPORT_SYMBOL(ioremap); 369 369 370 370 void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size) 371 + __alias(ioremap_cached); 372 + 373 + void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size) 371 374 { 372 375 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED, 373 376 __builtin_return_address(0)); 374 377 } 375 378 EXPORT_SYMBOL(ioremap_cache); 379 + EXPORT_SYMBOL(ioremap_cached); 376 380 377 381 void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size) 378 382 { ··· 384 380 __builtin_return_address(0)); 385 381 } 386 382 EXPORT_SYMBOL(ioremap_wc); 383 + 384 + void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) 385 + { 386 + return (void *)phys_addr; 387 + } 387 388 388 389 void __iounmap(volatile void __iomem *addr) 389 390 {
+7 -4
arch/arm/tools/Makefile
··· 4 4 # Copyright (C) 2001 Russell King 5 5 # 6 6 7 - include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types 8 - @$(kecho) ' Generating $@' 9 - @mkdir -p $(dir $@) 10 - $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; } 7 + quiet_cmd_gen_mach = GEN $@ 8 + cmd_gen_mach = mkdir -p $(dir $@) && \ 9 + $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \ 10 + { rm -f $@; /bin/false; } 11 + 12 + include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE 13 + $(call if_changed,gen_mach)
+90 -10
drivers/amba/bus.c
··· 336 336 kfree(d); 337 337 } 338 338 339 - /** 340 - * amba_device_add - add a previously allocated AMBA device structure 341 - * @dev: AMBA device allocated by amba_device_alloc 342 - * @parent: resource parent for this devices resources 343 - * 344 - * Claim the resource, and read the device cell ID if not already 345 - * initialized. Register the AMBA device with the Linux device 346 - * manager. 347 - */ 348 - int amba_device_add(struct amba_device *dev, struct resource *parent) 339 + static int amba_device_try_add(struct amba_device *dev, struct resource *parent) 349 340 { 350 341 u32 size; 351 342 void __iomem *tmp; ··· 361 370 tmp = ioremap(dev->res.start, size); 362 371 if (!tmp) { 363 372 ret = -ENOMEM; 373 + goto err_release; 374 + } 375 + 376 + ret = dev_pm_domain_attach(&dev->dev, true); 377 + if (ret == -EPROBE_DEFER) { 378 + iounmap(tmp); 364 379 goto err_release; 365 380 } 366 381 ··· 395 398 } 396 399 397 400 iounmap(tmp); 401 + dev_pm_domain_detach(&dev->dev, true); 398 402 399 403 if (ret) 400 404 goto err_release; ··· 417 419 err_release: 418 420 release_resource(&dev->res); 419 421 err_out: 422 + return ret; 423 + } 424 + 425 + /* 426 + * Registration of an AMBA device requires reading its pid and cid registers. 427 + * To do this, the device must be turned on (if it is a part of power domain) 428 + * and have clocks enabled. However in some cases those resources might not be 429 + * yet available. Returning EPROBE_DEFER is not a solution in such a case, 430 + * because callers don't handle this special error code. Instead such devices 431 + * are added to the special list and their registration is retried from 432 + * periodic worker, until all resources are available and registration succeeds. 
433 + */ 434 + struct deferred_device { 435 + struct amba_device *dev; 436 + struct resource *parent; 437 + struct list_head node; 438 + }; 439 + 440 + static LIST_HEAD(deferred_devices); 441 + static DEFINE_MUTEX(deferred_devices_lock); 442 + 443 + static void amba_deferred_retry_func(struct work_struct *dummy); 444 + static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func); 445 + 446 + #define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000)) 447 + 448 + static void amba_deferred_retry_func(struct work_struct *dummy) 449 + { 450 + struct deferred_device *ddev, *tmp; 451 + 452 + mutex_lock(&deferred_devices_lock); 453 + 454 + list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) { 455 + int ret = amba_device_try_add(ddev->dev, ddev->parent); 456 + 457 + if (ret == -EPROBE_DEFER) 458 + continue; 459 + 460 + list_del_init(&ddev->node); 461 + kfree(ddev); 462 + } 463 + 464 + if (!list_empty(&deferred_devices)) 465 + schedule_delayed_work(&deferred_retry_work, 466 + DEFERRED_DEVICE_TIMEOUT); 467 + 468 + mutex_unlock(&deferred_devices_lock); 469 + } 470 + 471 + /** 472 + * amba_device_add - add a previously allocated AMBA device structure 473 + * @dev: AMBA device allocated by amba_device_alloc 474 + * @parent: resource parent for this devices resources 475 + * 476 + * Claim the resource, and read the device cell ID if not already 477 + * initialized. Register the AMBA device with the Linux device 478 + * manager. 
479 + */ 480 + int amba_device_add(struct amba_device *dev, struct resource *parent) 481 + { 482 + int ret = amba_device_try_add(dev, parent); 483 + 484 + if (ret == -EPROBE_DEFER) { 485 + struct deferred_device *ddev; 486 + 487 + ddev = kmalloc(sizeof(*ddev), GFP_KERNEL); 488 + if (!ddev) 489 + return -ENOMEM; 490 + 491 + ddev->dev = dev; 492 + ddev->parent = parent; 493 + ret = 0; 494 + 495 + mutex_lock(&deferred_devices_lock); 496 + 497 + if (list_empty(&deferred_devices)) 498 + schedule_delayed_work(&deferred_retry_work, 499 + DEFERRED_DEVICE_TIMEOUT); 500 + list_add_tail(&ddev->node, &deferred_devices); 501 + 502 + mutex_unlock(&deferred_devices_lock); 503 + } 420 504 return ret; 421 505 } 422 506 EXPORT_SYMBOL_GPL(amba_device_add);
+3 -3
drivers/mtd/maps/pxa2xx-flash.c
··· 71 71 info->map.name); 72 72 return -ENOMEM; 73 73 } 74 - info->map.cached = memremap(info->map.phys, info->map.size, 75 - MEMREMAP_WB); 74 + info->map.cached = 75 + ioremap_cached(info->map.phys, info->map.size); 76 76 if (!info->map.cached) 77 77 printk(KERN_WARNING "Failed to ioremap cached %s\n", 78 78 info->map.name); ··· 111 111 map_destroy(info->mtd); 112 112 iounmap(info->map.virt); 113 113 if (info->map.cached) 114 - memunmap(info->map.cached); 114 + iounmap(info->map.cached); 115 115 kfree(info); 116 116 return 0; 117 117 }
+9 -2
kernel/memremap.c
··· 27 27 } 28 28 #endif 29 29 30 + #ifndef arch_memremap_wb 31 + static void *arch_memremap_wb(resource_size_t offset, unsigned long size) 32 + { 33 + return (__force void *)ioremap_cache(offset, size); 34 + } 35 + #endif 36 + 30 37 static void *try_ram_remap(resource_size_t offset, size_t size) 31 38 { 32 39 unsigned long pfn = PHYS_PFN(offset); ··· 41 34 /* In the simple case just return the existing linear address */ 42 35 if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn))) 43 36 return __va(offset); 44 - return NULL; /* fallback to ioremap_cache */ 37 + return NULL; /* fallback to arch_memremap_wb */ 45 38 } 46 39 47 40 /** ··· 97 90 if (is_ram == REGION_INTERSECTS) 98 91 addr = try_ram_remap(offset, size); 99 92 if (!addr) 100 - addr = ioremap_cache(offset, size); 93 + addr = arch_memremap_wb(offset, size); 101 94 } 102 95 103 96 /*
+2 -8
scripts/kallsyms.c
··· 63 63 static int all_symbols = 0; 64 64 static int absolute_percpu = 0; 65 65 static char symbol_prefix_char = '\0'; 66 - static unsigned long long kernel_start_addr = 0; 67 66 static int base_relative = 0; 68 67 69 68 int token_profit[0x10000]; ··· 222 223 223 224 static char *special_suffixes[] = { 224 225 "_veneer", /* arm */ 226 + "_from_arm", /* arm */ 227 + "_from_thumb", /* arm */ 225 228 NULL }; 226 229 227 230 int i; 228 231 char *sym_name = (char *)s->sym + 1; 229 - 230 - 231 - if (s->addr < kernel_start_addr) 232 - return 0; 233 232 234 233 /* skip prefix char */ 235 234 if (symbol_prefix_char && *sym_name == symbol_prefix_char) ··· 762 765 if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\'')) 763 766 p++; 764 767 symbol_prefix_char = *p; 765 - } else if (strncmp(argv[i], "--page-offset=", 14) == 0) { 766 - const char *p = &argv[i][14]; 767 - kernel_start_addr = strtoull(p, NULL, 16); 768 768 } else if (strcmp(argv[i], "--base-relative") == 0) 769 769 base_relative = 1; 770 770 else