Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- StrongARM SA1111 updates to modernise and remove cruft

- Add StrongARM gpio drivers for board GPIOs

- Verify size of zImage is what we expect to avoid issues with
appended DTB

- nommu updates from Vladimir Murzin

- page table read-write-execute checking from Jinbum Park

- Broadcom Brahma-B15 cache updates from Florian Fainelli

- Avoid failure with kprobes test caused by inappropriately
placed kprobes

- Remove __memzero optimisation (which was incorrectly being
used directly by some drivers)

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (32 commits)
ARM: 8745/1: get rid of __memzero()
ARM: 8744/1: don't discard memblock for kexec
ARM: 8743/1: bL_switcher: add MODULE_LICENSE tag
ARM: 8742/1: Always use REFCOUNT_FULL
ARM: 8741/1: B15: fix unused label warnings
ARM: 8740/1: NOMMU: Make sure we do not hold stale data in mem[] array
ARM: 8739/1: NOMMU: Setup VBAR/Hivecs for secondary cores
ARM: 8738/1: Disable CONFIG_DEBUG_VIRTUAL for NOMMU
ARM: 8737/1: mm: dump: add checking for writable and executable
ARM: 8736/1: mm: dump: make the page table dumping seq_file
ARM: 8735/1: mm: dump: make page table dumping reusable
ARM: sa1100/neponset: add GPIO drivers for control and modem registers
ARM: sa1100/assabet: add BCR/BSR GPIO driver
ARM: 8734/1: mm: idmap: Mark variables as ro_after_init
ARM: 8733/1: hw_breakpoint: Mark variables as __ro_after_init
ARM: 8732/1: NOMMU: Allow userspace to access background MPU region
ARM: 8727/1: MAINTAINERS: Update brcmstb entries to cover B15 code
ARM: 8728/1: B15: Register reboot notifier for KEXEC
ARM: 8730/1: B15: Add suspend/resume hooks
ARM: 8726/1: B15: Add CPU hotplug awareness
...

+1031 -623
+2
MAINTAINERS
··· 2855 2855 F: arch/arm/mach-bcm/*brcmstb* 2856 2856 F: arch/arm/boot/dts/bcm7*.dts* 2857 2857 F: drivers/bus/brcmstb_gisb.c 2858 + F: arch/arm/mm/cache-b15-rac.c 2859 + F: arch/arm/include/asm/hardware/cache-b15-rac.h 2858 2860 N: brcmstb 2859 2861 2860 2862 BROADCOM BMIPS CPUFREQ DRIVER
+4 -8
arch/arm/Kconfig
··· 3 3 bool 4 4 default y 5 5 select ARCH_CLOCKSOURCE_DATA 6 - select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID 7 - select ARCH_HAS_DEBUG_VIRTUAL 6 + select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC 7 + select ARCH_HAS_DEBUG_VIRTUAL if MMU 8 8 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 9 select ARCH_HAS_ELF_RANDOMIZE 10 10 select ARCH_HAS_SET_MEMORY ··· 100 100 select OLD_SIGACTION 101 101 select OLD_SIGSUSPEND3 102 102 select PERF_USE_VMALLOC 103 + select REFCOUNT_FULL 103 104 select RTC_LIB 104 105 select SYS_SUPPORTS_APM_EMULATION 105 106 # Above selects are sorted alphabetically; please add new ones ··· 1527 1526 bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY 1528 1527 depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K 1529 1528 default y if CPU_THUMBONLY 1530 - select ARM_ASM_UNIFIED 1531 1529 select ARM_UNWIND 1532 1530 help 1533 1531 By enabling this option, the kernel will be compiled in 1534 - Thumb-2 mode. A compiler/assembler that understand the unified 1535 - ARM-Thumb syntax is needed. 1532 + Thumb-2 mode. 1536 1533 1537 1534 If unsure, say N. 1538 1535 ··· 1564 1565 Only Thumb-2 kernels are affected. 1565 1566 1566 1567 Unless you are sure your tools don't have this problem, say Y. 1567 - 1568 - config ARM_ASM_UNIFIED 1569 - bool 1570 1568 1571 1569 config ARM_PATCH_IDIV 1572 1570 bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
+32 -1
arch/arm/Kconfig.debug
··· 3 3 4 4 source "lib/Kconfig.debug" 5 5 6 - config ARM_PTDUMP 6 + config ARM_PTDUMP_CORE 7 + def_bool n 8 + 9 + config ARM_PTDUMP_DEBUGFS 7 10 bool "Export kernel pagetable layout to userspace via debugfs" 8 11 depends on DEBUG_KERNEL 9 12 depends on MMU 13 + select ARM_PTDUMP_CORE 10 14 select DEBUG_FS 11 15 ---help--- 12 16 Say Y here if you want to show the kernel pagetable layout in a ··· 19 15 It is probably not a good idea to enable this feature in a production 20 16 kernel. 21 17 If in doubt, say "N" 18 + 19 + config DEBUG_WX 20 + bool "Warn on W+X mappings at boot" 21 + select ARM_PTDUMP_CORE 22 + ---help--- 23 + Generate a warning if any W+X mappings are found at boot. 24 + 25 + This is useful for discovering cases where the kernel is leaving 26 + W+X mappings after applying NX, as such mappings are a security risk. 27 + 28 + Look for a message in dmesg output like this: 29 + 30 + arm/mm: Checked W+X mappings: passed, no W+X pages found. 31 + 32 + or like this, if the check failed: 33 + 34 + arm/mm: Checked W+X mappings: FAILED, <N> W+X pages found. 35 + 36 + Note that even if the check fails, your kernel is possibly 37 + still fine, as W+X mappings are not a security hole in 38 + themselves, what they do is that they make the exploitation 39 + of other unfixed kernel bugs easier. 40 + 41 + There is no runtime or memory usage effect of this option 42 + once the kernel has booted up - it's a one time check. 43 + 44 + If in doubt, say "Y". 22 45 23 46 # RMK wants arm kernels compiled with frame pointers or stack unwinding. 24 47 # If you know what you are doing and are willing to live without stack
+4 -2
arch/arm/Makefile
··· 115 115 CFLAGS_ABI +=-funwind-tables 116 116 endif 117 117 118 + # Accept old syntax despite ".syntax unified" 119 + AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) 120 + 118 121 ifeq ($(CONFIG_THUMB2_KERNEL),y) 119 122 AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it) 120 - AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) 121 123 CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN) 122 124 AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb 123 125 # Work around buggy relocation from gas if requested: ··· 127 125 KBUILD_CFLAGS_MODULE +=-fno-optimize-sibling-calls 128 126 endif 129 127 else 130 - CFLAGS_ISA :=$(call cc-option,-marm,) 128 + CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN) 131 129 AFLAGS_ISA :=$(CFLAGS_ISA) 132 130 endif 133 131
-5
arch/arm/boot/compressed/string.c
··· 130 130 *xs++ = c; 131 131 return s; 132 132 } 133 - 134 - void __memzero(void *s, size_t count) 135 - { 136 - memset(s, 0, count); 137 - }
+8
arch/arm/boot/compressed/vmlinux.lds.S
··· 56 56 .rodata : { 57 57 *(.rodata) 58 58 *(.rodata.*) 59 + *(.data.rel.ro) 59 60 } 60 61 .piggydata : { 61 62 *(.piggydata) ··· 102 101 * this symbol allows further debug in the near future. 103 102 */ 104 103 .image_end (NOLOAD) : { 104 + /* 105 + * EFI requires that the image is aligned to 512 bytes, and appended 106 + * DTB requires that we know where the end of the image is. Ensure 107 + * that both are satisfied by ensuring that there are no additional 108 + * sections emitted into the decompressor image. 109 + */ 105 110 _edata_real = .; 106 111 } 107 112 ··· 135 128 .stab.indexstr 0 : { *(.stab.indexstr) } 136 129 .comment 0 : { *(.comment) } 137 130 } 131 + ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
+4
arch/arm/common/bL_switcher_dummy_if.c
··· 57 57 &bL_switcher_fops 58 58 }; 59 59 module_misc_device(bL_switcher_device); 60 + 61 + MODULE_AUTHOR("Nicolas Pitre <nico@linaro.org>"); 62 + MODULE_LICENSE("GPL v2"); 63 + MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
+163 -163
arch/arm/common/sa1111.c
··· 108 108 spinlock_t lock; 109 109 void __iomem *base; 110 110 struct sa1111_platform_data *pdata; 111 + struct irq_domain *irqdomain; 111 112 struct gpio_chip gc; 112 113 #ifdef CONFIG_PM 113 114 void *saved_state; ··· 126 125 unsigned long skpcr_mask; 127 126 bool dma; 128 127 unsigned int devid; 129 - unsigned int irq[6]; 128 + unsigned int hwirq[6]; 130 129 }; 131 130 132 131 static struct sa1111_dev_info sa1111_devices[] = { ··· 135 134 .skpcr_mask = SKPCR_UCLKEN, 136 135 .dma = true, 137 136 .devid = SA1111_DEVID_USB, 138 - .irq = { 137 + .hwirq = { 139 138 IRQ_USBPWR, 140 139 IRQ_HCIM, 141 140 IRQ_HCIBUFFACC, ··· 149 148 .skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN, 150 149 .dma = true, 151 150 .devid = SA1111_DEVID_SAC, 152 - .irq = { 151 + .hwirq = { 153 152 AUDXMTDMADONEA, 154 153 AUDXMTDMADONEB, 155 154 AUDRCVDMADONEA, ··· 165 164 .offset = SA1111_KBD, 166 165 .skpcr_mask = SKPCR_PTCLKEN, 167 166 .devid = SA1111_DEVID_PS2_KBD, 168 - .irq = { 167 + .hwirq = { 169 168 IRQ_TPRXINT, 170 169 IRQ_TPTXINT 171 170 }, ··· 174 173 .offset = SA1111_MSE, 175 174 .skpcr_mask = SKPCR_PMCLKEN, 176 175 .devid = SA1111_DEVID_PS2_MSE, 177 - .irq = { 176 + .hwirq = { 178 177 IRQ_MSRXINT, 179 178 IRQ_MSTXINT 180 179 }, ··· 183 182 .offset = 0x1800, 184 183 .skpcr_mask = 0, 185 184 .devid = SA1111_DEVID_PCMCIA, 186 - .irq = { 185 + .hwirq = { 187 186 IRQ_S0_READY_NINT, 188 187 IRQ_S0_CD_VALID, 189 188 IRQ_S0_BVD1_STSCHG, ··· 194 193 }, 195 194 }; 196 195 196 + static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq) 197 + { 198 + return irq_create_mapping(sachip->irqdomain, hwirq); 199 + } 200 + 201 + static void sa1111_handle_irqdomain(struct irq_domain *irqdomain, int irq) 202 + { 203 + struct irq_desc *d = irq_to_desc(irq_linear_revmap(irqdomain, irq)); 204 + 205 + if (d) 206 + generic_handle_irq_desc(d); 207 + } 208 + 197 209 /* 198 210 * SA1111 interrupt support. 
Since clearing an IRQ while there are 199 211 * active IRQs causes the interrupt output to pulse, the upper levels ··· 216 202 { 217 203 unsigned int stat0, stat1, i; 218 204 struct sa1111 *sachip = irq_desc_get_handler_data(desc); 205 + struct irq_domain *irqdomain; 219 206 void __iomem *mapbase = sachip->base + SA1111_INTC; 220 207 221 - stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0); 222 - stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1); 208 + stat0 = readl_relaxed(mapbase + SA1111_INTSTATCLR0); 209 + stat1 = readl_relaxed(mapbase + SA1111_INTSTATCLR1); 223 210 224 - sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0); 211 + writel_relaxed(stat0, mapbase + SA1111_INTSTATCLR0); 225 212 226 213 desc->irq_data.chip->irq_ack(&desc->irq_data); 227 214 228 - sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1); 215 + writel_relaxed(stat1, mapbase + SA1111_INTSTATCLR1); 229 216 230 217 if (stat0 == 0 && stat1 == 0) { 231 218 do_bad_IRQ(desc); 232 219 return; 233 220 } 234 221 222 + irqdomain = sachip->irqdomain; 223 + 235 224 for (i = 0; stat0; i++, stat0 >>= 1) 236 225 if (stat0 & 1) 237 - generic_handle_irq(i + sachip->irq_base); 226 + sa1111_handle_irqdomain(irqdomain, i); 238 227 239 228 for (i = 32; stat1; i++, stat1 >>= 1) 240 229 if (stat1 & 1) 241 - generic_handle_irq(i + sachip->irq_base); 230 + sa1111_handle_irqdomain(irqdomain, i); 242 231 243 232 /* For level-based interrupts */ 244 233 desc->irq_data.chip->irq_unmask(&desc->irq_data); 245 234 } 246 235 247 - #define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base)) 248 - #define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32)) 249 - 250 236 static u32 sa1111_irqmask(struct irq_data *d) 251 237 { 252 - struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 253 - 254 - return BIT((d->irq - sachip->irq_base) & 31); 238 + return BIT(irqd_to_hwirq(d) & 31); 255 239 } 256 240 257 241 static int sa1111_irqbank(struct irq_data *d) 258 242 { 259 - struct sa1111 *sachip = irq_data_get_irq_chip_data(d); 
260 - 261 - return ((d->irq - sachip->irq_base) / 32) * 4; 243 + return (irqd_to_hwirq(d) / 32) * 4; 262 244 } 263 245 264 246 static void sa1111_ack_irq(struct irq_data *d) ··· 267 257 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); 268 258 u32 ie; 269 259 270 - ie = sa1111_readl(mapbase + SA1111_INTEN0); 260 + ie = readl_relaxed(mapbase + SA1111_INTEN0); 271 261 ie &= ~sa1111_irqmask(d); 272 - sa1111_writel(ie, mapbase + SA1111_INTEN0); 262 + writel(ie, mapbase + SA1111_INTEN0); 273 263 } 274 264 275 265 static void sa1111_unmask_irq(struct irq_data *d) ··· 278 268 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); 279 269 u32 ie; 280 270 281 - ie = sa1111_readl(mapbase + SA1111_INTEN0); 271 + ie = readl_relaxed(mapbase + SA1111_INTEN0); 282 272 ie |= sa1111_irqmask(d); 283 - sa1111_writel(ie, mapbase + SA1111_INTEN0); 273 + writel_relaxed(ie, mapbase + SA1111_INTEN0); 284 274 } 285 275 286 276 /* ··· 297 287 u32 ip, mask = sa1111_irqmask(d); 298 288 int i; 299 289 300 - ip = sa1111_readl(mapbase + SA1111_INTPOL0); 290 + ip = readl_relaxed(mapbase + SA1111_INTPOL0); 301 291 for (i = 0; i < 8; i++) { 302 - sa1111_writel(ip ^ mask, mapbase + SA1111_INTPOL0); 303 - sa1111_writel(ip, mapbase + SA1111_INTPOL0); 304 - if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask) 292 + writel_relaxed(ip ^ mask, mapbase + SA1111_INTPOL0); 293 + writel_relaxed(ip, mapbase + SA1111_INTPOL0); 294 + if (readl_relaxed(mapbase + SA1111_INTSTATCLR0) & mask) 305 295 break; 306 296 } 307 297 ··· 323 313 if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0) 324 314 return -EINVAL; 325 315 326 - ip = sa1111_readl(mapbase + SA1111_INTPOL0); 316 + ip = readl_relaxed(mapbase + SA1111_INTPOL0); 327 317 if (flags & IRQ_TYPE_EDGE_RISING) 328 318 ip &= ~mask; 329 319 else 330 320 ip |= mask; 331 - sa1111_writel(ip, mapbase + SA1111_INTPOL0); 332 - sa1111_writel(ip, mapbase + SA1111_WAKEPOL0); 321 + writel_relaxed(ip, mapbase + 
SA1111_INTPOL0); 322 + writel_relaxed(ip, mapbase + SA1111_WAKEPOL0); 333 323 334 324 return 0; 335 325 } ··· 340 330 void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d); 341 331 u32 we, mask = sa1111_irqmask(d); 342 332 343 - we = sa1111_readl(mapbase + SA1111_WAKEEN0); 333 + we = readl_relaxed(mapbase + SA1111_WAKEEN0); 344 334 if (on) 345 335 we |= mask; 346 336 else 347 337 we &= ~mask; 348 - sa1111_writel(we, mapbase + SA1111_WAKEEN0); 338 + writel_relaxed(we, mapbase + SA1111_WAKEEN0); 349 339 350 340 return 0; 351 341 } ··· 360 350 .irq_set_wake = sa1111_wake_irq, 361 351 }; 362 352 353 + static int sa1111_irqdomain_map(struct irq_domain *d, unsigned int irq, 354 + irq_hw_number_t hwirq) 355 + { 356 + struct sa1111 *sachip = d->host_data; 357 + 358 + /* Disallow unavailable interrupts */ 359 + if (hwirq > SSPROR && hwirq < AUDXMTDMADONEA) 360 + return -EINVAL; 361 + 362 + irq_set_chip_data(irq, sachip); 363 + irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq); 364 + irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); 365 + 366 + return 0; 367 + } 368 + 369 + static const struct irq_domain_ops sa1111_irqdomain_ops = { 370 + .map = sa1111_irqdomain_map, 371 + .xlate = irq_domain_xlate_twocell, 372 + }; 373 + 363 374 static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base) 364 375 { 365 376 void __iomem *irqbase = sachip->base + SA1111_INTC; 366 - unsigned i, irq; 367 377 int ret; 368 378 369 379 /* ··· 403 373 sachip->irq_base = ret; 404 374 405 375 /* disable all IRQs */ 406 - sa1111_writel(0, irqbase + SA1111_INTEN0); 407 - sa1111_writel(0, irqbase + SA1111_INTEN1); 408 - sa1111_writel(0, irqbase + SA1111_WAKEEN0); 409 - sa1111_writel(0, irqbase + SA1111_WAKEEN1); 376 + writel_relaxed(0, irqbase + SA1111_INTEN0); 377 + writel_relaxed(0, irqbase + SA1111_INTEN1); 378 + writel_relaxed(0, irqbase + SA1111_WAKEEN0); 379 + writel_relaxed(0, irqbase + SA1111_WAKEEN1); 410 380 411 381 /* 412 382 * detect on rising 
edge. Note: Feb 2001 Errata for SA1111 413 383 * specifies that S0ReadyInt and S1ReadyInt should be '1'. 414 384 */ 415 - sa1111_writel(0, irqbase + SA1111_INTPOL0); 416 - sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) | 417 - BIT(IRQ_S1_READY_NINT & 31), 418 - irqbase + SA1111_INTPOL1); 385 + writel_relaxed(0, irqbase + SA1111_INTPOL0); 386 + writel_relaxed(BIT(IRQ_S0_READY_NINT & 31) | 387 + BIT(IRQ_S1_READY_NINT & 31), 388 + irqbase + SA1111_INTPOL1); 419 389 420 390 /* clear all IRQs */ 421 - sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0); 422 - sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1); 391 + writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0); 392 + writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1); 423 393 424 - for (i = IRQ_GPAIN0; i <= SSPROR; i++) { 425 - irq = sachip->irq_base + i; 426 - irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq); 427 - irq_set_chip_data(irq, sachip); 428 - irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); 394 + sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR, 395 + &sa1111_irqdomain_ops, 396 + sachip); 397 + if (!sachip->irqdomain) { 398 + irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); 399 + return -ENOMEM; 429 400 } 430 401 431 - for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) { 432 - irq = sachip->irq_base + i; 433 - irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq); 434 - irq_set_chip_data(irq, sachip); 435 - irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE); 436 - } 402 + irq_domain_associate_many(sachip->irqdomain, 403 + sachip->irq_base + IRQ_GPAIN0, 404 + IRQ_GPAIN0, SSPROR + 1 - IRQ_GPAIN0); 405 + irq_domain_associate_many(sachip->irqdomain, 406 + sachip->irq_base + AUDXMTDMADONEA, 407 + AUDXMTDMADONEA, 408 + IRQ_S1_BVD1_STSCHG + 1 - AUDXMTDMADONEA); 437 409 438 410 /* 439 411 * Register SA1111 interrupt ··· 452 420 453 421 static void sa1111_remove_irq(struct sa1111 *sachip) 454 422 { 423 + struct irq_domain *domain = sachip->irqdomain; 455 424 void 
__iomem *irqbase = sachip->base + SA1111_INTC; 425 + int i; 456 426 457 427 /* disable all IRQs */ 458 - sa1111_writel(0, irqbase + SA1111_INTEN0); 459 - sa1111_writel(0, irqbase + SA1111_INTEN1); 460 - sa1111_writel(0, irqbase + SA1111_WAKEEN0); 461 - sa1111_writel(0, irqbase + SA1111_WAKEEN1); 428 + writel_relaxed(0, irqbase + SA1111_INTEN0); 429 + writel_relaxed(0, irqbase + SA1111_INTEN1); 430 + writel_relaxed(0, irqbase + SA1111_WAKEEN0); 431 + writel_relaxed(0, irqbase + SA1111_WAKEEN1); 462 432 463 - if (sachip->irq != NO_IRQ) { 464 - irq_set_chained_handler_and_data(sachip->irq, NULL, NULL); 465 - irq_free_descs(sachip->irq_base, SA1111_IRQ_NR); 433 + irq_set_chained_handler_and_data(sachip->irq, NULL, NULL); 434 + for (i = 0; i < SA1111_IRQ_NR; i++) 435 + irq_dispose_mapping(irq_find_mapping(domain, i)); 436 + irq_domain_remove(domain); 466 437 467 - release_mem_region(sachip->phys + SA1111_INTC, 512); 468 - } 438 + release_mem_region(sachip->phys + SA1111_INTC, 512); 469 439 } 470 440 471 441 enum { ··· 606 572 { 607 573 struct sa1111 *sachip = gc_to_sa1111(gc); 608 574 609 - return sachip->irq_base + offset; 575 + return sa1111_map_irq(sachip, offset); 610 576 } 611 577 612 578 static int sa1111_setup_gpios(struct sa1111 *sachip) ··· 652 618 /* 653 619 * Turn VCO on, and disable PLL Bypass. 654 620 */ 655 - r = sa1111_readl(sachip->base + SA1111_SKCR); 621 + r = readl_relaxed(sachip->base + SA1111_SKCR); 656 622 r &= ~SKCR_VCO_OFF; 657 - sa1111_writel(r, sachip->base + SA1111_SKCR); 623 + writel_relaxed(r, sachip->base + SA1111_SKCR); 658 624 r |= SKCR_PLL_BYPASS | SKCR_OE_EN; 659 - sa1111_writel(r, sachip->base + SA1111_SKCR); 625 + writel_relaxed(r, sachip->base + SA1111_SKCR); 660 626 661 627 /* 662 628 * Wait lock time. SA1111 manual _doesn't_ ··· 668 634 * Enable RCLK. We also ensure that RDYEN is set. 
669 635 */ 670 636 r |= SKCR_RCLKEN | SKCR_RDYEN; 671 - sa1111_writel(r, sachip->base + SA1111_SKCR); 637 + writel_relaxed(r, sachip->base + SA1111_SKCR); 672 638 673 639 /* 674 640 * Wait 14 RCLK cycles for the chip to finish coming out ··· 679 645 /* 680 646 * Ensure all clocks are initially off. 681 647 */ 682 - sa1111_writel(0, sachip->base + SA1111_SKPCR); 648 + writel_relaxed(0, sachip->base + SA1111_SKPCR); 683 649 684 650 spin_unlock_irqrestore(&sachip->lock, flags); 685 651 } ··· 709 675 if (cas_latency == 3) 710 676 smcr |= SMCR_CLAT; 711 677 712 - sa1111_writel(smcr, sachip->base + SA1111_SMCR); 678 + writel_relaxed(smcr, sachip->base + SA1111_SMCR); 713 679 714 680 /* 715 681 * Now clear the bits in the DMA mask to work around the SA1111 ··· 757 723 dev->mapbase = sachip->base + info->offset; 758 724 dev->skpcr_mask = info->skpcr_mask; 759 725 760 - for (i = 0; i < ARRAY_SIZE(info->irq); i++) 761 - dev->irq[i] = sachip->irq_base + info->irq[i]; 726 + for (i = 0; i < ARRAY_SIZE(info->hwirq); i++) 727 + dev->hwirq[i] = info->hwirq[i]; 762 728 763 729 /* 764 730 * If the parent device has a DMA mask associated with it, and ··· 848 814 /* 849 815 * Probe for the chip. Only touch the SBI registers. 850 816 */ 851 - id = sa1111_readl(sachip->base + SA1111_SKID); 817 + id = readl_relaxed(sachip->base + SA1111_SKID); 852 818 if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { 853 819 printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id); 854 820 ret = -ENODEV; ··· 867 833 * The interrupt controller must be initialised before any 868 834 * other device to ensure that the interrupts are available. 869 835 */ 870 - if (sachip->irq != NO_IRQ) { 871 - ret = sa1111_setup_irq(sachip, pd->irq_base); 872 - if (ret) 873 - goto err_clk; 874 - } 836 + ret = sa1111_setup_irq(sachip, pd->irq_base); 837 + if (ret) 838 + goto err_clk; 875 839 876 840 /* Setup the GPIOs - should really be done after the IRQ setup */ 877 841 ret = sa1111_setup_gpios(sachip); ··· 896 864 * DMA. 
It can otherwise be held firmly in the off position. 897 865 * (currently, we always enable it.) 898 866 */ 899 - val = sa1111_readl(sachip->base + SA1111_SKPCR); 900 - sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR); 867 + val = readl_relaxed(sachip->base + SA1111_SKPCR); 868 + writel_relaxed(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR); 901 869 902 870 /* 903 871 * Enable the SA1110 memory bus request and grant signals. ··· 994 962 * Save state. 995 963 */ 996 964 base = sachip->base; 997 - save->skcr = sa1111_readl(base + SA1111_SKCR); 998 - save->skpcr = sa1111_readl(base + SA1111_SKPCR); 999 - save->skcdr = sa1111_readl(base + SA1111_SKCDR); 1000 - save->skaud = sa1111_readl(base + SA1111_SKAUD); 1001 - save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0); 1002 - save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1); 965 + save->skcr = readl_relaxed(base + SA1111_SKCR); 966 + save->skpcr = readl_relaxed(base + SA1111_SKPCR); 967 + save->skcdr = readl_relaxed(base + SA1111_SKCDR); 968 + save->skaud = readl_relaxed(base + SA1111_SKAUD); 969 + save->skpwm0 = readl_relaxed(base + SA1111_SKPWM0); 970 + save->skpwm1 = readl_relaxed(base + SA1111_SKPWM1); 1003 971 1004 - sa1111_writel(0, sachip->base + SA1111_SKPWM0); 1005 - sa1111_writel(0, sachip->base + SA1111_SKPWM1); 972 + writel_relaxed(0, sachip->base + SA1111_SKPWM0); 973 + writel_relaxed(0, sachip->base + SA1111_SKPWM1); 1006 974 1007 975 base = sachip->base + SA1111_INTC; 1008 - save->intpol0 = sa1111_readl(base + SA1111_INTPOL0); 1009 - save->intpol1 = sa1111_readl(base + SA1111_INTPOL1); 1010 - save->inten0 = sa1111_readl(base + SA1111_INTEN0); 1011 - save->inten1 = sa1111_readl(base + SA1111_INTEN1); 1012 - save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0); 1013 - save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1); 1014 - save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0); 1015 - save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1); 976 + save->intpol0 = readl_relaxed(base + 
SA1111_INTPOL0); 977 + save->intpol1 = readl_relaxed(base + SA1111_INTPOL1); 978 + save->inten0 = readl_relaxed(base + SA1111_INTEN0); 979 + save->inten1 = readl_relaxed(base + SA1111_INTEN1); 980 + save->wakepol0 = readl_relaxed(base + SA1111_WAKEPOL0); 981 + save->wakepol1 = readl_relaxed(base + SA1111_WAKEPOL1); 982 + save->wakeen0 = readl_relaxed(base + SA1111_WAKEEN0); 983 + save->wakeen1 = readl_relaxed(base + SA1111_WAKEEN1); 1016 984 1017 985 /* 1018 986 * Disable. 1019 987 */ 1020 - val = sa1111_readl(sachip->base + SA1111_SKCR); 1021 - sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR); 988 + val = readl_relaxed(sachip->base + SA1111_SKCR); 989 + writel_relaxed(val | SKCR_SLEEP, sachip->base + SA1111_SKCR); 1022 990 1023 991 clk_disable(sachip->clk); 1024 992 ··· 1055 1023 * Ensure that the SA1111 is still here. 1056 1024 * FIXME: shouldn't do this here. 1057 1025 */ 1058 - id = sa1111_readl(sachip->base + SA1111_SKID); 1026 + id = readl_relaxed(sachip->base + SA1111_SKID); 1059 1027 if ((id & SKID_ID_MASK) != SKID_SA1111_ID) { 1060 1028 __sa1111_remove(sachip); 1061 1029 dev_set_drvdata(dev, NULL); ··· 1079 1047 */ 1080 1048 spin_lock_irqsave(&sachip->lock, flags); 1081 1049 1082 - sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0); 1083 - sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1); 1050 + writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN0); 1051 + writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN1); 1084 1052 1085 1053 base = sachip->base; 1086 - sa1111_writel(save->skcr, base + SA1111_SKCR); 1087 - sa1111_writel(save->skpcr, base + SA1111_SKPCR); 1088 - sa1111_writel(save->skcdr, base + SA1111_SKCDR); 1089 - sa1111_writel(save->skaud, base + SA1111_SKAUD); 1090 - sa1111_writel(save->skpwm0, base + SA1111_SKPWM0); 1091 - sa1111_writel(save->skpwm1, base + SA1111_SKPWM1); 1054 + writel_relaxed(save->skcr, base + SA1111_SKCR); 1055 + writel_relaxed(save->skpcr, base + SA1111_SKPCR); 1056 + 
writel_relaxed(save->skcdr, base + SA1111_SKCDR); 1057 + writel_relaxed(save->skaud, base + SA1111_SKAUD); 1058 + writel_relaxed(save->skpwm0, base + SA1111_SKPWM0); 1059 + writel_relaxed(save->skpwm1, base + SA1111_SKPWM1); 1092 1060 1093 1061 base = sachip->base + SA1111_INTC; 1094 - sa1111_writel(save->intpol0, base + SA1111_INTPOL0); 1095 - sa1111_writel(save->intpol1, base + SA1111_INTPOL1); 1096 - sa1111_writel(save->inten0, base + SA1111_INTEN0); 1097 - sa1111_writel(save->inten1, base + SA1111_INTEN1); 1098 - sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0); 1099 - sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1); 1100 - sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0); 1101 - sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1); 1062 + writel_relaxed(save->intpol0, base + SA1111_INTPOL0); 1063 + writel_relaxed(save->intpol1, base + SA1111_INTPOL1); 1064 + writel_relaxed(save->inten0, base + SA1111_INTEN0); 1065 + writel_relaxed(save->inten1, base + SA1111_INTEN1); 1066 + writel_relaxed(save->wakepol0, base + SA1111_WAKEPOL0); 1067 + writel_relaxed(save->wakepol1, base + SA1111_WAKEPOL1); 1068 + writel_relaxed(save->wakeen0, base + SA1111_WAKEEN0); 1069 + writel_relaxed(save->wakeen1, base + SA1111_WAKEEN1); 1102 1070 1103 1071 spin_unlock_irqrestore(&sachip->lock, flags); 1104 1072 ··· 1185 1153 { 1186 1154 unsigned int skcdr, fbdiv, ipdiv, opdiv; 1187 1155 1188 - skcdr = sa1111_readl(sachip->base + SA1111_SKCDR); 1156 + skcdr = readl_relaxed(sachip->base + SA1111_SKCDR); 1189 1157 1190 1158 fbdiv = (skcdr & 0x007f) + 2; 1191 1159 ipdiv = ((skcdr & 0x0f80) >> 7) + 2; ··· 1227 1195 1228 1196 spin_lock_irqsave(&sachip->lock, flags); 1229 1197 1230 - val = sa1111_readl(sachip->base + SA1111_SKCR); 1198 + val = readl_relaxed(sachip->base + SA1111_SKCR); 1231 1199 if (mode == SA1111_AUDIO_I2S) { 1232 1200 val &= ~SKCR_SELAC; 1233 1201 } else { 1234 1202 val |= SKCR_SELAC; 1235 1203 } 1236 - sa1111_writel(val, sachip->base + SA1111_SKCR); 1204 + 
writel_relaxed(val, sachip->base + SA1111_SKCR); 1237 1205 1238 1206 spin_unlock_irqrestore(&sachip->lock, flags); 1239 1207 } ··· 1258 1226 if (div > 128) 1259 1227 div = 128; 1260 1228 1261 - sa1111_writel(div - 1, sachip->base + SA1111_SKAUD); 1229 + writel_relaxed(div - 1, sachip->base + SA1111_SKAUD); 1262 1230 1263 1231 return 0; 1264 1232 } ··· 1276 1244 if (sadev->devid != SA1111_DEVID_SAC) 1277 1245 return -EINVAL; 1278 1246 1279 - div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1; 1247 + div = readl_relaxed(sachip->base + SA1111_SKAUD) + 1; 1280 1248 1281 1249 return __sa1111_pll_clock(sachip) / (256 * div); 1282 1250 } ··· 1293 1261 1294 1262 #define MODIFY_BITS(port, mask, dir) \ 1295 1263 if (mask) { \ 1296 - val = sa1111_readl(port); \ 1264 + val = readl_relaxed(port); \ 1297 1265 val &= ~(mask); \ 1298 1266 val |= (dir) & (mask); \ 1299 - sa1111_writel(val, port); \ 1267 + writel_relaxed(val, port); \ 1300 1268 } 1301 1269 1302 1270 spin_lock_irqsave(&sachip->lock, flags); ··· 1361 1329 1362 1330 if (ret == 0) { 1363 1331 spin_lock_irqsave(&sachip->lock, flags); 1364 - val = sa1111_readl(sachip->base + SA1111_SKPCR); 1365 - sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1332 + val = readl_relaxed(sachip->base + SA1111_SKPCR); 1333 + writel_relaxed(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1366 1334 spin_unlock_irqrestore(&sachip->lock, flags); 1367 1335 } 1368 1336 return ret; ··· 1380 1348 unsigned int val; 1381 1349 1382 1350 spin_lock_irqsave(&sachip->lock, flags); 1383 - val = sa1111_readl(sachip->base + SA1111_SKPCR); 1384 - sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1351 + val = readl_relaxed(sachip->base + SA1111_SKPCR); 1352 + writel_relaxed(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR); 1385 1353 spin_unlock_irqrestore(&sachip->lock, flags); 1386 1354 1387 1355 if (sachip->pdata && sachip->pdata->disable) ··· 1391 1359 1392 1360 int sa1111_get_irq(struct sa1111_dev 
*sadev, unsigned num) 1393 1361 { 1394 - if (num >= ARRAY_SIZE(sadev->irq)) 1362 + struct sa1111 *sachip = sa1111_chip_driver(sadev); 1363 + if (num >= ARRAY_SIZE(sadev->hwirq)) 1395 1364 return -EINVAL; 1396 - return sadev->irq[num]; 1365 + return sa1111_map_irq(sachip, sadev->hwirq[num]); 1397 1366 } 1398 1367 EXPORT_SYMBOL_GPL(sa1111_get_irq); 1399 1368 ··· 1410 1377 struct sa1111_driver *drv = SA1111_DRV(_drv); 1411 1378 1412 1379 return !!(dev->devid & drv->devid); 1413 - } 1414 - 1415 - static int sa1111_bus_suspend(struct device *dev, pm_message_t state) 1416 - { 1417 - struct sa1111_dev *sadev = to_sa1111_device(dev); 1418 - struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1419 - int ret = 0; 1420 - 1421 - if (drv && drv->suspend) 1422 - ret = drv->suspend(sadev, state); 1423 - return ret; 1424 - } 1425 - 1426 - static int sa1111_bus_resume(struct device *dev) 1427 - { 1428 - struct sa1111_dev *sadev = to_sa1111_device(dev); 1429 - struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1430 - int ret = 0; 1431 - 1432 - if (drv && drv->resume) 1433 - ret = drv->resume(sadev); 1434 - return ret; 1435 - } 1436 - 1437 - static void sa1111_bus_shutdown(struct device *dev) 1438 - { 1439 - struct sa1111_driver *drv = SA1111_DRV(dev->driver); 1440 - 1441 - if (drv && drv->shutdown) 1442 - drv->shutdown(to_sa1111_device(dev)); 1443 1380 } 1444 1381 1445 1382 static int sa1111_bus_probe(struct device *dev) ··· 1439 1436 .match = sa1111_match, 1440 1437 .probe = sa1111_bus_probe, 1441 1438 .remove = sa1111_bus_remove, 1442 - .suspend = sa1111_bus_suspend, 1443 - .resume = sa1111_bus_resume, 1444 - .shutdown = sa1111_bus_shutdown, 1445 1439 }; 1446 1440 EXPORT_SYMBOL(sa1111_bus_type); 1447 1441
+1 -2
arch/arm/include/asm/exception.h
··· 10 10 11 11 #include <linux/interrupt.h> 12 12 13 - #define __exception __attribute__((section(".exception.text"))) 14 13 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 15 14 #define __exception_irq_entry __irq_entry 16 15 #else 17 - #define __exception_irq_entry __exception 16 + #define __exception_irq_entry 18 17 #endif 19 18 20 19 #endif /* __ASM_ARM_EXCEPTION_H */
+4
arch/arm/include/asm/glue-cache.h
··· 117 117 # endif 118 118 #endif 119 119 120 + #if defined(CONFIG_CACHE_B15_RAC) 121 + # define MULTI_CACHE 1 122 + #endif 123 + 120 124 #if defined(CONFIG_CPU_V7M) 121 125 # define MULTI_CACHE 1 122 126 #endif
+10
arch/arm/include/asm/hardware/cache-b15-rac.h
··· 1 + #ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H 2 + #define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H 3 + 4 + #ifndef __ASSEMBLY__ 5 + 6 + void b15_flush_kern_cache_all(void); 7 + 8 + #endif 9 + 10 + #endif
+1 -31
arch/arm/include/asm/hardware/sa1111.h
··· 16 16 #include <mach/bitfield.h> 17 17 18 18 /* 19 - * The SA1111 is always located at virtual 0xf4000000, and is always 20 - * "native" endian. 21 - */ 22 - 23 - #define SA1111_VBASE 0xf4000000 24 - 25 - /* Don't use these! */ 26 - #define SA1111_p2v( x ) ((x) - SA1111_BASE + SA1111_VBASE) 27 - #define SA1111_v2p( x ) ((x) - SA1111_VBASE + SA1111_BASE) 28 - 29 - #ifndef __ASSEMBLY__ 30 - #define _SA1111(x) ((x) + sa1111->resource.start) 31 - #endif 32 - 33 - #define sa1111_writel(val,addr) __raw_writel(val, addr) 34 - #define sa1111_readl(addr) __raw_readl(addr) 35 - 36 - /* 37 - * 26 bits of the SA-1110 address bus are available to the SA-1111. 38 - * Use these when feeding target addresses to the DMA engines. 39 - */ 40 - 41 - #define SA1111_ADDR_WIDTH (26) 42 - #define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1) 43 - #define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK) 44 - 45 - /* 46 19 * Don't ask the (SAC) DMA engines to move less than this amount. 47 20 */ 48 21 ··· 390 417 struct resource res; 391 418 void __iomem *mapbase; 392 419 unsigned int skpcr_mask; 393 - unsigned int irq[6]; 420 + unsigned int hwirq[6]; 394 421 u64 dma_mask; 395 422 }; 396 423 ··· 404 431 unsigned int devid; 405 432 int (*probe)(struct sa1111_dev *); 406 433 int (*remove)(struct sa1111_dev *); 407 - int (*suspend)(struct sa1111_dev *, pm_message_t); 408 - int (*resume)(struct sa1111_dev *); 409 - void (*shutdown)(struct sa1111_dev *); 410 434 }; 411 435 412 436 #define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
+1
arch/arm/include/asm/memory.h
··· 88 88 #else /* CONFIG_MMU */ 89 89 90 90 #ifndef __ASSEMBLY__ 91 + extern unsigned long setup_vectors_base(void); 91 92 extern unsigned long vectors_base; 92 93 #define VECTORS_BASE vectors_base 93 94 #endif
+43
arch/arm/include/asm/ptdump.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2014 ARM Ltd. */ 3 + #ifndef __ASM_PTDUMP_H 4 + #define __ASM_PTDUMP_H 5 + 6 + #ifdef CONFIG_ARM_PTDUMP_CORE 7 + 8 + #include <linux/mm_types.h> 9 + #include <linux/seq_file.h> 10 + 11 + struct addr_marker { 12 + unsigned long start_address; 13 + char *name; 14 + }; 15 + 16 + struct ptdump_info { 17 + struct mm_struct *mm; 18 + const struct addr_marker *markers; 19 + unsigned long base_addr; 20 + }; 21 + 22 + void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info); 23 + #ifdef CONFIG_ARM_PTDUMP_DEBUGFS 24 + int ptdump_debugfs_register(struct ptdump_info *info, const char *name); 25 + #else 26 + static inline int ptdump_debugfs_register(struct ptdump_info *info, 27 + const char *name) 28 + { 29 + return 0; 30 + } 31 + #endif /* CONFIG_ARM_PTDUMP_DEBUGFS */ 32 + 33 + void ptdump_check_wx(void); 34 + 35 + #endif /* CONFIG_ARM_PTDUMP_CORE */ 36 + 37 + #ifdef CONFIG_DEBUG_WX 38 + #define debug_checkwx() ptdump_check_wx() 39 + #else 40 + #define debug_checkwx() do { } while (0) 41 + #endif 42 + 43 + #endif /* __ASM_PTDUMP_H */
+21
arch/arm/include/asm/sections.h
··· 6 6 7 7 extern char _exiprom[]; 8 8 9 + extern char __idmap_text_start[]; 10 + extern char __idmap_text_end[]; 11 + extern char __entry_text_start[]; 12 + extern char __entry_text_end[]; 13 + extern char __hyp_idmap_text_start[]; 14 + extern char __hyp_idmap_text_end[]; 15 + 16 + static inline bool in_entry_text(unsigned long addr) 17 + { 18 + return memory_contains(__entry_text_start, __entry_text_end, 19 + (void *)addr, 1); 20 + } 21 + 22 + static inline bool in_idmap_text(unsigned long addr) 23 + { 24 + void *a = (void *)addr; 25 + return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) || 26 + memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end, 27 + a, 1); 28 + } 29 + 9 30 #endif /* _ASM_ARM_SECTIONS_H */
-14
arch/arm/include/asm/string.h
··· 39 39 return __memset64(p, v, n * 8, v >> 32); 40 40 } 41 41 42 - extern void __memzero(void *ptr, __kernel_size_t n); 43 - 44 - #define memset(p,v,n) \ 45 - ({ \ 46 - void *__p = (p); size_t __n = n; \ 47 - if ((__n) != 0) { \ 48 - if (__builtin_constant_p((v)) && (v) == 0) \ 49 - __memzero((__p),(__n)); \ 50 - else \ 51 - memset((__p),(v),(__n)); \ 52 - } \ 53 - (__p); \ 54 - }) 55 - 56 42 #endif
-12
arch/arm/include/asm/traps.h
··· 28 28 ptr < (unsigned long)&__irqentry_text_end; 29 29 } 30 30 31 - static inline int in_exception_text(unsigned long ptr) 32 - { 33 - extern char __exception_text_start[]; 34 - extern char __exception_text_end[]; 35 - int in; 36 - 37 - in = ptr >= (unsigned long)&__exception_text_start && 38 - ptr < (unsigned long)&__exception_text_end; 39 - 40 - return in ? : __in_irqentry_text(ptr); 41 - } 42 - 43 31 extern void __init early_trap_init(void *); 44 32 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); 45 33 extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
+3 -74
arch/arm/include/asm/unified.h
··· 20 20 #ifndef __ASM_UNIFIED_H 21 21 #define __ASM_UNIFIED_H 22 22 23 - #if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED) 23 + #if defined(__ASSEMBLY__) 24 24 .syntax unified 25 + #else 26 + __asm__(".syntax unified"); 25 27 #endif 26 28 27 29 #ifdef CONFIG_CPU_V7M ··· 65 63 #endif 66 64 67 65 #endif /* CONFIG_THUMB2_KERNEL */ 68 - 69 - #ifndef CONFIG_ARM_ASM_UNIFIED 70 - 71 - /* 72 - * If the unified assembly syntax isn't used (in ARM mode), these 73 - * macros expand to an empty string 74 - */ 75 - #ifdef __ASSEMBLY__ 76 - .macro it, cond 77 - .endm 78 - .macro itt, cond 79 - .endm 80 - .macro ite, cond 81 - .endm 82 - .macro ittt, cond 83 - .endm 84 - .macro itte, cond 85 - .endm 86 - .macro itet, cond 87 - .endm 88 - .macro itee, cond 89 - .endm 90 - .macro itttt, cond 91 - .endm 92 - .macro ittte, cond 93 - .endm 94 - .macro ittet, cond 95 - .endm 96 - .macro ittee, cond 97 - .endm 98 - .macro itett, cond 99 - .endm 100 - .macro itete, cond 101 - .endm 102 - .macro iteet, cond 103 - .endm 104 - .macro iteee, cond 105 - .endm 106 - #else /* !__ASSEMBLY__ */ 107 - __asm__( 108 - " .macro it, cond\n" 109 - " .endm\n" 110 - " .macro itt, cond\n" 111 - " .endm\n" 112 - " .macro ite, cond\n" 113 - " .endm\n" 114 - " .macro ittt, cond\n" 115 - " .endm\n" 116 - " .macro itte, cond\n" 117 - " .endm\n" 118 - " .macro itet, cond\n" 119 - " .endm\n" 120 - " .macro itee, cond\n" 121 - " .endm\n" 122 - " .macro itttt, cond\n" 123 - " .endm\n" 124 - " .macro ittte, cond\n" 125 - " .endm\n" 126 - " .macro ittet, cond\n" 127 - " .endm\n" 128 - " .macro ittee, cond\n" 129 - " .endm\n" 130 - " .macro itett, cond\n" 131 - " .endm\n" 132 - " .macro itete, cond\n" 133 - " .endm\n" 134 - " .macro iteet, cond\n" 135 - " .endm\n" 136 - " .macro iteee, cond\n" 137 - " .endm\n"); 138 - #endif /* __ASSEMBLY__ */ 139 - 140 - #endif /* CONFIG_ARM_ASM_UNIFIED */ 141 66 142 67 #endif /* !__ASM_UNIFIED_H */
-1
arch/arm/kernel/armksyms.c
··· 92 92 EXPORT_SYMBOL(memcpy); 93 93 EXPORT_SYMBOL(memmove); 94 94 EXPORT_SYMBOL(memchr); 95 - EXPORT_SYMBOL(__memzero); 96 95 97 96 EXPORT_SYMBOL(mmioset); 98 97 EXPORT_SYMBOL(mmiocpy);
+1 -5
arch/arm/kernel/entry-armv.S
··· 82 82 #endif 83 83 .endm 84 84 85 - #ifdef CONFIG_KPROBES 86 - .section .kprobes.text,"ax",%progbits 87 - #else 88 - .text 89 - #endif 85 + .section .entry.text,"ax",%progbits 90 86 91 87 /* 92 88 * Invalid mode handlers
+1
arch/arm/kernel/entry-common.S
··· 37 37 #define TRACE(x...) 38 38 #endif 39 39 40 + .section .entry.text,"ax",%progbits 40 41 .align 5 41 42 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING)) 42 43 /*
+3 -2
arch/arm/kernel/head-common.S
··· 105 105 ARM( ldmia r4!, {r0, r1, sp} ) 106 106 THUMB( ldmia r4!, {r0, r1, r3} ) 107 107 THUMB( mov sp, r3 ) 108 - sub r1, r1, r0 109 - bl __memzero @ clear .bss 108 + sub r2, r1, r0 109 + mov r1, #0 110 + bl memset @ clear .bss 110 111 111 112 ldmia r4, {r0, r1, r2, r3} 112 113 str r9, [r0] @ Save processor ID
+5 -5
arch/arm/kernel/hw_breakpoint.c
··· 44 44 static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); 45 45 46 46 /* Number of BRP/WRP registers on this CPU. */ 47 - static int core_num_brps; 48 - static int core_num_wrps; 47 + static int core_num_brps __ro_after_init; 48 + static int core_num_wrps __ro_after_init; 49 49 50 50 /* Debug architecture version. */ 51 - static u8 debug_arch; 51 + static u8 debug_arch __ro_after_init; 52 52 53 53 /* Does debug architecture support OS Save and Restore? */ 54 - static bool has_ossr; 54 + static bool has_ossr __ro_after_init; 55 55 56 56 /* Maximum supported watchpoint length. */ 57 - static u8 max_watchpoint_len; 57 + static u8 max_watchpoint_len __ro_after_init; 58 58 59 59 #define READ_WB_REG_CASE(OP2, M, VAL) \ 60 60 case ((OP2 << 4) + M): \
+3
arch/arm/kernel/smp.c
··· 379 379 380 380 cpu_init(); 381 381 382 + #ifndef CONFIG_MMU 383 + setup_vectors_base(); 384 + #endif 382 385 pr_debug("CPU%u: Booted secondary processor\n", cpu); 383 386 384 387 preempt_disable();
+2 -12
arch/arm/kernel/stacktrace.c
··· 3 3 #include <linux/sched/debug.h> 4 4 #include <linux/stacktrace.h> 5 5 6 + #include <asm/sections.h> 6 7 #include <asm/stacktrace.h> 7 8 #include <asm/traps.h> 8 9 ··· 64 63 #ifdef CONFIG_STACKTRACE 65 64 struct stack_trace_data { 66 65 struct stack_trace *trace; 67 - unsigned long last_pc; 68 66 unsigned int no_sched_functions; 69 67 unsigned int skip; 70 68 }; ··· 87 87 if (trace->nr_entries >= trace->max_entries) 88 88 return 1; 89 89 90 - /* 91 - * in_exception_text() is designed to test if the PC is one of 92 - * the functions which has an exception stack above it, but 93 - * unfortunately what is in frame->pc is the return LR value, 94 - * not the saved PC value. So, we need to track the previous 95 - * frame PC value when doing this. 96 - */ 97 - addr = data->last_pc; 98 - data->last_pc = frame->pc; 99 - if (!in_exception_text(addr)) 90 + if (!in_entry_text(frame->pc)) 100 91 return 0; 101 92 102 93 regs = (struct pt_regs *)frame->sp; ··· 105 114 struct stackframe frame; 106 115 107 116 data.trace = trace; 108 - data.last_pc = ULONG_MAX; 109 117 data.skip = trace->skip; 110 118 data.no_sched_functions = nosched; 111 119
+2 -2
arch/arm/kernel/traps.c
··· 72 72 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); 73 73 #endif 74 74 75 - if (in_exception_text(where)) 75 + if (in_entry_text(from)) 76 76 dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); 77 77 } 78 78 ··· 433 433 return fn ? fn(regs, instr) : 1; 434 434 } 435 435 436 - asmlinkage void __exception do_undefinstr(struct pt_regs *regs) 436 + asmlinkage void do_undefinstr(struct pt_regs *regs) 437 437 { 438 438 unsigned int instr; 439 439 siginfo_t info;
+3 -3
arch/arm/kernel/vmlinux-xip.lds.S
··· 96 96 .text : { /* Real text segment */ 97 97 _stext = .; /* Text and read-only data */ 98 98 IDMAP_TEXT 99 - __exception_text_start = .; 100 - *(.exception.text) 101 - __exception_text_end = .; 99 + __entry_text_start = .; 100 + *(.entry.text) 101 + __entry_text_end = .; 102 102 IRQENTRY_TEXT 103 103 TEXT_TEXT 104 104 SCHED_TEXT
+3 -3
arch/arm/kernel/vmlinux.lds.S
··· 105 105 .text : { /* Real text segment */ 106 106 _stext = .; /* Text and read-only data */ 107 107 IDMAP_TEXT 108 - __exception_text_start = .; 109 - *(.exception.text) 110 - __exception_text_end = .; 108 + __entry_text_start = .; 109 + *(.entry.text) 110 + __entry_text_end = .; 111 111 IRQENTRY_TEXT 112 112 SOFTIRQENTRY_TEXT 113 113 TEXT_TEXT
+1 -1
arch/arm/lib/Makefile
··· 8 8 lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ 9 9 csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ 10 10 delay.o delay-loop.o findbit.o memchr.o memcpy.o \ 11 - memmove.o memset.o memzero.o setbit.o \ 11 + memmove.o memset.o setbit.o \ 12 12 strchr.o strrchr.o \ 13 13 testchangebit.o testclearbit.o testsetbit.o \ 14 14 ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-137
arch/arm/lib/memzero.S
··· 1 - /* 2 - * linux/arch/arm/lib/memzero.S 3 - * 4 - * Copyright (C) 1995-2000 Russell King 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License version 2 as 8 - * published by the Free Software Foundation. 9 - */ 10 - #include <linux/linkage.h> 11 - #include <asm/assembler.h> 12 - #include <asm/unwind.h> 13 - 14 - .text 15 - .align 5 16 - .word 0 17 - /* 18 - * Align the pointer in r0. r3 contains the number of bytes that we are 19 - * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we 20 - * don't bother; we use byte stores instead. 21 - */ 22 - UNWIND( .fnstart ) 23 - 1: subs r1, r1, #4 @ 1 do we have enough 24 - blt 5f @ 1 bytes to align with? 25 - cmp r3, #2 @ 1 26 - strltb r2, [r0], #1 @ 1 27 - strleb r2, [r0], #1 @ 1 28 - strb r2, [r0], #1 @ 1 29 - add r1, r1, r3 @ 1 (r1 = r1 - (4 - r3)) 30 - /* 31 - * The pointer is now aligned and the length is adjusted. Try doing the 32 - * memzero again. 33 - */ 34 - 35 - ENTRY(__memzero) 36 - mov r2, #0 @ 1 37 - ands r3, r0, #3 @ 1 unaligned? 38 - bne 1b @ 1 39 - /* 40 - * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary. 41 - */ 42 - cmp r1, #16 @ 1 we can skip this chunk if we 43 - blt 4f @ 1 have < 16 bytes 44 - 45 - #if ! CALGN(1)+0 46 - 47 - /* 48 - * We need an extra register for this loop - save the return address and 49 - * use the LR 50 - */ 51 - str lr, [sp, #-4]! 
@ 1 52 - UNWIND( .fnend ) 53 - UNWIND( .fnstart ) 54 - UNWIND( .save {lr} ) 55 - mov ip, r2 @ 1 56 - mov lr, r2 @ 1 57 - 58 - 3: subs r1, r1, #64 @ 1 write 32 bytes out per loop 59 - stmgeia r0!, {r2, r3, ip, lr} @ 4 60 - stmgeia r0!, {r2, r3, ip, lr} @ 4 61 - stmgeia r0!, {r2, r3, ip, lr} @ 4 62 - stmgeia r0!, {r2, r3, ip, lr} @ 4 63 - bgt 3b @ 1 64 - ldmeqfd sp!, {pc} @ 1/2 quick exit 65 - /* 66 - * No need to correct the count; we're only testing bits from now on 67 - */ 68 - tst r1, #32 @ 1 69 - stmneia r0!, {r2, r3, ip, lr} @ 4 70 - stmneia r0!, {r2, r3, ip, lr} @ 4 71 - tst r1, #16 @ 1 16 bytes or more? 72 - stmneia r0!, {r2, r3, ip, lr} @ 4 73 - ldr lr, [sp], #4 @ 1 74 - UNWIND( .fnend ) 75 - 76 - #else 77 - 78 - /* 79 - * This version aligns the destination pointer in order to write 80 - * whole cache lines at once. 81 - */ 82 - 83 - stmfd sp!, {r4-r7, lr} 84 - UNWIND( .fnend ) 85 - UNWIND( .fnstart ) 86 - UNWIND( .save {r4-r7, lr} ) 87 - mov r4, r2 88 - mov r5, r2 89 - mov r6, r2 90 - mov r7, r2 91 - mov ip, r2 92 - mov lr, r2 93 - 94 - cmp r1, #96 95 - andgts ip, r0, #31 96 - ble 3f 97 - 98 - rsb ip, ip, #32 99 - sub r1, r1, ip 100 - movs ip, ip, lsl #(32 - 4) 101 - stmcsia r0!, {r4, r5, r6, r7} 102 - stmmiia r0!, {r4, r5} 103 - movs ip, ip, lsl #2 104 - strcs r2, [r0], #4 105 - 106 - 3: subs r1, r1, #64 107 - stmgeia r0!, {r2-r7, ip, lr} 108 - stmgeia r0!, {r2-r7, ip, lr} 109 - bgt 3b 110 - ldmeqfd sp!, {r4-r7, pc} 111 - 112 - tst r1, #32 113 - stmneia r0!, {r2-r7, ip, lr} 114 - tst r1, #16 115 - stmneia r0!, {r4-r7} 116 - ldmfd sp!, {r4-r7, lr} 117 - UNWIND( .fnend ) 118 - 119 - #endif 120 - 121 - UNWIND( .fnstart ) 122 - 4: tst r1, #8 @ 1 8 bytes or more? 123 - stmneia r0!, {r2, r3} @ 2 124 - tst r1, #4 @ 1 4 bytes or more? 125 - strne r2, [r0], #4 @ 1 126 - /* 127 - * When we get here, we've got less than 4 bytes to zero. We 128 - * may have an unaligned pointer as well. 129 - */ 130 - 5: tst r1, #2 @ 1 2 bytes or more? 
131 - strneb r2, [r0], #1 @ 1 132 - strneb r2, [r0], #1 @ 1 133 - tst r1, #1 @ 1 a byte left over 134 - strneb r2, [r0], #1 @ 1 135 - ret lr @ 1 136 - UNWIND( .fnend ) 137 - ENDPROC(__memzero)
+1
arch/arm/mach-sa1100/Kconfig
··· 5 5 config SA1100_ASSABET 6 6 bool "Assabet" 7 7 select ARM_SA1110_CPUFREQ 8 + select GPIO_REG 8 9 help 9 10 Say Y here if you are using the Intel(R) StrongARM(R) SA-1110 10 11 Microprocessor Development Board (also known as the Assabet).
+54 -17
arch/arm/mach-sa1100/assabet.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/module.h> 15 15 #include <linux/errno.h> 16 + #include <linux/gpio/gpio-reg.h> 16 17 #include <linux/ioport.h> 17 18 #include <linux/platform_data/sa11x0-serial.h> 18 19 #include <linux/serial_core.h> ··· 62 61 unsigned long SCR_value = ASSABET_SCR_INIT; 63 62 EXPORT_SYMBOL(SCR_value); 64 63 65 - static unsigned long BCR_value = ASSABET_BCR_DB1110; 64 + static struct gpio_chip *assabet_bcr_gc; 66 65 66 + static const char *assabet_names[] = { 67 + "cf_pwr", "cf_gfx_reset", "nsoft_reset", "irda_fsel", 68 + "irda_md0", "irda_md1", "stereo_loopback", "ncf_bus_on", 69 + "audio_pwr_on", "light_pwr_on", "lcd16data", "lcd_pwr_on", 70 + "rs232_on", "nred_led", "ngreen_led", "vib_on", 71 + "com_dtr", "com_rts", "radio_wake_mod", "i2c_enab", 72 + "tvir_enab", "qmute", "radio_pwr_on", "spkr_off", 73 + "rs232_valid", "com_dcd", "com_cts", "com_dsr", 74 + "radio_cts", "radio_dsr", "radio_dcd", "radio_ri", 75 + }; 76 + 77 + /* The old deprecated interface */ 67 78 void ASSABET_BCR_frob(unsigned int mask, unsigned int val) 68 79 { 69 - unsigned long flags; 80 + unsigned long m = mask, v = val; 70 81 71 - local_irq_save(flags); 72 - BCR_value = (BCR_value & ~mask) | val; 73 - ASSABET_BCR = BCR_value; 74 - local_irq_restore(flags); 82 + assabet_bcr_gc->set_multiple(assabet_bcr_gc, &m, &v); 75 83 } 76 - 77 84 EXPORT_SYMBOL(ASSABET_BCR_frob); 85 + 86 + static int __init assabet_init_gpio(void __iomem *reg, u32 def_val) 87 + { 88 + struct gpio_chip *gc; 89 + 90 + writel_relaxed(def_val, reg); 91 + 92 + gc = gpio_reg_init(NULL, reg, -1, 32, "assabet", 0xff000000, def_val, 93 + assabet_names, NULL, NULL); 94 + 95 + if (IS_ERR(gc)) 96 + return PTR_ERR(gc); 97 + 98 + assabet_bcr_gc = gc; 99 + 100 + return gc->base; 101 + } 78 102 79 103 /* 80 104 * The codec reset goes to three devices, so we need to release ··· 172 146 unsigned gpdr = GPDR; 173 147 unsigned gplr = GPLR; 174 148 175 - ASSABET_BCR = BCR_value | ASSABET_BCR_AUDIO_ON; 
149 + ASSABET_BCR_frob(ASSABET_BCR_AUDIO_ON, ASSABET_BCR_AUDIO_ON); 176 150 udelay(100); 177 151 178 152 GPCR = SDA | SCK | MOD; /* clear L3 mode to ensure UDA1341 doesn't respond */ ··· 483 457 sa11x0_ppc_configure_mcp(); 484 458 485 459 if (machine_has_neponset()) { 486 - /* 487 - * Angel sets this, but other bootloaders may not. 488 - * 489 - * This must precede any driver calls to BCR_set() 490 - * or BCR_clear(). 491 - */ 492 - ASSABET_BCR = BCR_value = ASSABET_BCR_DB1111; 493 - 494 460 #ifndef CONFIG_ASSABET_NEPONSET 495 461 printk( "Warning: Neponset detected but full support " 496 462 "hasn't been configured in the kernel\n" ); ··· 766 748 fs_initcall(assabet_leds_init); 767 749 #endif 768 750 751 + void __init assabet_init_irq(void) 752 + { 753 + u32 def_val; 754 + 755 + sa1100_init_irq(); 756 + 757 + if (machine_has_neponset()) 758 + def_val = ASSABET_BCR_DB1111; 759 + else 760 + def_val = ASSABET_BCR_DB1110; 761 + 762 + /* 763 + * Angel sets this, but other bootloaders may not. 764 + * 765 + * This must precede any driver calls to BCR_set() or BCR_clear(). 766 + */ 767 + assabet_init_gpio((void *)&ASSABET_BCR, def_val); 768 + } 769 + 769 770 MACHINE_START(ASSABET, "Intel-Assabet") 770 771 .atag_offset = 0x100, 771 772 .fixup = fixup_assabet, 772 773 .map_io = assabet_map_io, 773 774 .nr_irqs = SA1100_NR_IRQS, 774 - .init_irq = sa1100_init_irq, 775 + .init_irq = assabet_init_irq, 775 776 .init_time = sa1100_timer_init, 776 777 .init_machine = assabet_init, 777 778 .init_late = sa11x0_init_late,
+93 -60
arch/arm/mach-sa1100/neponset.c
··· 3 3 * linux/arch/arm/mach-sa1100/neponset.c 4 4 */ 5 5 #include <linux/err.h> 6 + #include <linux/gpio/driver.h> 7 + #include <linux/gpio/gpio-reg.h> 6 8 #include <linux/init.h> 7 9 #include <linux/ioport.h> 8 10 #include <linux/irq.h> ··· 47 45 #define IRR_USAR (1 << 1) 48 46 #define IRR_SA1111 (1 << 2) 49 47 48 + #define NCR_NGPIO 7 49 + 50 50 #define MDM_CTL0_RTS1 (1 << 0) 51 51 #define MDM_CTL0_DTR1 (1 << 1) 52 52 #define MDM_CTL0_RTS2 (1 << 2) 53 53 #define MDM_CTL0_DTR2 (1 << 3) 54 + #define MDM_CTL0_NGPIO 4 54 55 55 56 #define MDM_CTL1_CTS1 (1 << 0) 56 57 #define MDM_CTL1_DSR1 (1 << 1) ··· 61 56 #define MDM_CTL1_CTS2 (1 << 3) 62 57 #define MDM_CTL1_DSR2 (1 << 4) 63 58 #define MDM_CTL1_DCD2 (1 << 5) 59 + #define MDM_CTL1_NGPIO 6 64 60 65 61 #define AUD_SEL_1341 (1 << 0) 66 62 #define AUD_MUTE_1341 (1 << 1) 63 + #define AUD_NGPIO 2 67 64 68 65 extern void sa1110_mb_disable(void); 66 + 67 + #define to_neponset_gpio_chip(x) container_of(x, struct neponset_gpio_chip, gc) 68 + 69 + static const char *neponset_ncr_names[] = { 70 + "gp01_off", "tp_power", "ms_power", "enet_osc", 71 + "spi_kb_wk_up", "a0vpp", "a1vpp" 72 + }; 73 + 74 + static const char *neponset_mdmctl0_names[] = { 75 + "rts3", "dtr3", "rts1", "dtr1", 76 + }; 77 + 78 + static const char *neponset_mdmctl1_names[] = { 79 + "cts3", "dsr3", "dcd3", "cts1", "dsr1", "dcd1" 80 + }; 81 + 82 + static const char *neponset_aud_names[] = { 83 + "sel_1341", "mute_1341", 84 + }; 69 85 70 86 struct neponset_drvdata { 71 87 void __iomem *base; 72 88 struct platform_device *sa1111; 73 89 struct platform_device *smc91x; 74 90 unsigned irq_base; 75 - #ifdef CONFIG_PM_SLEEP 76 - u32 ncr0; 77 - u32 mdm_ctl_0; 78 - #endif 91 + struct gpio_chip *gpio[4]; 79 92 }; 80 93 81 - static void __iomem *nep_base; 94 + static struct neponset_drvdata *nep; 82 95 83 96 void neponset_ncr_frob(unsigned int mask, unsigned int val) 84 97 { 85 - void __iomem *base = nep_base; 98 + struct neponset_drvdata *n = nep; 99 + unsigned long m 
= mask, v = val; 86 100 87 - if (base) { 88 - unsigned long flags; 89 - unsigned v; 90 - 91 - local_irq_save(flags); 92 - v = readb_relaxed(base + NCR_0); 93 - writeb_relaxed((v & ~mask) | val, base + NCR_0); 94 - local_irq_restore(flags); 95 - } else { 96 - WARN(1, "nep_base unset\n"); 97 - } 101 + if (nep) 102 + n->gpio[0]->set_multiple(n->gpio[0], &m, &v); 103 + else 104 + WARN(1, "nep unset\n"); 98 105 } 99 106 EXPORT_SYMBOL(neponset_ncr_frob); 100 107 101 108 static void neponset_set_mctrl(struct uart_port *port, u_int mctrl) 102 109 { 103 - void __iomem *base = nep_base; 104 - u_int mdm_ctl0; 110 + struct neponset_drvdata *n = nep; 111 + unsigned long mask, val = 0; 105 112 106 - if (!base) 113 + if (!n) 107 114 return; 108 115 109 - mdm_ctl0 = readb_relaxed(base + MDM_CTL_0); 110 116 if (port->mapbase == _Ser1UTCR0) { 111 - if (mctrl & TIOCM_RTS) 112 - mdm_ctl0 &= ~MDM_CTL0_RTS2; 113 - else 114 - mdm_ctl0 |= MDM_CTL0_RTS2; 117 + mask = MDM_CTL0_RTS2 | MDM_CTL0_DTR2; 115 118 116 - if (mctrl & TIOCM_DTR) 117 - mdm_ctl0 &= ~MDM_CTL0_DTR2; 118 - else 119 - mdm_ctl0 |= MDM_CTL0_DTR2; 119 + if (!(mctrl & TIOCM_RTS)) 120 + val |= MDM_CTL0_RTS2; 121 + 122 + if (!(mctrl & TIOCM_DTR)) 123 + val |= MDM_CTL0_DTR2; 120 124 } else if (port->mapbase == _Ser3UTCR0) { 121 - if (mctrl & TIOCM_RTS) 122 - mdm_ctl0 &= ~MDM_CTL0_RTS1; 123 - else 124 - mdm_ctl0 |= MDM_CTL0_RTS1; 125 + mask = MDM_CTL0_RTS1 | MDM_CTL0_DTR1; 125 126 126 - if (mctrl & TIOCM_DTR) 127 - mdm_ctl0 &= ~MDM_CTL0_DTR1; 128 - else 129 - mdm_ctl0 |= MDM_CTL0_DTR1; 127 + if (!(mctrl & TIOCM_RTS)) 128 + val |= MDM_CTL0_RTS1; 129 + 130 + if (!(mctrl & TIOCM_DTR)) 131 + val |= MDM_CTL0_DTR1; 130 132 } 131 133 132 - writeb_relaxed(mdm_ctl0, base + MDM_CTL_0); 134 + n->gpio[1]->set_multiple(n->gpio[1], &mask, &val); 133 135 } 134 136 135 137 static u_int neponset_get_mctrl(struct uart_port *port) 136 138 { 137 - void __iomem *base = nep_base; 139 + void __iomem *base = nep->base; 138 140 u_int ret = TIOCM_CD | 
TIOCM_CTS | TIOCM_DSR; 139 141 u_int mdm_ctl1; 140 142 ··· 243 231 .irq_unmask = nochip_noop, 244 232 }; 245 233 234 + static int neponset_init_gpio(struct gpio_chip **gcp, 235 + struct device *dev, const char *label, void __iomem *reg, 236 + unsigned num, bool in, const char *const * names) 237 + { 238 + struct gpio_chip *gc; 239 + 240 + gc = gpio_reg_init(dev, reg, -1, num, label, in ? 0xffffffff : 0, 241 + readl_relaxed(reg), names, NULL, NULL); 242 + if (IS_ERR(gc)) 243 + return PTR_ERR(gc); 244 + 245 + *gcp = gc; 246 + 247 + return 0; 248 + } 249 + 246 250 static struct sa1111_platform_data sa1111_info = { 247 251 .disable_devs = SA1111_DEVID_PS2_MSE, 248 252 }; ··· 302 274 }; 303 275 int ret, irq; 304 276 305 - if (nep_base) 277 + if (nep) 306 278 return -EBUSY; 307 279 308 280 irq = ret = platform_get_irq(dev, 0); ··· 358 330 irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); 359 331 irq_set_chained_handler_and_data(irq, neponset_irq_handler, d); 360 332 333 + /* Disable GPIO 0/1 drivers so the buttons work on the Assabet */ 334 + writeb_relaxed(NCR_GP01_OFF, d->base + NCR_0); 335 + 336 + neponset_init_gpio(&d->gpio[0], &dev->dev, "neponset-ncr", 337 + d->base + NCR_0, NCR_NGPIO, false, 338 + neponset_ncr_names); 339 + neponset_init_gpio(&d->gpio[1], &dev->dev, "neponset-mdm-ctl0", 340 + d->base + MDM_CTL_0, MDM_CTL0_NGPIO, false, 341 + neponset_mdmctl0_names); 342 + neponset_init_gpio(&d->gpio[2], &dev->dev, "neponset-mdm-ctl1", 343 + d->base + MDM_CTL_1, MDM_CTL1_NGPIO, true, 344 + neponset_mdmctl1_names); 345 + neponset_init_gpio(&d->gpio[3], &dev->dev, "neponset-aud-ctl", 346 + d->base + AUD_CTL, AUD_NGPIO, false, 347 + neponset_aud_names); 348 + 361 349 /* 362 350 * We would set IRQ_GPIO25 to be a wake-up IRQ, but unfortunately 363 351 * something on the Neponset activates this IRQ on sleep (eth?) 
··· 384 340 385 341 dev_info(&dev->dev, "Neponset daughter board, providing IRQ%u-%u\n", 386 342 d->irq_base, d->irq_base + NEP_IRQ_NR - 1); 387 - nep_base = d->base; 343 + nep = d; 388 344 389 345 sa1100_register_uart_fns(&neponset_port_fns); 390 346 391 347 /* Ensure that the memory bus request/grant signals are setup */ 392 348 sa1110_mb_disable(); 393 - 394 - /* Disable GPIO 0/1 drivers so the buttons work on the Assabet */ 395 - writeb_relaxed(NCR_GP01_OFF, d->base + NCR_0); 396 349 397 350 sa1111_resources[0].parent = sa1111_res; 398 351 sa1111_resources[1].start = d->irq_base + NEP_IRQ_SA1111; ··· 426 385 platform_device_unregister(d->smc91x); 427 386 irq_set_chained_handler(irq, NULL); 428 387 irq_free_descs(d->irq_base, NEP_IRQ_NR); 429 - nep_base = NULL; 388 + nep = NULL; 430 389 iounmap(d->base); 431 390 kfree(d); 432 391 ··· 434 393 } 435 394 436 395 #ifdef CONFIG_PM_SLEEP 437 - static int neponset_suspend(struct device *dev) 438 - { 439 - struct neponset_drvdata *d = dev_get_drvdata(dev); 440 - 441 - d->ncr0 = readb_relaxed(d->base + NCR_0); 442 - d->mdm_ctl_0 = readb_relaxed(d->base + MDM_CTL_0); 443 - 444 - return 0; 445 - } 446 - 447 396 static int neponset_resume(struct device *dev) 448 397 { 449 398 struct neponset_drvdata *d = dev_get_drvdata(dev); 399 + int i, ret = 0; 450 400 451 - writeb_relaxed(d->ncr0, d->base + NCR_0); 452 - writeb_relaxed(d->mdm_ctl_0, d->base + MDM_CTL_0); 401 + for (i = 0; i < ARRAY_SIZE(d->gpio); i++) { 402 + ret = gpio_reg_resume(d->gpio[i]); 403 + if (ret) 404 + break; 405 + } 453 406 454 - return 0; 407 + return ret; 455 408 } 456 409 457 410 static const struct dev_pm_ops neponset_pm_ops = { 458 - .suspend_noirq = neponset_suspend, 459 411 .resume_noirq = neponset_resume, 460 - .freeze_noirq = neponset_suspend, 461 412 .restore_noirq = neponset_resume, 462 413 }; 463 414 #define PM_OPS &neponset_pm_ops
+8
arch/arm/mm/Kconfig
··· 909 909 The outer cache has a outer_cache_fns.sync function pointer 910 910 that can be used to drain the write buffer of the outer cache. 911 911 912 + config CACHE_B15_RAC 913 + bool "Enable the Broadcom Brahma-B15 read-ahead cache controller" 914 + depends on ARCH_BRCMSTB 915 + default y 916 + help 917 + This option enables the Broadcom Brahma-B15 read-ahead cache 918 + controller. If disabled, the read-ahead cache remains off. 919 + 912 920 config CACHE_FEROCEON_L2 913 921 bool "Enable the Feroceon L2 cache controller" 914 922 depends on ARCH_MV78XX0 || ARCH_MVEBU
+3 -1
arch/arm/mm/Makefile
··· 13 13 obj-$(CONFIG_ARM_MPU) += pmsa-v7.o 14 14 endif 15 15 16 - obj-$(CONFIG_ARM_PTDUMP) += dump.o 16 + obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o 17 + obj-$(CONFIG_ARM_PTDUMP_DEBUGFS) += ptdump_debugfs.o 17 18 obj-$(CONFIG_MODULES) += proc-syms.o 18 19 obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o 19 20 ··· 104 103 AFLAGS_proc-v7.o :=-Wa,-march=armv7-a 105 104 106 105 obj-$(CONFIG_OUTER_CACHE) += l2c-common.o 106 + obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o 107 107 obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o 108 108 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o 109 109 obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
+356
arch/arm/mm/cache-b15-rac.c
··· 1 + /* 2 + * Broadcom Brahma-B15 CPU read-ahead cache management functions 3 + * 4 + * Copyright (C) 2015-2016 Broadcom 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/err.h> 12 + #include <linux/spinlock.h> 13 + #include <linux/io.h> 14 + #include <linux/bitops.h> 15 + #include <linux/of_address.h> 16 + #include <linux/notifier.h> 17 + #include <linux/cpu.h> 18 + #include <linux/syscore_ops.h> 19 + #include <linux/reboot.h> 20 + 21 + #include <asm/cacheflush.h> 22 + #include <asm/hardware/cache-b15-rac.h> 23 + 24 + extern void v7_flush_kern_cache_all(void); 25 + 26 + /* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */ 27 + #define RAC_CONFIG0_REG (0x78) 28 + #define RACENPREF_MASK (0x3) 29 + #define RACPREFINST_SHIFT (0) 30 + #define RACENINST_SHIFT (2) 31 + #define RACPREFDATA_SHIFT (4) 32 + #define RACENDATA_SHIFT (6) 33 + #define RAC_CPU_SHIFT (8) 34 + #define RACCFG_MASK (0xff) 35 + #define RAC_CONFIG1_REG (0x7c) 36 + #define RAC_FLUSH_REG (0x80) 37 + #define FLUSH_RAC (1 << 0) 38 + 39 + /* Bitmask to enable instruction and data prefetching with a 256-bytes stride */ 40 + #define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \ 41 + RACENPREF_MASK << RACENINST_SHIFT | \ 42 + 1 << RACPREFDATA_SHIFT | \ 43 + RACENPREF_MASK << RACENDATA_SHIFT) 44 + 45 + #define RAC_ENABLED 0 46 + /* Special state where we want to bypass the spinlock and call directly 47 + * into the v7 cache maintenance operations during suspend/resume 48 + */ 49 + #define RAC_SUSPENDED 1 50 + 51 + static void __iomem *b15_rac_base; 52 + static DEFINE_SPINLOCK(rac_lock); 53 + 54 + static u32 rac_config0_reg; 55 + 56 + /* Initialization flag to avoid checking for b15_rac_base, and to prevent 57 + * multi-platform kernels from crashing here as well. 
58 + */ 59 + static unsigned long b15_rac_flags; 60 + 61 + static inline u32 __b15_rac_disable(void) 62 + { 63 + u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG); 64 + __raw_writel(0, b15_rac_base + RAC_CONFIG0_REG); 65 + dmb(); 66 + return val; 67 + } 68 + 69 + static inline void __b15_rac_flush(void) 70 + { 71 + u32 reg; 72 + 73 + __raw_writel(FLUSH_RAC, b15_rac_base + RAC_FLUSH_REG); 74 + do { 75 + /* This dmb() is required to force the Bus Interface Unit 76 + * to clean oustanding writes, and forces an idle cycle 77 + * to be inserted. 78 + */ 79 + dmb(); 80 + reg = __raw_readl(b15_rac_base + RAC_FLUSH_REG); 81 + } while (reg & FLUSH_RAC); 82 + } 83 + 84 + static inline u32 b15_rac_disable_and_flush(void) 85 + { 86 + u32 reg; 87 + 88 + reg = __b15_rac_disable(); 89 + __b15_rac_flush(); 90 + return reg; 91 + } 92 + 93 + static inline void __b15_rac_enable(u32 val) 94 + { 95 + __raw_writel(val, b15_rac_base + RAC_CONFIG0_REG); 96 + /* dsb() is required here to be consistent with __flush_icache_all() */ 97 + dsb(); 98 + } 99 + 100 + #define BUILD_RAC_CACHE_OP(name, bar) \ 101 + void b15_flush_##name(void) \ 102 + { \ 103 + unsigned int do_flush; \ 104 + u32 val = 0; \ 105 + \ 106 + if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) { \ 107 + v7_flush_##name(); \ 108 + bar; \ 109 + return; \ 110 + } \ 111 + \ 112 + spin_lock(&rac_lock); \ 113 + do_flush = test_bit(RAC_ENABLED, &b15_rac_flags); \ 114 + if (do_flush) \ 115 + val = b15_rac_disable_and_flush(); \ 116 + v7_flush_##name(); \ 117 + if (!do_flush) \ 118 + bar; \ 119 + else \ 120 + __b15_rac_enable(val); \ 121 + spin_unlock(&rac_lock); \ 122 + } 123 + 124 + #define nobarrier 125 + 126 + /* The readahead cache present in the Brahma-B15 CPU is a special piece of 127 + * hardware after the integrated L2 cache of the B15 CPU complex whose purpose 128 + * is to prefetch instruction and/or data with a line size of either 64 bytes 129 + * or 256 bytes. 
The rationale is that the data-bus of the CPU interface is 130 + * optimized for 256-bytes transactions, and enabling the readahead cache 131 + * provides a significant performance boost we want it enabled (typically 132 + * twice the performance for a memcpy benchmark application). 133 + * 134 + * The readahead cache is transparent for Modified Virtual Addresses 135 + * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and 136 + * DCCIMVAC. 137 + * 138 + * It is however not transparent for the following cache maintenance 139 + * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU which is precisely 140 + * what we are patching here with our BUILD_RAC_CACHE_OP here. 141 + */ 142 + BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier); 143 + 144 + static void b15_rac_enable(void) 145 + { 146 + unsigned int cpu; 147 + u32 enable = 0; 148 + 149 + for_each_possible_cpu(cpu) 150 + enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT)); 151 + 152 + b15_rac_disable_and_flush(); 153 + __b15_rac_enable(enable); 154 + } 155 + 156 + static int b15_rac_reboot_notifier(struct notifier_block *nb, 157 + unsigned long action, 158 + void *data) 159 + { 160 + /* During kexec, we are not yet migrated on the boot CPU, so we need to 161 + * make sure we are SMP safe here. Once the RAC is disabled, flag it as 162 + * suspended such that the hotplug notifier returns early. 
163 + */ 164 + if (action == SYS_RESTART) { 165 + spin_lock(&rac_lock); 166 + b15_rac_disable_and_flush(); 167 + clear_bit(RAC_ENABLED, &b15_rac_flags); 168 + set_bit(RAC_SUSPENDED, &b15_rac_flags); 169 + spin_unlock(&rac_lock); 170 + } 171 + 172 + return NOTIFY_DONE; 173 + } 174 + 175 + static struct notifier_block b15_rac_reboot_nb = { 176 + .notifier_call = b15_rac_reboot_notifier, 177 + }; 178 + 179 + /* The CPU hotplug case is the most interesting one, we basically need to make 180 + * sure that the RAC is disabled for the entire system prior to having a CPU 181 + * die, in particular prior to this dying CPU having exited the coherency 182 + * domain. 183 + * 184 + * Once this CPU is marked dead, we can safely re-enable the RAC for the 185 + * remaining CPUs in the system which are still online. 186 + * 187 + * Offlining a CPU is the problematic case, onlining a CPU is not much of an 188 + * issue since the CPU and its cache-level hierarchy will start filling with 189 + * the RAC disabled, so L1 and L2 only. 190 + * 191 + * In this function, we should NOT have to verify any unsafe setting/condition 192 + * b15_rac_base: 193 + * 194 + * It is protected by the RAC_ENABLED flag which is cleared by default, and 195 + * being cleared when initial procedure is done. b15_rac_base had been set at 196 + * that time. 197 + * 198 + * RAC_ENABLED: 199 + * There is a small timing windows, in b15_rac_init(), between 200 + * cpuhp_setup_state_*() 201 + * ... 202 + * set RAC_ENABLED 203 + * However, there is no hotplug activity based on the Linux booting procedure. 204 + * 205 + * Since we have to disable RAC for all cores, we keep RAC on as long as as 206 + * possible (disable it as late as possible) to gain the cache benefit. 
207 + * 208 + * Thus, dying/dead states are chosen here 209 + * 210 + * We are choosing not do disable the RAC on a per-CPU basis, here, if we did 211 + * we would want to consider disabling it as early as possible to benefit the 212 + * other active CPUs. 213 + */ 214 + 215 + /* Running on the dying CPU */ 216 + static int b15_rac_dying_cpu(unsigned int cpu) 217 + { 218 + /* During kexec/reboot, the RAC is disabled via the reboot notifier 219 + * return early here. 220 + */ 221 + if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) 222 + return 0; 223 + 224 + spin_lock(&rac_lock); 225 + 226 + /* Indicate that we are starting a hotplug procedure */ 227 + __clear_bit(RAC_ENABLED, &b15_rac_flags); 228 + 229 + /* Disable the readahead cache and save its value to a global */ 230 + rac_config0_reg = b15_rac_disable_and_flush(); 231 + 232 + spin_unlock(&rac_lock); 233 + 234 + return 0; 235 + } 236 + 237 + /* Running on a non-dying CPU */ 238 + static int b15_rac_dead_cpu(unsigned int cpu) 239 + { 240 + /* During kexec/reboot, the RAC is disabled via the reboot notifier 241 + * return early here. 242 + */ 243 + if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) 244 + return 0; 245 + 246 + spin_lock(&rac_lock); 247 + 248 + /* And enable it */ 249 + __b15_rac_enable(rac_config0_reg); 250 + __set_bit(RAC_ENABLED, &b15_rac_flags); 251 + 252 + spin_unlock(&rac_lock); 253 + 254 + return 0; 255 + } 256 + 257 + static int b15_rac_suspend(void) 258 + { 259 + /* Suspend the read-ahead cache oeprations, forcing our cache 260 + * implementation to fallback to the regular ARMv7 calls. 261 + * 262 + * We are guaranteed to be running on the boot CPU at this point and 263 + * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy 264 + * here. 
265 + */ 266 + rac_config0_reg = b15_rac_disable_and_flush(); 267 + set_bit(RAC_SUSPENDED, &b15_rac_flags); 268 + 269 + return 0; 270 + } 271 + 272 + static void b15_rac_resume(void) 273 + { 274 + /* Coming out of a S3 suspend/resume cycle, the read-ahead cache 275 + * register RAC_CONFIG0_REG will be restored to its default value, make 276 + * sure we re-enable it and set the enable flag, we are also guaranteed 277 + * to run on the boot CPU, so not racy again. 278 + */ 279 + __b15_rac_enable(rac_config0_reg); 280 + clear_bit(RAC_SUSPENDED, &b15_rac_flags); 281 + } 282 + 283 + static struct syscore_ops b15_rac_syscore_ops = { 284 + .suspend = b15_rac_suspend, 285 + .resume = b15_rac_resume, 286 + }; 287 + 288 + static int __init b15_rac_init(void) 289 + { 290 + struct device_node *dn; 291 + int ret = 0, cpu; 292 + u32 reg, en_mask = 0; 293 + 294 + dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl"); 295 + if (!dn) 296 + return -ENODEV; 297 + 298 + if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n")) 299 + goto out; 300 + 301 + b15_rac_base = of_iomap(dn, 0); 302 + if (!b15_rac_base) { 303 + pr_err("failed to remap BIU control base\n"); 304 + ret = -ENOMEM; 305 + goto out; 306 + } 307 + 308 + ret = register_reboot_notifier(&b15_rac_reboot_nb); 309 + if (ret) { 310 + pr_err("failed to register reboot notifier\n"); 311 + iounmap(b15_rac_base); 312 + goto out; 313 + } 314 + 315 + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { 316 + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, 317 + "arm/cache-b15-rac:dead", 318 + NULL, b15_rac_dead_cpu); 319 + if (ret) 320 + goto out_unmap; 321 + 322 + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING, 323 + "arm/cache-b15-rac:dying", 324 + NULL, b15_rac_dying_cpu); 325 + if (ret) 326 + goto out_cpu_dead; 327 + } 328 + 329 + if (IS_ENABLED(CONFIG_PM_SLEEP)) 330 + register_syscore_ops(&b15_rac_syscore_ops); 331 + 332 + spin_lock(&rac_lock); 333 + reg = __raw_readl(b15_rac_base + 
RAC_CONFIG0_REG); 334 + for_each_possible_cpu(cpu) 335 + en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT)); 336 + WARN(reg & en_mask, "Read-ahead cache not previously disabled\n"); 337 + 338 + b15_rac_enable(); 339 + set_bit(RAC_ENABLED, &b15_rac_flags); 340 + spin_unlock(&rac_lock); 341 + 342 + pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n", 343 + b15_rac_base + RAC_CONFIG0_REG); 344 + 345 + goto out; 346 + 347 + out_cpu_dead: 348 + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING); 349 + out_unmap: 350 + unregister_reboot_notifier(&b15_rac_reboot_nb); 351 + iounmap(b15_rac_base); 352 + out: 353 + of_node_put(dn); 354 + return ret; 355 + } 356 + arch_initcall(b15_rac_init);
+21
arch/arm/mm/cache-v7.S
··· 15 15 #include <asm/assembler.h> 16 16 #include <asm/errno.h> 17 17 #include <asm/unwind.h> 18 + #include <asm/hardware/cache-b15-rac.h> 18 19 19 20 #include "proc-macros.S" 20 21 ··· 447 446 448 447 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 449 448 define_cache_functions v7 449 + 450 + /* The Broadcom Brahma-B15 read-ahead cache requires some modifications 451 + * to the v7_cache_fns, we only override the ones we need 452 + */ 453 + #ifndef CONFIG_CACHE_B15_RAC 454 + globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all 455 + #endif 456 + globl_equ b15_flush_icache_all, v7_flush_icache_all 457 + globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis 458 + globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all 459 + globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range 460 + globl_equ b15_coherent_kern_range, v7_coherent_kern_range 461 + globl_equ b15_coherent_user_range, v7_coherent_user_range 462 + globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area 463 + 464 + globl_equ b15_dma_map_area, v7_dma_map_area 465 + globl_equ b15_dma_unmap_area, v7_dma_unmap_area 466 + globl_equ b15_dma_flush_range, v7_dma_flush_range 467 + 468 + define_cache_functions b15
+108 -47
arch/arm/mm/dump.c
··· 21 21 #include <asm/fixmap.h> 22 22 #include <asm/memory.h> 23 23 #include <asm/pgtable.h> 24 - 25 - struct addr_marker { 26 - unsigned long start_address; 27 - const char *name; 28 - }; 24 + #include <asm/ptdump.h> 29 25 30 26 static struct addr_marker address_markers[] = { 31 27 { MODULES_VADDR, "Modules" }, ··· 34 38 { -1, NULL }, 35 39 }; 36 40 41 + #define pt_dump_seq_printf(m, fmt, args...) \ 42 + ({ \ 43 + if (m) \ 44 + seq_printf(m, fmt, ##args); \ 45 + }) 46 + 47 + #define pt_dump_seq_puts(m, fmt) \ 48 + ({ \ 49 + if (m) \ 50 + seq_printf(m, fmt); \ 51 + }) 52 + 37 53 struct pg_state { 38 54 struct seq_file *seq; 39 55 const struct addr_marker *marker; 40 56 unsigned long start_address; 41 57 unsigned level; 42 58 u64 current_prot; 59 + bool check_wx; 60 + unsigned long wx_pages; 43 61 const char *current_domain; 44 62 }; 45 63 ··· 62 52 u64 val; 63 53 const char *set; 64 54 const char *clear; 55 + bool ro_bit; 56 + bool nx_bit; 65 57 }; 66 58 67 59 static const struct prot_bits pte_bits[] = { ··· 77 65 .val = L_PTE_RDONLY, 78 66 .set = "ro", 79 67 .clear = "RW", 68 + .ro_bit = true, 80 69 }, { 81 70 .mask = L_PTE_XN, 82 71 .val = L_PTE_XN, 83 72 .set = "NX", 84 73 .clear = "x ", 74 + .nx_bit = true, 85 75 }, { 86 76 .mask = L_PTE_SHARED, 87 77 .val = L_PTE_SHARED, ··· 147 133 .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2, 148 134 .set = "ro", 149 135 .clear = "RW", 136 + .ro_bit = true, 150 137 #elif __LINUX_ARM_ARCH__ >= 6 151 138 { 152 139 .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE, 153 140 .val = PMD_SECT_APX | PMD_SECT_AP_WRITE, 154 141 .set = " ro", 142 + .ro_bit = true, 155 143 }, { 156 144 .mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE, 157 145 .val = PMD_SECT_AP_WRITE, ··· 172 156 .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE, 173 157 .val = 0, 174 158 .set = " ro", 159 + .ro_bit = true, 175 160 }, { 176 161 .mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE, 177 162 .val = PMD_SECT_AP_WRITE, ··· 191 174 .val = PMD_SECT_XN, 
192 175 .set = "NX", 193 176 .clear = "x ", 177 + .nx_bit = true, 194 178 }, { 195 179 .mask = PMD_SECT_S, 196 180 .val = PMD_SECT_S, ··· 204 186 const struct prot_bits *bits; 205 187 size_t num; 206 188 u64 mask; 189 + const struct prot_bits *ro_bit; 190 + const struct prot_bits *nx_bit; 207 191 }; 208 192 209 193 static struct pg_level pg_level[] = { ··· 234 214 s = bits->clear; 235 215 236 216 if (s) 237 - seq_printf(st->seq, " %s", s); 217 + pt_dump_seq_printf(st->seq, " %s", s); 238 218 } 219 + } 220 + 221 + static void note_prot_wx(struct pg_state *st, unsigned long addr) 222 + { 223 + if (!st->check_wx) 224 + return; 225 + if ((st->current_prot & pg_level[st->level].ro_bit->mask) == 226 + pg_level[st->level].ro_bit->val) 227 + return; 228 + if ((st->current_prot & pg_level[st->level].nx_bit->mask) == 229 + pg_level[st->level].nx_bit->val) 230 + return; 231 + 232 + WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n", 233 + (void *)st->start_address); 234 + 235 + st->wx_pages += (addr - st->start_address) / PAGE_SIZE; 239 236 } 240 237 241 238 static void note_page(struct pg_state *st, unsigned long addr, ··· 265 228 st->level = level; 266 229 st->current_prot = prot; 267 230 st->current_domain = domain; 268 - seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 231 + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 269 232 } else if (prot != st->current_prot || level != st->level || 270 233 domain != st->current_domain || 271 234 addr >= st->marker[1].start_address) { ··· 273 236 unsigned long delta; 274 237 275 238 if (st->current_prot) { 276 - seq_printf(st->seq, "0x%08lx-0x%08lx ", 239 + note_prot_wx(st, addr); 240 + pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ", 277 241 st->start_address, addr); 278 242 279 243 delta = (addr - st->start_address) >> 10; ··· 282 244 delta >>= 10; 283 245 unit++; 284 246 } 285 - seq_printf(st->seq, "%9lu%c", delta, *unit); 247 + pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit); 286 248 
if (st->current_domain) 287 - seq_printf(st->seq, " %s", st->current_domain); 249 + pt_dump_seq_printf(st->seq, " %s", 250 + st->current_domain); 288 251 if (pg_level[st->level].bits) 289 252 dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num); 290 - seq_printf(st->seq, "\n"); 253 + pt_dump_seq_printf(st->seq, "\n"); 291 254 } 292 255 293 256 if (addr >= st->marker[1].start_address) { 294 257 st->marker++; 295 - seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 258 + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", 259 + st->marker->name); 296 260 } 297 261 st->start_address = addr; 298 262 st->current_prot = prot; ··· 375 335 } 376 336 } 377 337 378 - static void walk_pgd(struct seq_file *m) 338 + static void walk_pgd(struct pg_state *st, struct mm_struct *mm, 339 + unsigned long start) 379 340 { 380 - pgd_t *pgd = swapper_pg_dir; 381 - struct pg_state st; 382 - unsigned long addr; 341 + pgd_t *pgd = pgd_offset(mm, 0UL); 383 342 unsigned i; 384 - 385 - memset(&st, 0, sizeof(st)); 386 - st.seq = m; 387 - st.marker = address_markers; 343 + unsigned long addr; 388 344 389 345 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { 390 - addr = i * PGDIR_SIZE; 346 + addr = start + i * PGDIR_SIZE; 391 347 if (!pgd_none(*pgd)) { 392 - walk_pud(&st, pgd, addr); 348 + walk_pud(st, pgd, addr); 393 349 } else { 394 - note_page(&st, addr, 1, pgd_val(*pgd), NULL); 350 + note_page(st, addr, 1, pgd_val(*pgd), NULL); 395 351 } 396 352 } 353 + } 397 354 355 + void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) 356 + { 357 + struct pg_state st = { 358 + .seq = m, 359 + .marker = info->markers, 360 + .check_wx = false, 361 + }; 362 + 363 + walk_pgd(&st, info->mm, info->base_addr); 398 364 note_page(&st, 0, 0, 0, NULL); 399 365 } 400 366 401 - static int ptdump_show(struct seq_file *m, void *v) 367 + static void ptdump_initialize(void) 402 368 { 403 - walk_pgd(m); 404 - return 0; 405 - } 406 - 407 - static int ptdump_open(struct inode *inode, struct file *file) 
408 - { 409 - return single_open(file, ptdump_show, NULL); 410 - } 411 - 412 - static const struct file_operations ptdump_fops = { 413 - .open = ptdump_open, 414 - .read = seq_read, 415 - .llseek = seq_lseek, 416 - .release = single_release, 417 - }; 418 - 419 - static int ptdump_init(void) 420 - { 421 - struct dentry *pe; 422 369 unsigned i, j; 423 370 424 371 for (i = 0; i < ARRAY_SIZE(pg_level); i++) 425 372 if (pg_level[i].bits) 426 - for (j = 0; j < pg_level[i].num; j++) 373 + for (j = 0; j < pg_level[i].num; j++) { 427 374 pg_level[i].mask |= pg_level[i].bits[j].mask; 375 + if (pg_level[i].bits[j].ro_bit) 376 + pg_level[i].ro_bit = &pg_level[i].bits[j]; 377 + if (pg_level[i].bits[j].nx_bit) 378 + pg_level[i].nx_bit = &pg_level[i].bits[j]; 379 + } 428 380 429 381 address_markers[2].start_address = VMALLOC_START; 382 + } 430 383 431 - pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, 432 - &ptdump_fops); 433 - return pe ? 0 : -ENOMEM; 384 + static struct ptdump_info kernel_ptdump_info = { 385 + .mm = &init_mm, 386 + .markers = address_markers, 387 + .base_addr = 0, 388 + }; 389 + 390 + void ptdump_check_wx(void) 391 + { 392 + struct pg_state st = { 393 + .seq = NULL, 394 + .marker = (struct addr_marker[]) { 395 + { 0, NULL}, 396 + { -1, NULL}, 397 + }, 398 + .check_wx = true, 399 + }; 400 + 401 + walk_pgd(&st, &init_mm, 0); 402 + note_page(&st, 0, 0, 0, NULL); 403 + if (st.wx_pages) 404 + pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", 405 + st.wx_pages); 406 + else 407 + pr_info("Checked W+X mappings: passed, no W+X pages found\n"); 408 + } 409 + 410 + static int ptdump_init(void) 411 + { 412 + ptdump_initialize(); 413 + return ptdump_debugfs_register(&kernel_ptdump_info, 414 + "kernel_page_tables"); 434 415 } 435 416 __initcall(ptdump_init);
+2 -3
arch/arm/mm/fault.c
··· 21 21 #include <linux/highmem.h> 22 22 #include <linux/perf_event.h> 23 23 24 - #include <asm/exception.h> 25 24 #include <asm/pgtable.h> 26 25 #include <asm/system_misc.h> 27 26 #include <asm/system_info.h> ··· 544 545 /* 545 546 * Dispatch a data abort to the relevant handler. 546 547 */ 547 - asmlinkage void __exception 548 + asmlinkage void 548 549 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 549 550 { 550 551 const struct fsr_info *inf = fsr_info + fsr_fs(fsr); ··· 577 578 ifsr_info[nr].name = name; 578 579 } 579 580 580 - asmlinkage void __exception 581 + asmlinkage void 581 582 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) 582 583 { 583 584 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+2 -2
arch/arm/mm/idmap.c
··· 16 16 * are not supported on any CPU using the idmap tables as its current 17 17 * page tables. 18 18 */ 19 - pgd_t *idmap_pgd; 20 - long long arch_phys_to_idmap_offset; 19 + pgd_t *idmap_pgd __ro_after_init; 20 + long long arch_phys_to_idmap_offset __ro_after_init; 21 21 22 22 #ifdef CONFIG_ARM_LPAE 23 23 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+2
arch/arm/mm/init.c
··· 36 36 #include <asm/system_info.h> 37 37 #include <asm/tlb.h> 38 38 #include <asm/fixmap.h> 39 + #include <asm/ptdump.h> 39 40 40 41 #include <asm/mach/arch.h> 41 42 #include <asm/mach/map.h> ··· 739 738 void mark_rodata_ro(void) 740 739 { 741 740 stop_machine(__mark_rodata_ro, NULL, NULL); 741 + debug_checkwx(); 742 742 } 743 743 744 744 void set_kernel_text_rw(void)
+2 -2
arch/arm/mm/nommu.c
··· 31 31 32 32 #ifdef CONFIG_CPU_CP15 33 33 #ifdef CONFIG_CPU_HIGH_VECTOR 34 - static unsigned long __init setup_vectors_base(void) 34 + unsigned long setup_vectors_base(void) 35 35 { 36 36 unsigned long reg = get_cr(); 37 37 ··· 57 57 return 0; 58 58 } 59 59 60 - static unsigned long __init setup_vectors_base(void) 60 + unsigned long setup_vectors_base(void) 61 61 { 62 62 unsigned long base = 0, reg = get_cr(); 63 63
+3 -1
arch/arm/mm/pmsa-v7.c
··· 6 6 7 7 #include <linux/bitops.h> 8 8 #include <linux/memblock.h> 9 + #include <linux/string.h> 9 10 10 11 #include <asm/cacheflush.h> 11 12 #include <asm/cp15.h> ··· 297 296 } 298 297 } 299 298 299 + memset(mem, 0, sizeof(mem)); 300 300 num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); 301 301 302 302 for (i = 0; i < num; i++) { ··· 435 433 436 434 /* Background */ 437 435 err |= mpu_setup_region(region++, 0, 32, 438 - MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA, 436 + MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW, 439 437 0, false); 440 438 441 439 #ifdef CONFIG_XIP_KERNEL
+3 -3
arch/arm/mm/proc-v7.S
··· 567 567 /* 568 568 * Standard v7 proc info content 569 569 */ 570 - .macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions 570 + .macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions, cache_fns = v7_cache_fns 571 571 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ 572 572 PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) 573 573 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ ··· 583 583 .long \proc_fns 584 584 .long v7wbi_tlb_fns 585 585 .long v6_user_fns 586 - .long v7_cache_fns 586 + .long \cache_fns 587 587 .endm 588 588 589 589 #ifndef CONFIG_ARM_LPAE ··· 678 678 __v7_b15mp_proc_info: 679 679 .long 0x420f00f0 680 680 .long 0xff0ffff0 681 - __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup 681 + __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns 682 682 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info 683 683 684 684 /*
+34
arch/arm/mm/ptdump_debugfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/debugfs.h> 3 + #include <linux/seq_file.h> 4 + 5 + #include <asm/ptdump.h> 6 + 7 + static int ptdump_show(struct seq_file *m, void *v) 8 + { 9 + struct ptdump_info *info = m->private; 10 + 11 + ptdump_walk_pgd(m, info); 12 + return 0; 13 + } 14 + 15 + static int ptdump_open(struct inode *inode, struct file *file) 16 + { 17 + return single_open(file, ptdump_show, inode->i_private); 18 + } 19 + 20 + static const struct file_operations ptdump_fops = { 21 + .open = ptdump_open, 22 + .read = seq_read, 23 + .llseek = seq_lseek, 24 + .release = single_release, 25 + }; 26 + 27 + int ptdump_debugfs_register(struct ptdump_info *info, const char *name) 28 + { 29 + struct dentry *pe; 30 + 31 + pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); 32 + return pe ? 0 : -ENOMEM; 33 + 34 + }
+11 -3
arch/arm/probes/kprobes/core.c
··· 32 32 #include <linux/percpu.h> 33 33 #include <linux/bug.h> 34 34 #include <asm/patch.h> 35 + #include <asm/sections.h> 35 36 36 37 #include "../decode-arm.h" 37 38 #include "../decode-thumb.h" ··· 64 63 const union decode_action *actions; 65 64 int is; 66 65 const struct decode_checker **checkers; 67 - 68 - if (in_exception_text(addr)) 69 - return -EINVAL; 70 66 71 67 #ifdef CONFIG_THUMB2_KERNEL 72 68 thumb = true; ··· 677 679 register_undef_hook(&kprobes_arm_break_hook); 678 680 #endif 679 681 return 0; 682 + } 683 + 684 + bool arch_within_kprobe_blacklist(unsigned long addr) 685 + { 686 + void *a = (void *)addr; 687 + 688 + return __in_irqentry_text(addr) || 689 + in_entry_text(addr) || 690 + in_idmap_text(addr) || 691 + memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1); 680 692 }
+1 -1
drivers/dma/imx-dma.c
··· 765 765 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 766 766 if (!desc) 767 767 break; 768 - __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); 768 + memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor)); 769 769 dma_async_tx_descriptor_init(&desc->desc, chan); 770 770 desc->desc.tx_submit = imxdma_tx_submit; 771 771 /* txd.flags will be overwritten in prep funcs */
+2
include/linux/cpuhotplug.h
··· 59 59 CPUHP_PCI_XGENE_DEAD, 60 60 CPUHP_IOMMU_INTEL_DEAD, 61 61 CPUHP_LUSTRE_CFS_DEAD, 62 + CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, 62 63 CPUHP_WORKQUEUE_PREP, 63 64 CPUHP_POWER_NUMA_PREPARE, 64 65 CPUHP_HRTIMERS_PREPARE, ··· 139 138 CPUHP_AP_ARM64_ISNDEP_STARTING, 140 139 CPUHP_AP_SMPCFD_DYING, 141 140 CPUHP_AP_X86_TBOOT_DYING, 141 + CPUHP_AP_ARM_CACHE_B15_RAC_DYING, 142 142 CPUHP_AP_ONLINE, 143 143 CPUHP_TEARDOWN_CPU, 144 144 CPUHP_AP_ONLINE_IDLE,