Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm: (28 commits)
ARM: 6411/1: vexpress: set RAM latencies to 1 cycle for PL310 on ct-ca9x4 tile
ARM: 6409/1: davinci: map sram using MT_MEMORY_NONCACHED instead of MT_DEVICE
ARM: 6408/1: omap: Map only available sram memory
ARM: 6407/1: mmu: Setup MT_MEMORY and MT_MEMORY_NONCACHED L1 entries
ARM: pxa: remove pr_<level> uses of KERN_<level>
ARM: pxa168fb: clear enable bit when not active
ARM: pxa: fix cpu_is_pxa*() not expanding to zero when not configured
ARM: pxa168: fix corrected reset vector
ARM: pxa: Use PIO for PI2C communication on Palm27x
ARM: pxa: Fix Vpac270 gpio_power for MMC
ARM: 6401/1: plug a race in the alignment trap handler
ARM: 6406/1: at91sam9g45: fix i2c bus speed
leds: leds-ns2: fix locking
ARM: dove: fix __io() definition to use bus based offset
dmaengine: fix interrupt clearing for mv_xor
ARM: kirkwood: Unbreak PCIe I/O port
ARM: Fix build error when using KCONFIG_CONFIG
ARM: 6383/1: Implement phys_mem_access_prot() to avoid attributes aliasing
ARM: 6400/1: at91: fix arch_gettimeoffset fallout
ARM: 6398/1: add proc info for ARM11MPCore/Cortex-A9 from ARM
...

+207 -81
+26 -1
arch/arm/Kconfig
··· 271 bool "Atmel AT91" 272 select ARCH_REQUIRE_GPIOLIB 273 select HAVE_CLK 274 - select ARCH_USES_GETTIMEOFFSET 275 help 276 This enables support for systems based on the Atmel AT91RM9200, 277 AT91SAM9 and AT91CAP9 processors. ··· 1049 workaround disables the write-allocate mode for the L2 cache via the 1050 ACTLR register. Note that setting specific bits in the ACTLR register 1051 may not be available in non-secure mode. 1052 1053 config PL310_ERRATA_588369 1054 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
··· 271 bool "Atmel AT91" 272 select ARCH_REQUIRE_GPIOLIB 273 select HAVE_CLK 274 help 275 This enables support for systems based on the Atmel AT91RM9200, 276 AT91SAM9 and AT91CAP9 processors. ··· 1050 workaround disables the write-allocate mode for the L2 cache via the 1051 ACTLR register. Note that setting specific bits in the ACTLR register 1052 may not be available in non-secure mode. 1053 + 1054 + config ARM_ERRATA_742230 1055 + bool "ARM errata: DMB operation may be faulty" 1056 + depends on CPU_V7 && SMP 1057 + help 1058 + This option enables the workaround for the 742230 Cortex-A9 1059 + (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction 1060 + between two write operations may not ensure the correct visibility 1061 + ordering of the two writes. This workaround sets a specific bit in 1062 + the diagnostic register of the Cortex-A9 which causes the DMB 1063 + instruction to behave as a DSB, ensuring the correct behaviour of 1064 + the two writes. 1065 + 1066 + config ARM_ERRATA_742231 1067 + bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption" 1068 + depends on CPU_V7 && SMP 1069 + help 1070 + This option enables the workaround for the 742231 Cortex-A9 1071 + (r2p0..r2p2) erratum. Under certain conditions, specific to the 1072 + Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode, 1073 + accessing some data located in the same cache line, may get corrupted 1074 + data due to bad handling of the address hazard when the line gets 1075 + replaced from one of the CPUs at the same time as another CPU is 1076 + accessing it. This workaround sets specific bits in the diagnostic 1077 + register of the Cortex-A9 which reduces the linefill issuing 1078 + capabilities of the processor. 1079 1080 config PL310_ERRATA_588369 1081 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+1 -1
arch/arm/boot/compressed/Makefile
··· 116 $(obj)/font.c: $(FONTC) 117 $(call cmd,shipped) 118 119 - $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 120 @sed "$(SEDFLAGS)" < $< > $@
··· 116 $(obj)/font.c: $(FONTC) 117 $(call cmd,shipped) 118 119 + $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) 120 @sed "$(SEDFLAGS)" < $< > $@
+4
arch/arm/include/asm/pgtable.h
··· 317 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 318 #define pgprot_dmacoherent(prot) \ 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 320 #else 321 #define pgprot_dmacoherent(prot) \ 322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
··· 317 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 318 #define pgprot_dmacoherent(prot) \ 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 320 + #define __HAVE_PHYS_MEM_ACCESS_PROT 321 + struct file; 322 + extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 323 + unsigned long size, pgprot_t vma_prot); 324 #else 325 #define pgprot_dmacoherent(prot) \ 326 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+2
arch/arm/kernel/entry-common.S
··· 48 beq no_work_pending 49 mov r0, sp @ 'regs' 50 mov r2, why @ 'syscall' 51 bl do_notify_resume 52 b ret_slow_syscall @ Check work again 53
··· 48 beq no_work_pending 49 mov r0, sp @ 'regs' 50 mov r2, why @ 'syscall' 51 + tst r1, #_TIF_SIGPENDING @ delivering a signal? 52 + movne why, #0 @ prevent further restarts 53 bl do_notify_resume 54 b ret_slow_syscall @ Check work again 55
+2 -2
arch/arm/mach-at91/at91sam9g45_devices.c
··· 426 .sda_is_open_drain = 1, 427 .scl_pin = AT91_PIN_PA21, 428 .scl_is_open_drain = 1, 429 - .udelay = 2, /* ~100 kHz */ 430 }; 431 432 static struct platform_device at91sam9g45_twi0_device = { ··· 440 .sda_is_open_drain = 1, 441 .scl_pin = AT91_PIN_PB11, 442 .scl_is_open_drain = 1, 443 - .udelay = 2, /* ~100 kHz */ 444 }; 445 446 static struct platform_device at91sam9g45_twi1_device = {
··· 426 .sda_is_open_drain = 1, 427 .scl_pin = AT91_PIN_PA21, 428 .scl_is_open_drain = 1, 429 + .udelay = 5, /* ~100 kHz */ 430 }; 431 432 static struct platform_device at91sam9g45_twi0_device = { ··· 440 .sda_is_open_drain = 1, 441 .scl_pin = AT91_PIN_PB11, 442 .scl_is_open_drain = 1, 443 + .udelay = 5, /* ~100 kHz */ 444 }; 445 446 static struct platform_device at91sam9g45_twi1_device = {
+1 -2
arch/arm/mach-davinci/dm355.c
··· 769 .virtual = SRAM_VIRT, 770 .pfn = __phys_to_pfn(0x00010000), 771 .length = SZ_32K, 772 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 773 - .type = MT_DEVICE, 774 }, 775 }; 776
··· 769 .virtual = SRAM_VIRT, 770 .pfn = __phys_to_pfn(0x00010000), 771 .length = SZ_32K, 772 + .type = MT_MEMORY_NONCACHED, 773 }, 774 }; 775
+1 -2
arch/arm/mach-davinci/dm365.c
··· 969 .virtual = SRAM_VIRT, 970 .pfn = __phys_to_pfn(0x00010000), 971 .length = SZ_32K, 972 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 973 - .type = MT_DEVICE, 974 }, 975 }; 976
··· 969 .virtual = SRAM_VIRT, 970 .pfn = __phys_to_pfn(0x00010000), 971 .length = SZ_32K, 972 + .type = MT_MEMORY_NONCACHED, 973 }, 974 }; 975
+1 -2
arch/arm/mach-davinci/dm644x.c
··· 653 .virtual = SRAM_VIRT, 654 .pfn = __phys_to_pfn(0x00008000), 655 .length = SZ_16K, 656 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 657 - .type = MT_DEVICE, 658 }, 659 }; 660
··· 653 .virtual = SRAM_VIRT, 654 .pfn = __phys_to_pfn(0x00008000), 655 .length = SZ_16K, 656 + .type = MT_MEMORY_NONCACHED, 657 }, 658 }; 659
+1 -2
arch/arm/mach-davinci/dm646x.c
··· 737 .virtual = SRAM_VIRT, 738 .pfn = __phys_to_pfn(0x00010000), 739 .length = SZ_32K, 740 - /* MT_MEMORY_NONCACHED requires supersection alignment */ 741 - .type = MT_DEVICE, 742 }, 743 }; 744
··· 737 .virtual = SRAM_VIRT, 738 .pfn = __phys_to_pfn(0x00010000), 739 .length = SZ_32K, 740 + .type = MT_MEMORY_NONCACHED, 741 }, 742 }; 743
+3 -3
arch/arm/mach-dove/include/mach/io.h
··· 13 14 #define IO_SPACE_LIMIT 0xffffffff 15 16 - #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 17 - DOVE_PCIE0_IO_VIRT_BASE)) 18 - #define __mem_pci(a) (a) 19 20 #endif
··· 13 14 #define IO_SPACE_LIMIT 0xffffffff 15 16 + #define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \ 17 + DOVE_PCIE0_IO_VIRT_BASE)) 18 + #define __mem_pci(a) (a) 19 20 #endif
+1 -1
arch/arm/mach-kirkwood/include/mach/kirkwood.h
··· 38 39 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 40 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 41 - #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 42 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 43 44 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
··· 38 39 #define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 40 #define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 41 + #define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000 42 #define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 43 44 #define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
+2 -2
arch/arm/mach-kirkwood/pcie.c
··· 117 * IORESOURCE_IO 118 */ 119 pp->res[0].name = "PCIe 0 I/O Space"; 120 - pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 122 pp->res[0].flags = IORESOURCE_IO; 123 ··· 139 * IORESOURCE_IO 140 */ 141 pp->res[0].name = "PCIe 1 I/O Space"; 142 - pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 144 pp->res[0].flags = IORESOURCE_IO; 145
··· 117 * IORESOURCE_IO 118 */ 119 pp->res[0].name = "PCIe 0 I/O Space"; 120 + pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 122 pp->res[0].flags = IORESOURCE_IO; 123 ··· 139 * IORESOURCE_IO 140 */ 141 pp->res[0].name = "PCIe 1 I/O Space"; 142 + pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 144 pp->res[0].flags = IORESOURCE_IO; 145
+6 -1
arch/arm/mach-mmp/include/mach/system.h
··· 9 #ifndef __ASM_MACH_SYSTEM_H 10 #define __ASM_MACH_SYSTEM_H 11 12 static inline void arch_idle(void) 13 { 14 cpu_do_idle(); ··· 18 19 static inline void arch_reset(char mode, const char *cmd) 20 { 21 - cpu_reset(0); 22 } 23 #endif /* __ASM_MACH_SYSTEM_H */
··· 9 #ifndef __ASM_MACH_SYSTEM_H 10 #define __ASM_MACH_SYSTEM_H 11 12 + #include <mach/cputype.h> 13 + 14 static inline void arch_idle(void) 15 { 16 cpu_do_idle(); ··· 16 17 static inline void arch_reset(char mode, const char *cmd) 18 { 19 + if (cpu_is_pxa168()) 20 + cpu_reset(0xffff0000); 21 + else 22 + cpu_reset(0); 23 } 24 #endif /* __ASM_MACH_SYSTEM_H */
+1 -2
arch/arm/mach-pxa/cpufreq-pxa2xx.c
··· 312 freqs.cpu = policy->cpu; 313 314 if (freq_debug) 315 - pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 316 - "(SDRAM %d Mhz)\n", 317 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 318 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 319
··· 312 freqs.cpu = policy->cpu; 313 314 if (freq_debug) 315 + pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 317 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 318
+12
arch/arm/mach-pxa/include/mach/hardware.h
··· 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 265 * == 0x3 for pxa300/pxa310/pxa320 266 */ 267 #define __cpu_is_pxa2xx(id) \ 268 ({ \ 269 unsigned int _id = (id) >> 13 & 0x7; \ 270 _id <= 0x2; \ 271 }) 272 273 #define __cpu_is_pxa3xx(id) \ 274 ({ \ 275 unsigned int _id = (id) >> 13 & 0x7; \ 276 _id == 0x3; \ 277 }) 278 279 #define __cpu_is_pxa93x(id) \ 280 ({ \ 281 unsigned int _id = (id) >> 4 & 0xfff; \ 282 _id == 0x683 || _id == 0x693; \ 283 }) 284 285 #define cpu_is_pxa2xx() \ 286 ({ \
··· 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 265 * == 0x3 for pxa300/pxa310/pxa320 266 */ 267 + #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) 268 #define __cpu_is_pxa2xx(id) \ 269 ({ \ 270 unsigned int _id = (id) >> 13 & 0x7; \ 271 _id <= 0x2; \ 272 }) 273 + #else 274 + #define __cpu_is_pxa2xx(id) (0) 275 + #endif 276 277 + #ifdef CONFIG_PXA3xx 278 #define __cpu_is_pxa3xx(id) \ 279 ({ \ 280 unsigned int _id = (id) >> 13 & 0x7; \ 281 _id == 0x3; \ 282 }) 283 + #else 284 + #define __cpu_is_pxa3xx(id) (0) 285 + #endif 286 287 + #if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935) 288 #define __cpu_is_pxa93x(id) \ 289 ({ \ 290 unsigned int _id = (id) >> 4 & 0xfff; \ 291 _id == 0x683 || _id == 0x693; \ 292 }) 293 + #else 294 + #define __cpu_is_pxa93x(id) (0) 295 + #endif 296 297 #define cpu_is_pxa2xx() \ 298 ({ \
+5 -1
arch/arm/mach-pxa/palm27x.c
··· 469 }, 470 }; 471 472 void __init palm27x_pmic_init(void) 473 { 474 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 475 - pxa27x_set_i2c_power_info(NULL); 476 } 477 #endif
··· 469 }, 470 }; 471 472 + static struct i2c_pxa_platform_data palm27x_i2c_power_info = { 473 + .use_pio = 1, 474 + }; 475 + 476 void __init palm27x_pmic_init(void) 477 { 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 479 + pxa27x_set_i2c_power_info(&palm27x_i2c_power_info); 480 } 481 #endif
+1
arch/arm/mach-pxa/vpac270.c
··· 240 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 241 static struct pxamci_platform_data vpac270_mci_platform_data = { 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 243 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 .detect_delay_ms = 200,
··· 240 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 241 static struct pxamci_platform_data vpac270_mci_platform_data = { 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 243 + .gpio_power = -1, 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 246 .detect_delay_ms = 200,
+3
arch/arm/mach-u300/include/mach/gpio.h
··· 273 extern int gpio_get_value(unsigned gpio); 274 extern void gpio_set_value(unsigned gpio, int value); 275 276 /* wrappers to sleep-enable the previous two functions */ 277 static inline unsigned gpio_to_irq(unsigned gpio) 278 {
··· 273 extern int gpio_get_value(unsigned gpio); 274 extern void gpio_set_value(unsigned gpio, int value); 275 276 + #define gpio_get_value_cansleep gpio_get_value 277 + #define gpio_set_value_cansleep gpio_set_value 278 + 279 /* wrappers to sleep-enable the previous two functions */ 280 static inline unsigned gpio_to_irq(unsigned gpio) 281 {
+7 -1
arch/arm/mach-vexpress/ct-ca9x4.c
··· 227 int i; 228 229 #ifdef CONFIG_CACHE_L2X0 230 - l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 231 #endif 232 233 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
··· 227 int i; 228 229 #ifdef CONFIG_CACHE_L2X0 230 + void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC); 231 + 232 + /* set RAM latencies to 1 cycle for this core tile. */ 233 + writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); 234 + writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); 235 + 236 + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); 237 #endif 238 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
+17 -2
arch/arm/mm/alignment.c
··· 885 886 if (ai_usermode & UM_SIGNAL) 887 force_sig(SIGBUS, current); 888 - else 889 - set_cr(cr_no_alignment); 890 891 return 0; 892 }
··· 885 886 if (ai_usermode & UM_SIGNAL) 887 force_sig(SIGBUS, current); 888 + else { 889 + /* 890 + * We're about to disable the alignment trap and return to 891 + * user space. But if an interrupt occurs before actually 892 + * reaching user space, then the IRQ vector entry code will 893 + * notice that we were still in kernel space and therefore 894 + * the alignment trap won't be re-enabled in that case as it 895 + * is presumed to be always on from kernel space. 896 + * Let's prevent that race by disabling interrupts here (they 897 + * are disabled on the way back to user space anyway in 898 + * entry-common.S) and disable the alignment trap only if 899 + * there is no work pending for this thread. 900 + */ 901 + raw_local_irq_disable(); 902 + if (!(current_thread_info()->flags & _TIF_WORK_MASK)) 903 + set_cr(cr_no_alignment); 904 + } 905 906 return 0; 907 }
+29 -2
arch/arm/mm/mmu.c
··· 15 #include <linux/nodemask.h> 16 #include <linux/memblock.h> 17 #include <linux/sort.h> 18 19 #include <asm/cputype.h> 20 #include <asm/sections.h> ··· 247 .domain = DOMAIN_USER, 248 }, 249 [MT_MEMORY] = { 250 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 251 .domain = DOMAIN_KERNEL, 252 }, ··· 258 .domain = DOMAIN_KERNEL, 259 }, 260 [MT_MEMORY_NONCACHED] = { 261 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 262 .domain = DOMAIN_KERNEL, 263 }, ··· 418 * Enable CPU-specific coherency if supported. 419 * (Only available on XSC3 at the moment.) 420 */ 421 - if (arch_is_coherent() && cpu_is_xsc3()) 422 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 423 - 424 /* 425 * ARMv6 and above have extended page tables. 426 */ ··· 448 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 449 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 450 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 451 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 452 #endif 453 } 454 ··· 487 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 488 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 489 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 490 mem_types[MT_ROM].prot_sect |= cp->pmd; 491 492 switch (cp->pmd) { ··· 511 t->prot_sect |= PMD_DOMAIN(t->domain); 512 } 513 } 514 515 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 516
··· 15 #include <linux/nodemask.h> 16 #include <linux/memblock.h> 17 #include <linux/sort.h> 18 + #include <linux/fs.h> 19 20 #include <asm/cputype.h> 21 #include <asm/sections.h> ··· 246 .domain = DOMAIN_USER, 247 }, 248 [MT_MEMORY] = { 249 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 250 + L_PTE_USER | L_PTE_EXEC, 251 + .prot_l1 = PMD_TYPE_TABLE, 252 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .domain = DOMAIN_KERNEL, 254 }, ··· 254 .domain = DOMAIN_KERNEL, 255 }, 256 [MT_MEMORY_NONCACHED] = { 257 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 258 + L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, 259 + .prot_l1 = PMD_TYPE_TABLE, 260 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 261 .domain = DOMAIN_KERNEL, 262 }, ··· 411 * Enable CPU-specific coherency if supported. 412 * (Only available on XSC3 at the moment.) 413 */ 414 + if (arch_is_coherent() && cpu_is_xsc3()) { 415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 416 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 417 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 418 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 419 + } 420 /* 421 * ARMv6 and above have extended page tables. 422 */ ··· 438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 441 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 442 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 443 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 444 #endif 445 } ··· 475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 478 + mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 479 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 480 mem_types[MT_ROM].prot_sect |= cp->pmd; 481 482 switch (cp->pmd) { ··· 497 t->prot_sect |= PMD_DOMAIN(t->domain); 498 } 499 } 500 + 501 + #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 502 + pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 503 + unsigned long size, pgprot_t vma_prot) 504 + { 505 + if (!pfn_valid(pfn)) 506 + return pgprot_noncached(vma_prot); 507 + else if (file->f_flags & O_SYNC) 508 + return pgprot_writecombine(vma_prot); 509 + return vma_prot; 510 + } 511 + EXPORT_SYMBOL(phys_mem_access_prot); 512 + #endif 513 514 #define vectors_base() (vectors_high() ? 0xffff0000 : 0) 515
+56 -6
arch/arm/mm/proc-v7.S
··· 186 * It is assumed that: 187 * - cache type register is implemented 188 */ 189 - __v7_setup: 190 #ifdef CONFIG_SMP 191 mrc p15, 0, r0, c1, c0, 1 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 195 #endif 196 adr r12, __v7_setup_stack @ the local stack 197 stmia r12, {r0-r5, r7, r9, r11, lr} 198 bl v7_flush_dcache_all ··· 202 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 203 and r10, r0, #0xff000000 @ ARM? 204 teq r10, #0x41000000 205 - bne 2f 206 and r5, r0, #0x00f00000 @ variant 207 and r6, r0, #0x0000000f @ revision 208 - orr r0, r6, r5, lsr #20-4 @ combine variant and revision 209 210 #ifdef CONFIG_ARM_ERRATA_430973 211 teq r5, #0x00100000 @ only present in r1p* 212 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register ··· 219 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 220 #endif 221 #ifdef CONFIG_ARM_ERRATA_458693 222 - teq r0, #0x20 @ only present in r2p0 223 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 224 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 225 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 226 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 227 #endif 228 #ifdef CONFIG_ARM_ERRATA_460075 229 - teq r0, #0x20 @ only present in r2p0 230 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 231 tsteq r10, #1 << 22 232 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 233 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 234 #endif 235 236 - 2: mov r10, #0 237 #ifdef HARVARD_CACHE 238 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 239 #endif ··· 349 .align 350 351 .section ".proc.info.init", #alloc, #execinstr 352 353 /* 354 * Match any ARMv7 processor core.
··· 186 * It is assumed that: 187 * - cache type register is implemented 188 */ 189 + __v7_ca9mp_setup: 190 #ifdef CONFIG_SMP 191 mrc p15, 0, r0, c1, c0, 1 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 195 #endif 196 + __v7_setup: 197 adr r12, __v7_setup_stack @ the local stack 198 stmia r12, {r0-r5, r7, r9, r11, lr} 199 bl v7_flush_dcache_all ··· 201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 202 and r10, r0, #0xff000000 @ ARM? 203 teq r10, #0x41000000 204 + bne 3f 205 and r5, r0, #0x00f00000 @ variant 206 and r6, r0, #0x0000000f @ revision 207 + orr r6, r6, r5, lsr #20-4 @ combine variant and revision 208 + ubfx r0, r0, #4, #12 @ primary part number 209 210 + /* Cortex-A8 Errata */ 211 + ldr r10, =0x00000c08 @ Cortex-A8 primary part number 212 + teq r0, r10 213 + bne 2f 214 #ifdef CONFIG_ARM_ERRATA_430973 215 teq r5, #0x00100000 @ only present in r1p* 216 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register ··· 213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 214 #endif 215 #ifdef CONFIG_ARM_ERRATA_458693 216 + teq r6, #0x20 @ only present in r2p0 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 221 #endif 222 #ifdef CONFIG_ARM_ERRATA_460075 223 + teq r6, #0x20 @ only present in r2p0 224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 225 tsteq r10, #1 << 22 226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 228 #endif 229 + b 3f 230 231 + /* Cortex-A9 Errata */ 232 + 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number 233 + teq r0, r10 234 + bne 3f 235 + #ifdef CONFIG_ARM_ERRATA_742230 236 + cmp r6, #0x22 @ only present up to r2p2 237 + mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register 238 + orrle r10, r10, #1 << 4 @ set bit #4 239 + mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register 240 + #endif 241 + #ifdef CONFIG_ARM_ERRATA_742231 242 + teq r6, #0x20 @ present in r2p0 243 + teqne r6, #0x21 @ present in r2p1 244 + teqne r6, #0x22 @ present in r2p2 245 + mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register 246 + orreq r10, r10, #1 << 12 @ set bit #12 247 + orreq r10, r10, #1 << 22 @ set bit #22 248 + mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 249 + #endif 250 + 251 + 3: mov r10, #0 252 #ifdef HARVARD_CACHE 253 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 254 #endif ··· 322 .align 323 324 .section ".proc.info.init", #alloc, #execinstr 325 + 326 + .type __v7_ca9mp_proc_info, #object 327 + __v7_ca9mp_proc_info: 328 + .long 0x410fc090 @ Required ID value 329 + .long 0xff0ffff0 @ Mask for ID 330 + .long PMD_TYPE_SECT | \ 331 + PMD_SECT_AP_WRITE | \ 332 + PMD_SECT_AP_READ | \ 333 + PMD_FLAGS 334 + .long PMD_TYPE_SECT | \ 335 + PMD_SECT_XN | \ 336 + PMD_SECT_AP_WRITE | \ 337 + PMD_SECT_AP_READ 338 + b __v7_ca9mp_setup 339 + .long cpu_arch_name 340 + .long cpu_elf_name 341 + .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 342 + .long cpu_v7_name 343 + .long v7_processor_functions 344 + .long v7wbi_tlb_fns 345 + .long v6_user_fns 346 + .long v7_cache_fns 347 + .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 348 349 /* 350 * Match any ARMv7 processor core.
+12 -21
arch/arm/plat-nomadik/timer.c
··· 1 /* 2 - * linux/arch/arm/mach-nomadik/timer.c 3 * 4 * Copyright (C) 2008 STMicroelectronics 5 * Copyright (C) 2010 Alessandro Rubini ··· 75 cr = readl(mtu_base + MTU_CR(1)); 76 writel(0, mtu_base + MTU_LR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 78 - writel(0x2, mtu_base + MTU_IMSC); 79 break; 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 case CLOCK_EVT_MODE_UNUSED: ··· 131 { 132 unsigned long rate; 133 struct clk *clk0; 134 - struct clk *clk1; 135 - u32 cr; 136 137 clk0 = clk_get_sys("mtu0", NULL); 138 BUG_ON(IS_ERR(clk0)); 139 140 - clk1 = clk_get_sys("mtu1", NULL); 141 - BUG_ON(IS_ERR(clk1)); 142 - 143 clk_enable(clk0); 144 - clk_enable(clk1); 145 146 /* 147 - * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 148 - * use a divide-by-16 counter if it's more than 16MHz 149 */ 150 - cr = MTU_CRn_32BITS;; 151 rate = clk_get_rate(clk0); 152 - if (rate > 16 << 20) { 153 rate /= 16; 154 cr |= MTU_CRn_PRESCALE_16; 155 } else { ··· 168 pr_err("timer: failed to initialize clock source %s\n", 169 nmdk_clksrc.name); 170 171 - /* Timer 1 is used for events, fix according to rate */ 172 - cr = MTU_CRn_32BITS; 173 - rate = clk_get_rate(clk1); 174 - if (rate > 16 << 20) { 175 - rate /= 16; 176 - cr |= MTU_CRn_PRESCALE_16; 177 - } else { 178 - cr |= MTU_CRn_PRESCALE_1; 179 - } 180 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 181 182 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
··· 1 /* 2 + * linux/arch/arm/plat-nomadik/timer.c 3 * 4 * Copyright (C) 2008 STMicroelectronics 5 * Copyright (C) 2010 Alessandro Rubini ··· 75 cr = readl(mtu_base + MTU_CR(1)); 76 writel(0, mtu_base + MTU_LR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 78 + writel(1 << 1, mtu_base + MTU_IMSC); 79 break; 80 case CLOCK_EVT_MODE_SHUTDOWN: 81 case CLOCK_EVT_MODE_UNUSED: ··· 131 { 132 unsigned long rate; 133 struct clk *clk0; 134 + u32 cr = MTU_CRn_32BITS; 135 136 clk0 = clk_get_sys("mtu0", NULL); 137 BUG_ON(IS_ERR(clk0)); 138 139 clk_enable(clk0); 140 141 /* 142 + * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz 143 + * for ux500. 144 + * Use a divide-by-16 counter if the tick rate is more than 32MHz. 145 + * At 32 MHz, the timer (with 32 bit counter) can be programmed 146 + * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer 147 + * with 16 gives too low timer resolution. 148 */ 149 rate = clk_get_rate(clk0); 150 + if (rate > 32000000) { 151 rate /= 16; 152 cr |= MTU_CRn_PRESCALE_16; 153 } else { ··· 170 pr_err("timer: failed to initialize clock source %s\n", 171 nmdk_clksrc.name); 172 173 + /* Timer 1 is used for events */ 174 + 175 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 176 177 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
+5 -20
arch/arm/plat-omap/sram.c
··· 220 if (omap_sram_size == 0) 221 return; 222 223 - if (cpu_is_omap24xx()) { 224 - omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA; 225 - 226 - base = OMAP2_SRAM_PA; 227 - base = ROUND_DOWN(base, PAGE_SIZE); 228 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 229 - } 230 - 231 if (cpu_is_omap34xx()) { 232 - omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA; 233 - base = OMAP3_SRAM_PA; 234 - base = ROUND_DOWN(base, PAGE_SIZE); 235 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 236 - 237 /* 238 * SRAM must be marked as non-cached on OMAP3 since the 239 * CORE DPLL M2 divider change code (in SRAM) runs with the ··· 231 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 232 } 233 234 - if (cpu_is_omap44xx()) { 235 - omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 236 - base = OMAP4_SRAM_PA; 237 - base = ROUND_DOWN(base, PAGE_SIZE); 238 - omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 239 - } 240 - omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */ 241 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 242 243 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
··· 220 if (omap_sram_size == 0) 221 return; 222 223 if (cpu_is_omap34xx()) { 224 /* 225 * SRAM must be marked as non-cached on OMAP3 since the 226 * CORE DPLL M2 divider change code (in SRAM) runs with the ··· 244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 245 } 246 247 + omap_sram_io_desc[0].virtual = omap_sram_base; 248 + base = omap_sram_start; 249 + base = ROUND_DOWN(base, PAGE_SIZE); 250 + omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 251 + omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 252 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 253 254 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
+1 -1
drivers/dma/mv_xor.c
··· 162 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 { 165 - u32 val = (1 << (1 + (chan->idx * 16))); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 }
··· 162 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 { 165 + u32 val = ~(1 << (chan->idx * 16)); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 }
+5 -4
drivers/leds/leds-ns2.c
··· 81 int cmd_level; 82 int slow_level; 83 84 - read_lock(&led_dat->rw_lock); 85 86 cmd_level = gpio_get_value(led_dat->cmd); 87 slow_level = gpio_get_value(led_dat->slow); ··· 95 } 96 } 97 98 - read_unlock(&led_dat->rw_lock); 99 100 return ret; 101 } ··· 104 enum ns2_led_modes mode) 105 { 106 int i; 107 108 - write_lock(&led_dat->rw_lock); 109 110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 if (mode == ns2_led_modval[i].mode) { ··· 117 } 118 } 119 120 - write_unlock(&led_dat->rw_lock); 121 } 122 123 static void ns2_led_set(struct led_classdev *led_cdev,
··· 81 int cmd_level; 82 int slow_level; 83 84 + read_lock_irq(&led_dat->rw_lock); 85 86 cmd_level = gpio_get_value(led_dat->cmd); 87 slow_level = gpio_get_value(led_dat->slow); ··· 95 } 96 } 97 98 + read_unlock_irq(&led_dat->rw_lock); 99 100 return ret; 101 } ··· 104 enum ns2_led_modes mode) 105 { 106 int i; 107 + unsigned long flags; 108 109 + write_lock_irqsave(&led_dat->rw_lock, flags); 110 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 112 if (mode == ns2_led_modval[i].mode) { ··· 116 } 117 } 118 119 + write_unlock_irqrestore(&led_dat->rw_lock, flags); 120 } 121 122 static void ns2_led_set(struct led_classdev *led_cdev,
+2 -2
drivers/video/pxa168fb.c
··· 298 * Set bit to enable graphics DMA. 299 */ 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 301 - x |= fbi->active ? 0x00000100 : 0; 302 - fbi->active = 0; 303 304 /* 305 * If we are in a pseudo-color mode, we need to enable
··· 298 * Set bit to enable graphics DMA. 299 */ 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 301 + x &= ~CFG_GRA_ENA_MASK; 302 + x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0); 303 304 /* 305 * If we are in a pseudo-color mode, we need to enable