Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm:
cyber2000fb: fix console in truecolor modes
cyber2000fb: fix machine hang on module load
SA1111: Eliminate use after free
ARM: Fix Versatile/Realview/VExpress MMC card detection sense
ARM: 6279/1: highmem: fix SMP preemption bug in kmap_high_l1_vipt
ARM: Add barriers to io{read,write}{8,16,32} accessors as well
ARM: 6273/1: Add barriers to the I/O accessors if ARM_DMA_MEM_BUFFERABLE
ARM: 6272/1: Convert L2x0 to use the IO relaxed operations
ARM: 6271/1: Introduce *_relaxed() I/O accessors
ARM: 6275/1: ux500: don't use writeb() in uncompress.h
ARM: 6270/1: clean files in arch/arm/boot/compressed/
ARM: Fix csum_partial_copy_from_user()

Changed files (+75 -49):

 arch/arm/boot/compressed/Makefile              |  +3
 arch/arm/common/sa1111.c                       |  +2  -3
 arch/arm/include/asm/io.h                      | +32 -18
 arch/arm/lib/csumpartialcopyuser.S             |  +1  -1
 arch/arm/mach-realview/core.c                  |  +1  -1
 arch/arm/mach-ux500/include/mach/uncompress.h  |  +5  -5
 arch/arm/mach-vexpress/v2m.c                   |  +1  -1
 arch/arm/mm/cache-l2x0.c                       | +13 -13
 arch/arm/mm/highmem.c                          |  +8  -5
 drivers/mmc/host/mmci.c                        |  +6  -2
 drivers/video/cyber2000fb.c                    |  +3
 11 files changed, 75 insertions(+), 49 deletions(-)
arch/arm/boot/compressed/Makefile (+3)

@@ -71,6 +71,9 @@
 		piggy.$(suffix_y) piggy.$(suffix_y).o \
 		font.o font.c head.o misc.o $(OBJS)
 
+# Make sure files are removed during clean
+extra-y       += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S
+
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
arch/arm/common/sa1111.c (+2 -3)

@@ -1028,13 +1028,12 @@
 	struct sa1111 *sachip = platform_get_drvdata(pdev);
 
 	if (sachip) {
-		__sa1111_remove(sachip);
-		platform_set_drvdata(pdev, NULL);
-
 #ifdef CONFIG_PM
 		kfree(sachip->saved_state);
 		sachip->saved_state = NULL;
 #endif
+		__sa1111_remove(sachip);
+		platform_set_drvdata(pdev, NULL);
 	}
 
 	return 0;
arch/arm/include/asm/io.h (+32 -18)

@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
+#include <asm/system.h>
 
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -180,24 +179,37 @@
  * IO port primitives for more information.
  */
 #ifdef __mem_pci
-#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; })
-#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \
+#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; })
+#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \
 					__raw_readw(__mem_pci(c))); __v; })
-#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \
+#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \
 					__raw_readl(__mem_pci(c))); __v; })
-#define readb_relaxed(addr)	readb(addr)
-#define readw_relaxed(addr)	readw(addr)
-#define readl_relaxed(addr)	readl(addr)
+
+#define writeb_relaxed(v,c)	((void)__raw_writeb(v,__mem_pci(c)))
+#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
+					cpu_to_le16(v),__mem_pci(c)))
+#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
+					cpu_to_le32(v),__mem_pci(c)))
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
+#define readb(c)		({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+
+#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
+#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
+#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
 
 #define readsb(p,d,l)		__raw_readsb(__mem_pci(p),d,l)
 #define readsw(p,d,l)		__raw_readsw(__mem_pci(p),d,l)
 #define readsl(p,d,l)		__raw_readsl(__mem_pci(p),d,l)
-
-#define writeb(v,c)		__raw_writeb(v,__mem_pci(c))
-#define writew(v,c)		__raw_writew((__force __u16) \
-					cpu_to_le16(v),__mem_pci(c))
-#define writel(v,c)		__raw_writel((__force __u32) \
-					cpu_to_le32(v),__mem_pci(c))
 
 #define writesb(p,d,l)		__raw_writesb(__mem_pci(p),d,l)
 #define writesw(p,d,l)		__raw_writesw(__mem_pci(p),d,l)
@@ -258,13 +244,13 @@
  * io{read,write}{8,16,32} macros
  */
 #ifndef ioread8
-#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __v; })
-#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
-#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+#define ioread8(p)	({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
+#define ioread16(p)	({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32(p)	({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
 
-#define iowrite8(v,p)	__raw_writeb(v, p)
-#define iowrite16(v,p)	__raw_writew((__force __u16)cpu_to_le16(v), p)
-#define iowrite32(v,p)	__raw_writel((__force __u32)cpu_to_le32(v), p)
+#define iowrite8(v,p)	({ __iowmb(); (void)__raw_writeb(v, p); })
+#define iowrite16(v,p)	({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p)	({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
 
 #define ioread8_rep(p,d,c)	__raw_readsb(p,d,c)
 #define ioread16_rep(p,d,c)	__raw_readsw(p,d,c)
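For readers unfamiliar with the accessor split introduced above, the sketch below shows the intended division of labour. It is illustrative only: the "foo" device and its FOO_* registers are invented, not part of this merge. Plain readl()/writel() now include __iormb()/__iowmb() so MMIO is ordered against DMA buffers in memory, while the *_relaxed() variants omit those barriers for register-only sequences such as status polling.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/kernel.h>

#define FOO_STATUS	0x00		/* hypothetical register layout */
#define FOO_DMA_ADDR	0x04
#define FOO_CTRL	0x08
#define FOO_BUSY	(1 << 0)
#define FOO_GO		(1 << 0)

static int foo_start_dma(void __iomem *regs, dma_addr_t buf)
{
	/* pure register poll: no ordering against memory is needed */
	while (readl_relaxed(regs + FOO_STATUS) & FOO_BUSY)
		cpu_relax();

	/*
	 * The descriptor lives in DMA memory, so use writel(): the implicit
	 * __iowmb() orders the memory writes before the doorbell write that
	 * starts the transfer.
	 */
	writel(lower_32_bits(buf), regs + FOO_DMA_ADDR);
	writel(FOO_GO, regs + FOO_CTRL);
	return 0;
}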
arch/arm/lib/csumpartialcopyuser.S (+1 -1)

@@ -71,7 +71,7 @@
 		.pushsection .fixup,"ax"
 		.align	4
 9001:		mov	r4, #-EFAULT
-		ldr	r5, [fp, #4]		@ *err_ptr
+		ldr	r5, [sp, #8*4]		@ *err_ptr
 		str	r4, [r5]
 		ldmia	sp, {r1, r2}		@ retrieve dst, len
 		add	r2, r2, r1
arch/arm/mach-realview/core.c (+1 -1)

@@ -237,7 +237,7 @@
 	else
 		mask = 2;
 
-	return !(readl(REALVIEW_SYSMCI) & mask);
+	return readl(REALVIEW_SYSMCI) & mask;
 }
 
 struct mmci_platform_data realview_mmc0_plat_data = {
arch/arm/mach-ux500/include/mach/uncompress.h (+5 -5)

@@ -30,22 +30,22 @@
 static void putc(const char c)
 {
 	/* Do nothing if the UART is not enabled. */
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;
 
 	if (c == '\n')
 		putc('\r');
 
-	while (readb(U8500_UART_FR) & (1 << 5))
+	while (__raw_readb(U8500_UART_FR) & (1 << 5))
 		barrier();
-	writeb(c, U8500_UART_DR);
+	__raw_writeb(c, U8500_UART_DR);
 }
 
 static void flush(void)
 {
-	if (!(readb(U8500_UART_CR) & 0x1))
+	if (!(__raw_readb(U8500_UART_CR) & 0x1))
 		return;
-	while (readb(U8500_UART_FR) & (1 << 3))
+	while (__raw_readb(U8500_UART_FR) & (1 << 3))
 		barrier();
 }
 
arch/arm/mach-vexpress/v2m.c (+1 -1)

@@ -241,7 +241,7 @@
 
 static unsigned int v2m_mmci_status(struct device *dev)
 {
-	return !(readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0));
+	return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
 }
 
 static struct mmci_platform_data v2m_mmci_data = {
arch/arm/mm/cache-l2x0.c (+13 -13)

@@ -32,14 +32,14 @@
 static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
 	/* wait for the operation to complete */
-	while (readl(reg) & mask)
+	while (readl_relaxed(reg) & mask)
 		;
 }
 
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
-	writel(0, base + L2X0_CACHE_SYNC);
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -47,14 +47,14 @@
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 }
 
 static inline void l2x0_inv_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -75,9 +75,9 @@
 
 	/* Clean by PA followed by Invalidate by PA */
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
 #else
@@ -90,7 +90,7 @@
 {
 	void __iomem *base = l2x0_base;
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
 }
 #endif
 
@@ -109,7 +109,7 @@
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
@@ -215,8 +215,8 @@
 
 	l2x0_base = base;
 
-	cache_id = readl(l2x0_base + L2X0_CACHE_ID);
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
+	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 
 	aux &= aux_mask;
 	aux |= aux_val;
@@ -248,15 +248,15 @@
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
 	 */
-	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
 
 		/* l2x0 controller is disabled */
-		writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
 		l2x0_inv_all();
 
 		/* enable L2X0 */
-		writel(1, l2x0_base + L2X0_CTRL);
+		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
 	outer_cache.inv_range = l2x0_inv_range;
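A brief aside on the L2x0 conversion above: these routines only poll and program the controller's own registers, and completion is enforced explicitly by cache_wait()/cache_sync(), so the rmb()/wmb() that readl()/writel() now carry would be pure overhead in these tight loops. A minimal sketch of that shape, with invented EXAMPLE_* names (not the L2x0 register map):

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>

#define EXAMPLE_OP	0x7f0		/* invented offsets, illustration only */
#define EXAMPLE_SYNC	0x730
#define EXAMPLE_BUSY	(1 << 0)

static DEFINE_SPINLOCK(example_lock);

/* issue one maintenance operation and wait for completion, barrier-free */
static void example_issue_op(void __iomem *base, unsigned long addr)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	while (readl_relaxed(base + EXAMPLE_OP) & EXAMPLE_BUSY)
		cpu_relax();
	writel_relaxed(addr, base + EXAMPLE_OP);
	writel_relaxed(0, base + EXAMPLE_SYNC);
	while (readl_relaxed(base + EXAMPLE_SYNC) & EXAMPLE_BUSY)
		cpu_relax();
	spin_unlock_irqrestore(&example_lock, flags);
}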
arch/arm/mm/highmem.c (+8 -5)

@@ -163,18 +163,21 @@
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned int idx, cpu;
+	int *depth;
 	unsigned long vaddr, flags;
 	pte_t pte, *ptep;
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	cpu = smp_processor_id();
+	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
 
 	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	ptep = TOP_PTE(vaddr);
 	pte = mk_pte(page, kmap_prot);
-
-	if (!in_interrupt())
-		preempt_disable();
 
 	raw_local_irq_save(flags);
 	(*depth)++;
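The highmem change above fixes a classic SMP preemption ordering bug: the CPU number and the per-CPU pointer were taken before preemption was disabled, so a preempted task could migrate and then update another CPU's kmap depth. A minimal sketch of the safe pattern, using a hypothetical per-CPU counter rather than the kmap code itself:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, example_depth);

static void example_enter(void)
{
	int *depth;

	/*
	 * Disable preemption *before* smp_processor_id()/per_cpu() so the
	 * task cannot migrate between reading the CPU number and touching
	 * that CPU's data.
	 */
	preempt_disable();
	depth = &per_cpu(example_depth, smp_processor_id());
	(*depth)++;
	/* ... per-CPU critical section ... */
	preempt_enable();
}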
drivers/mmc/host/mmci.c (+6 -2)

@@ -539,9 +539,13 @@
 	if (host->gpio_cd == -ENOSYS)
 		status = host->plat->status(mmc_dev(host->mmc));
 	else
-		status = gpio_get_value(host->gpio_cd);
+		status = !gpio_get_value(host->gpio_cd);
 
-	return !status;
+	/*
+	 * Use positive logic throughout - status is zero for no card,
+	 * non-zero for card inserted.
+	 */
+	return status;
 }
 
 static const struct mmc_host_ops mmci_ops = {
drivers/video/cyber2000fb.c (+3)

@@ -388,6 +388,7 @@
 		pseudo_val |= convert_bitfield(red, &var->red);
 		pseudo_val |= convert_bitfield(green, &var->green);
 		pseudo_val |= convert_bitfield(blue, &var->blue);
+		ret = 0;
 		break;
 	}
 
@@ -437,6 +436,8 @@
 	cyber2000fb_writeb(i | 4, 0x3cf, cfb);
 	cyber2000fb_writeb(val, 0x3c6, cfb);
 	cyber2000fb_writeb(i, 0x3cf, cfb);
+	/* prevent card lock-up observed on x86 with CyberPro 2000 */
+	cyber2000fb_readb(0x3cf, cfb);
 }
 
 static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)