Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

1) Add sparc RAM output to /proc/iomem, from Bob Picco.

2) Allow seeks on /dev/mdesc, from Khalid Aziz.

3) Cleanup sparc64 I/O accessors, from Sam Ravnborg.

4) If update_mmu_cache{,_pmd}() is called with a non-valid mapping, do
not insert it into the TLB miss hash tables otherwise we'll
livelock. Based upon work by Christopher Alexander Tobias Schulze.

5) Fix BREAK detection in sunsab driver when no actual characters are
pending, from Christopher Alexander Tobias Schulze.

6) Because we have modules --> openfirmware --> vmalloc ordering of
virtual memory, the lazy VMAP TLB flusher can cons up an invocation
of flush_tlb_kernel_range() that covers the openfirmware address
range. Unfortunately this will flush out the firmware's locked TLB
mapping which causes all kinds of trouble. Just split up the flush
request if this happens, but in the long term the lazy VMAP flusher
should probably be made a little bit smarter.

Based upon work by Christopher Alexander Tobias Schulze.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next:
sparc64: Fix up merge thinko.
sparc: Add "install" target
arch/sparc/math-emu/math_32.c: drop stray break operator
sparc64: ldc_connect() should not return EINVAL when handshake is in progress.
sparc64: Guard against flushing openfirmware mappings.
sunsab: Fix detection of BREAK on sunsab serial console
bbc-i2c: Fix BBC I2C envctrl on SunBlade 2000
sparc64: Do not insert non-valid PTEs into the TSB hash table.
sparc64: avoid code duplication in io_64.h
sparc64: reorder functions in io_64.h
sparc64: drop unused SLOW_DOWN_IO definitions
sparc64: remove macro indirection in io_64.h
sparc64: update IO access functions in PeeCeeI
sparcspkr: use sbus_*() primitives for IO
sparc: Add support for seek and shorter read to /dev/mdesc
sparc: use %s for unaligned panic
drivers/sbus/char: Micro-optimization in display7seg.c
display7seg: Introduce the use of the managed version of kzalloc
sparc64 - add mem to iomem resource

+476 -350
+3
arch/sparc/Makefile
··· 68 68 image zImage uImage tftpboot.img vmlinux.aout: vmlinux 69 69 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 70 70 71 + install: 72 + $(Q)$(MAKE) $(build)=$(boot) $@ 73 + 71 74 archclean: 72 75 $(Q)$(MAKE) $(clean)=$(boot) 73 76
+4
arch/sparc/boot/Makefile
··· 69 69 $(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE 70 70 $(call if_changed,elftoaout) 71 71 $(call if_changed,piggy) 72 + 73 + install: 74 + sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \ 75 + System.map "$(INSTALL_PATH)"
+50
arch/sparc/boot/install.sh
··· 1 + #!/bin/sh 2 + # 3 + # This file is subject to the terms and conditions of the GNU General Public 4 + # License. See the file "COPYING" in the main directory of this archive 5 + # for more details. 6 + # 7 + # Copyright (C) 1995 by Linus Torvalds 8 + # 9 + # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin 10 + # 11 + # "make install" script for SPARC architecture 12 + # 13 + # Arguments: 14 + # $1 - kernel version 15 + # $2 - kernel image file 16 + # $3 - kernel map file 17 + # $4 - default install path (blank if root directory) 18 + # 19 + 20 + verify () { 21 + if [ ! -f "$1" ]; then 22 + echo "" 1>&2 23 + echo " *** Missing file: $1" 1>&2 24 + echo ' *** You need to run "make" before "make install".' 1>&2 25 + echo "" 1>&2 26 + exit 1 27 + fi 28 + } 29 + 30 + # Make sure the files actually exist 31 + verify "$2" 32 + verify "$3" 33 + 34 + # User may have a custom install script 35 + 36 + if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi 37 + if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi 38 + 39 + # Default install - same as make zlilo 40 + 41 + if [ -f $4/vmlinuz ]; then 42 + mv $4/vmlinuz $4/vmlinuz.old 43 + fi 44 + 45 + if [ -f $4/System.map ]; then 46 + mv $4/System.map $4/System.old 47 + fi 48 + 49 + cat $2 > $4/vmlinuz 50 + cp $3 $4/System.map
+194 -285
arch/sparc/include/asm/io_64.h
··· 9 9 #include <asm/asi.h> 10 10 #include <asm-generic/pci_iomap.h> 11 11 12 - /* PC crapola... */ 13 - #define __SLOW_DOWN_IO do { } while (0) 14 - #define SLOW_DOWN_IO do { } while (0) 15 - 16 12 /* BIO layer definitions. */ 17 13 extern unsigned long kern_base, kern_size; 18 14 19 - static inline u8 _inb(unsigned long addr) 15 + /* __raw_{read,write}{b,w,l,q} uses direct access. 16 + * Access the memory as big endian bypassing the cache 17 + * by using ASI_PHYS_BYPASS_EC_E 18 + */ 19 + #define __raw_readb __raw_readb 20 + static inline u8 __raw_readb(const volatile void __iomem *addr) 20 21 { 21 22 u8 ret; 22 23 23 - __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */" 24 + __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */" 24 25 : "=r" (ret) 25 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 26 - : "memory"); 26 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 27 27 28 28 return ret; 29 29 } 30 30 31 - static inline u16 _inw(unsigned long addr) 31 + #define __raw_readw __raw_readw 32 + static inline u16 __raw_readw(const volatile void __iomem *addr) 32 33 { 33 34 u16 ret; 34 35 35 - __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */" 36 + __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */" 36 37 : "=r" (ret) 37 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 38 - : "memory"); 38 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 39 39 40 40 return ret; 41 41 } 42 42 43 - static inline u32 _inl(unsigned long addr) 43 + #define __raw_readl __raw_readl 44 + static inline u32 __raw_readl(const volatile void __iomem *addr) 44 45 { 45 46 u32 ret; 46 47 47 - __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */" 48 + __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */" 49 + : "=r" (ret) 50 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 51 + 52 + return ret; 53 + } 54 + 55 + #define __raw_readq __raw_readq 56 + static inline u64 __raw_readq(const volatile void __iomem *addr) 57 + { 58 + u64 ret; 59 + 60 + __asm__ __volatile__("ldxa\t[%1] %2, 
%0\t/* pci_raw_readq */" 61 + : "=r" (ret) 62 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 63 + 64 + return ret; 65 + } 66 + 67 + #define __raw_writeb __raw_writeb 68 + static inline void __raw_writeb(u8 b, const volatile void __iomem *addr) 69 + { 70 + __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */" 71 + : /* no outputs */ 72 + : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 73 + } 74 + 75 + #define __raw_writew __raw_writew 76 + static inline void __raw_writew(u16 w, const volatile void __iomem *addr) 77 + { 78 + __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */" 79 + : /* no outputs */ 80 + : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 81 + } 82 + 83 + #define __raw_writel __raw_writel 84 + static inline void __raw_writel(u32 l, const volatile void __iomem *addr) 85 + { 86 + __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */" 87 + : /* no outputs */ 88 + : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 89 + } 90 + 91 + #define __raw_writeq __raw_writeq 92 + static inline void __raw_writeq(u64 q, const volatile void __iomem *addr) 93 + { 94 + __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */" 95 + : /* no outputs */ 96 + : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 97 + } 98 + 99 + /* Memory functions, same as I/O accesses on Ultra. 
100 + * Access memory as little endian bypassing 101 + * the cache by using ASI_PHYS_BYPASS_EC_E_L 102 + */ 103 + #define readb readb 104 + static inline u8 readb(const volatile void __iomem *addr) 105 + { u8 ret; 106 + 107 + __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */" 108 + : "=r" (ret) 109 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 110 + : "memory"); 111 + return ret; 112 + } 113 + 114 + #define readw readw 115 + static inline u16 readw(const volatile void __iomem *addr) 116 + { u16 ret; 117 + 118 + __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */" 48 119 : "=r" (ret) 49 120 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 50 121 : "memory"); ··· 123 52 return ret; 124 53 } 125 54 126 - static inline void _outb(u8 b, unsigned long addr) 55 + #define readl readl 56 + static inline u32 readl(const volatile void __iomem *addr) 57 + { u32 ret; 58 + 59 + __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */" 60 + : "=r" (ret) 61 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 62 + : "memory"); 63 + 64 + return ret; 65 + } 66 + 67 + #define readq readq 68 + static inline u64 readq(const volatile void __iomem *addr) 69 + { u64 ret; 70 + 71 + __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */" 72 + : "=r" (ret) 73 + : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 74 + : "memory"); 75 + 76 + return ret; 77 + } 78 + 79 + #define writeb writeb 80 + static inline void writeb(u8 b, volatile void __iomem *addr) 127 81 { 128 - __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */" 82 + __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */" 129 83 : /* no outputs */ 130 84 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 131 85 : "memory"); 132 86 } 133 87 134 - static inline void _outw(u16 w, unsigned long addr) 88 + #define writew writew 89 + static inline void writew(u16 w, volatile void __iomem *addr) 135 90 { 136 - __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */" 91 + __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */" 137 
92 : /* no outputs */ 138 93 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 139 94 : "memory"); 140 95 } 141 96 142 - static inline void _outl(u32 l, unsigned long addr) 97 + #define writel writel 98 + static inline void writel(u32 l, volatile void __iomem *addr) 143 99 { 144 - __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */" 100 + __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */" 145 101 : /* no outputs */ 146 102 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 147 103 : "memory"); 148 104 } 149 105 150 - #define inb(__addr) (_inb((unsigned long)(__addr))) 151 - #define inw(__addr) (_inw((unsigned long)(__addr))) 152 - #define inl(__addr) (_inl((unsigned long)(__addr))) 153 - #define outb(__b, __addr) (_outb((u8)(__b), (unsigned long)(__addr))) 154 - #define outw(__w, __addr) (_outw((u16)(__w), (unsigned long)(__addr))) 155 - #define outl(__l, __addr) (_outl((u32)(__l), (unsigned long)(__addr))) 106 + #define writeq writeq 107 + static inline void writeq(u64 q, volatile void __iomem *addr) 108 + { 109 + __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */" 110 + : /* no outputs */ 111 + : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 112 + : "memory"); 113 + } 114 + 115 + 116 + #define inb inb 117 + static inline u8 inb(unsigned long addr) 118 + { 119 + return readb((volatile void __iomem *)addr); 120 + } 121 + 122 + #define inw inw 123 + static inline u16 inw(unsigned long addr) 124 + { 125 + return readw((volatile void __iomem *)addr); 126 + } 127 + 128 + #define inl inl 129 + static inline u32 inl(unsigned long addr) 130 + { 131 + return readl((volatile void __iomem *)addr); 132 + } 133 + 134 + #define outb outb 135 + static inline void outb(u8 b, unsigned long addr) 136 + { 137 + writeb(b, (volatile void __iomem *)addr); 138 + } 139 + 140 + #define outw outw 141 + static inline void outw(u16 w, unsigned long addr) 142 + { 143 + writew(w, (volatile void __iomem *)addr); 144 + } 145 + 146 + #define outl outl 147 + static 
inline void outl(u32 l, unsigned long addr) 148 + { 149 + writel(l, (volatile void __iomem *)addr); 150 + } 151 + 156 152 157 153 #define inb_p(__addr) inb(__addr) 158 154 #define outb_p(__b, __addr) outb(__b, __addr) ··· 264 126 outsl((unsigned long __force)port, buf, count); 265 127 } 266 128 267 - /* Memory functions, same as I/O accesses on Ultra. */ 268 - static inline u8 _readb(const volatile void __iomem *addr) 269 - { u8 ret; 270 - 271 - __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */" 272 - : "=r" (ret) 273 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 274 - : "memory"); 275 - return ret; 276 - } 277 - 278 - static inline u16 _readw(const volatile void __iomem *addr) 279 - { u16 ret; 280 - 281 - __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */" 282 - : "=r" (ret) 283 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 284 - : "memory"); 285 - 286 - return ret; 287 - } 288 - 289 - static inline u32 _readl(const volatile void __iomem *addr) 290 - { u32 ret; 291 - 292 - __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */" 293 - : "=r" (ret) 294 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 295 - : "memory"); 296 - 297 - return ret; 298 - } 299 - 300 - static inline u64 _readq(const volatile void __iomem *addr) 301 - { u64 ret; 302 - 303 - __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */" 304 - : "=r" (ret) 305 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 306 - : "memory"); 307 - 308 - return ret; 309 - } 310 - 311 - static inline void _writeb(u8 b, volatile void __iomem *addr) 312 - { 313 - __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */" 314 - : /* no outputs */ 315 - : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 316 - : "memory"); 317 - } 318 - 319 - static inline void _writew(u16 w, volatile void __iomem *addr) 320 - { 321 - __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */" 322 - : /* no outputs */ 323 - : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 324 - : "memory"); 325 - } 326 - 327 - static 
inline void _writel(u32 l, volatile void __iomem *addr) 328 - { 329 - __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */" 330 - : /* no outputs */ 331 - : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 332 - : "memory"); 333 - } 334 - 335 - static inline void _writeq(u64 q, volatile void __iomem *addr) 336 - { 337 - __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */" 338 - : /* no outputs */ 339 - : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L) 340 - : "memory"); 341 - } 342 - 343 - #define readb(__addr) _readb(__addr) 344 - #define readw(__addr) _readw(__addr) 345 - #define readl(__addr) _readl(__addr) 346 - #define readq(__addr) _readq(__addr) 347 - #define readb_relaxed(__addr) _readb(__addr) 348 - #define readw_relaxed(__addr) _readw(__addr) 349 - #define readl_relaxed(__addr) _readl(__addr) 350 - #define readq_relaxed(__addr) _readq(__addr) 351 - #define writeb(__b, __addr) _writeb(__b, __addr) 352 - #define writew(__w, __addr) _writew(__w, __addr) 353 - #define writel(__l, __addr) _writel(__l, __addr) 354 - #define writeq(__q, __addr) _writeq(__q, __addr) 355 - 356 - /* Now versions without byte-swapping. 
*/ 357 - static inline u8 _raw_readb(unsigned long addr) 358 - { 359 - u8 ret; 360 - 361 - __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */" 362 - : "=r" (ret) 363 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 364 - 365 - return ret; 366 - } 367 - 368 - static inline u16 _raw_readw(unsigned long addr) 369 - { 370 - u16 ret; 371 - 372 - __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */" 373 - : "=r" (ret) 374 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 375 - 376 - return ret; 377 - } 378 - 379 - static inline u32 _raw_readl(unsigned long addr) 380 - { 381 - u32 ret; 382 - 383 - __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */" 384 - : "=r" (ret) 385 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 386 - 387 - return ret; 388 - } 389 - 390 - static inline u64 _raw_readq(unsigned long addr) 391 - { 392 - u64 ret; 393 - 394 - __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */" 395 - : "=r" (ret) 396 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 397 - 398 - return ret; 399 - } 400 - 401 - static inline void _raw_writeb(u8 b, unsigned long addr) 402 - { 403 - __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */" 404 - : /* no outputs */ 405 - : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 406 - } 407 - 408 - static inline void _raw_writew(u16 w, unsigned long addr) 409 - { 410 - __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */" 411 - : /* no outputs */ 412 - : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 413 - } 414 - 415 - static inline void _raw_writel(u32 l, unsigned long addr) 416 - { 417 - __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */" 418 - : /* no outputs */ 419 - : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 420 - } 421 - 422 - static inline void _raw_writeq(u64 q, unsigned long addr) 423 - { 424 - __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */" 425 - : /* no outputs */ 426 - : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); 427 - } 428 - 429 
- #define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr))) 430 - #define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr))) 431 - #define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr))) 432 - #define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr))) 433 - #define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr))) 434 - #define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr))) 435 - #define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr))) 436 - #define __raw_writeq(__q, __addr) (_raw_writeq((u64)(__q), (unsigned long)(__addr))) 129 + #define readb_relaxed(__addr) readb(__addr) 130 + #define readw_relaxed(__addr) readw(__addr) 131 + #define readl_relaxed(__addr) readl(__addr) 132 + #define readq_relaxed(__addr) readq(__addr) 437 133 438 134 /* Valid I/O Space regions are anywhere, because each PCI bus supported 439 135 * can live in an arbitrary area of the physical address range. ··· 277 305 /* Now, SBUS variants, only difference from PCI is that we do 278 306 * not use little-endian ASIs. 
279 307 */ 280 - static inline u8 _sbus_readb(const volatile void __iomem *addr) 308 + static inline u8 sbus_readb(const volatile void __iomem *addr) 281 309 { 282 - u8 ret; 283 - 284 - __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */" 285 - : "=r" (ret) 286 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 287 - : "memory"); 288 - 289 - return ret; 310 + return __raw_readb(addr); 290 311 } 291 312 292 - static inline u16 _sbus_readw(const volatile void __iomem *addr) 313 + static inline u16 sbus_readw(const volatile void __iomem *addr) 293 314 { 294 - u16 ret; 295 - 296 - __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */" 297 - : "=r" (ret) 298 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 299 - : "memory"); 300 - 301 - return ret; 315 + return __raw_readw(addr); 302 316 } 303 317 304 - static inline u32 _sbus_readl(const volatile void __iomem *addr) 318 + static inline u32 sbus_readl(const volatile void __iomem *addr) 305 319 { 306 - u32 ret; 307 - 308 - __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */" 309 - : "=r" (ret) 310 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 311 - : "memory"); 312 - 313 - return ret; 320 + return __raw_readl(addr); 314 321 } 315 322 316 - static inline u64 _sbus_readq(const volatile void __iomem *addr) 323 + static inline u64 sbus_readq(const volatile void __iomem *addr) 317 324 { 318 - u64 ret; 319 - 320 - __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */" 321 - : "=r" (ret) 322 - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 323 - : "memory"); 324 - 325 - return ret; 325 + return __raw_readq(addr); 326 326 } 327 327 328 - static inline void _sbus_writeb(u8 b, volatile void __iomem *addr) 328 + static inline void sbus_writeb(u8 b, volatile void __iomem *addr) 329 329 { 330 - __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */" 331 - : /* no outputs */ 332 - : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 333 - : "memory"); 330 + __raw_writeb(b, addr); 334 331 } 335 332 336 - static inline void 
_sbus_writew(u16 w, volatile void __iomem *addr) 333 + static inline void sbus_writew(u16 w, volatile void __iomem *addr) 337 334 { 338 - __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */" 339 - : /* no outputs */ 340 - : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 341 - : "memory"); 335 + __raw_writew(w, addr); 342 336 } 343 337 344 - static inline void _sbus_writel(u32 l, volatile void __iomem *addr) 338 + static inline void sbus_writel(u32 l, volatile void __iomem *addr) 345 339 { 346 - __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */" 347 - : /* no outputs */ 348 - : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 349 - : "memory"); 340 + __raw_writel(l, addr); 350 341 } 351 342 352 - static inline void _sbus_writeq(u64 l, volatile void __iomem *addr) 343 + static inline void sbus_writeq(u64 q, volatile void __iomem *addr) 353 344 { 354 - __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */" 355 - : /* no outputs */ 356 - : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) 357 - : "memory"); 345 + __raw_writeq(q, addr); 358 346 } 359 347 360 - #define sbus_readb(__addr) _sbus_readb(__addr) 361 - #define sbus_readw(__addr) _sbus_readw(__addr) 362 - #define sbus_readl(__addr) _sbus_readl(__addr) 363 - #define sbus_readq(__addr) _sbus_readq(__addr) 364 - #define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr) 365 - #define sbus_writew(__w, __addr) _sbus_writew(__w, __addr) 366 - #define sbus_writel(__l, __addr) _sbus_writel(__l, __addr) 367 - #define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr) 368 - 369 - static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) 348 + static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) 370 349 { 371 350 while(n--) { 372 351 sbus_writeb(c, dst); ··· 325 402 } 326 403 } 327 404 328 - #define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz) 329 - 330 - static inline void 331 - _memset_io(volatile void __iomem *dst, int c, 
__kernel_size_t n) 405 + static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n) 332 406 { 333 407 volatile void __iomem *d = dst; 334 408 ··· 335 415 } 336 416 } 337 417 338 - #define memset_io(d,c,sz) _memset_io(d,c,sz) 339 - 340 - static inline void 341 - _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src, 342 - __kernel_size_t n) 418 + static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src, 419 + __kernel_size_t n) 343 420 { 344 421 char *d = dst; 345 422 ··· 347 430 } 348 431 } 349 432 350 - #define sbus_memcpy_fromio(d, s, sz) _sbus_memcpy_fromio(d, s, sz) 351 433 352 - static inline void 353 - _memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n) 434 + static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, 435 + __kernel_size_t n) 354 436 { 355 437 char *d = dst; 356 438 ··· 360 444 } 361 445 } 362 446 363 - #define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz) 364 - 365 - static inline void 366 - _sbus_memcpy_toio(volatile void __iomem *dst, const void *src, 367 - __kernel_size_t n) 447 + static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src, 448 + __kernel_size_t n) 368 449 { 369 450 const char *s = src; 370 451 volatile void __iomem *d = dst; ··· 373 460 } 374 461 } 375 462 376 - #define sbus_memcpy_toio(d, s, sz) _sbus_memcpy_toio(d, s, sz) 377 - 378 - static inline void 379 - _memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n) 463 + static inline void memcpy_toio(volatile void __iomem *dst, const void *src, 464 + __kernel_size_t n) 380 465 { 381 466 const char *s = src; 382 467 volatile void __iomem *d = dst; ··· 385 474 d++; 386 475 } 387 476 } 388 - 389 - #define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz) 390 477 391 478 #define mmiowb() 392 479
+2 -10
arch/sparc/include/asm/tlbflush_64.h
··· 34 34 { 35 35 } 36 36 37 + void flush_tlb_kernel_range(unsigned long start, unsigned long end); 38 + 37 39 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 38 40 39 41 void flush_tlb_pending(void); ··· 50 48 51 49 #ifndef CONFIG_SMP 52 50 53 - #define flush_tlb_kernel_range(start,end) \ 54 - do { flush_tsb_kernel_range(start,end); \ 55 - __flush_tlb_kernel_range(start,end); \ 56 - } while (0) 57 - 58 51 static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) 59 52 { 60 53 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); ··· 59 62 60 63 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 61 64 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); 62 - 63 - #define flush_tlb_kernel_range(start, end) \ 64 - do { flush_tsb_kernel_range(start,end); \ 65 - smp_flush_tlb_kernel_range(start, end); \ 66 - } while (0) 67 65 68 66 #define global_flush_tlb_page(mm, vaddr) \ 69 67 smp_flush_tlb_page(mm, vaddr)
+1 -1
arch/sparc/kernel/ldc.c
··· 1336 1336 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || 1337 1337 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || 1338 1338 lp->hs_state != LDC_HS_OPEN) 1339 - err = -EINVAL; 1339 + err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL); 1340 1340 else 1341 1341 err = start_handshake(lp); 1342 1342
+69 -13
arch/sparc/kernel/mdesc.c
··· 906 906 smp_fill_in_sib_core_maps(); 907 907 } 908 908 909 - static ssize_t mdesc_read(struct file *file, char __user *buf, 910 - size_t len, loff_t *offp) 909 + /* mdesc_open() - Grab a reference to mdesc_handle when /dev/mdesc is 910 + * opened. Hold this reference until /dev/mdesc is closed to ensure 911 + * mdesc data structure is not released underneath us. Store the 912 + * pointer to mdesc structure in private_data for read and seek to use 913 + */ 914 + static int mdesc_open(struct inode *inode, struct file *file) 911 915 { 912 916 struct mdesc_handle *hp = mdesc_grab(); 913 - int err; 914 917 915 918 if (!hp) 916 919 return -ENODEV; 917 920 918 - err = hp->handle_size; 919 - if (len < hp->handle_size) 920 - err = -EMSGSIZE; 921 - else if (copy_to_user(buf, &hp->mdesc, hp->handle_size)) 922 - err = -EFAULT; 923 - mdesc_release(hp); 921 + file->private_data = hp; 924 922 925 - return err; 923 + return 0; 924 + } 925 + 926 + static ssize_t mdesc_read(struct file *file, char __user *buf, 927 + size_t len, loff_t *offp) 928 + { 929 + struct mdesc_handle *hp = file->private_data; 930 + unsigned char *mdesc; 931 + int bytes_left, count = len; 932 + 933 + if (*offp >= hp->handle_size) 934 + return 0; 935 + 936 + bytes_left = hp->handle_size - *offp; 937 + if (count > bytes_left) 938 + count = bytes_left; 939 + 940 + mdesc = (unsigned char *)&hp->mdesc; 941 + mdesc += *offp; 942 + if (!copy_to_user(buf, mdesc, count)) { 943 + *offp += count; 944 + return count; 945 + } else { 946 + return -EFAULT; 947 + } 948 + } 949 + 950 + static loff_t mdesc_llseek(struct file *file, loff_t offset, int whence) 951 + { 952 + struct mdesc_handle *hp; 953 + 954 + switch (whence) { 955 + case SEEK_CUR: 956 + offset += file->f_pos; 957 + break; 958 + case SEEK_SET: 959 + break; 960 + default: 961 + return -EINVAL; 962 + } 963 + 964 + hp = file->private_data; 965 + if (offset > hp->handle_size) 966 + return -EINVAL; 967 + else 968 + file->f_pos = offset; 969 + 970 + return offset; 
971 + } 972 + 973 + /* mdesc_close() - /dev/mdesc is being closed, release the reference to 974 + * mdesc structure. 975 + */ 976 + static int mdesc_close(struct inode *inode, struct file *file) 977 + { 978 + mdesc_release(file->private_data); 979 + return 0; 926 980 } 927 981 928 982 static const struct file_operations mdesc_fops = { 929 - .read = mdesc_read, 930 - .owner = THIS_MODULE, 931 - .llseek = noop_llseek, 983 + .open = mdesc_open, 984 + .read = mdesc_read, 985 + .llseek = mdesc_llseek, 986 + .release = mdesc_close, 987 + .owner = THIS_MODULE, 932 988 }; 933 989 934 990 static struct miscdevice mdesc_misc = {
+1 -1
arch/sparc/kernel/unaligned_32.c
··· 166 166 /* This is just to make gcc think panic does return... */ 167 167 static void unaligned_panic(char *str) 168 168 { 169 - panic(str); 169 + panic("%s", str); 170 170 } 171 171 172 172 /* una_asm.S */
+18 -18
arch/sparc/lib/PeeCeeI.c
··· 15 15 const u8 *p = src; 16 16 17 17 while (count--) 18 - outb(*p++, addr); 18 + __raw_writeb(*p++, addr); 19 19 } 20 20 EXPORT_SYMBOL(outsb); 21 21 ··· 93 93 u8 *pb = dst; 94 94 95 95 while ((((unsigned long)pb) & 0x3) && count--) 96 - *pb++ = inb(addr); 96 + *pb++ = __raw_readb(addr); 97 97 pi = (u32 *)pb; 98 98 while (count >= 4) { 99 99 u32 w; 100 100 101 - w = (inb(addr) << 24); 102 - w |= (inb(addr) << 16); 103 - w |= (inb(addr) << 8); 104 - w |= (inb(addr) << 0); 101 + w = (__raw_readb(addr) << 24); 102 + w |= (__raw_readb(addr) << 16); 103 + w |= (__raw_readb(addr) << 8); 104 + w |= (__raw_readb(addr) << 0); 105 105 *pi++ = w; 106 106 count -= 4; 107 107 } 108 108 pb = (u8 *)pi; 109 109 while (count--) 110 - *pb++ = inb(addr); 110 + *pb++ = __raw_readb(addr); 111 111 } 112 112 } 113 113 EXPORT_SYMBOL(insb); ··· 121 121 u32 *pi; 122 122 123 123 if (((unsigned long)ps) & 0x2) { 124 - *ps++ = le16_to_cpu(inw(addr)); 124 + *ps++ = __raw_readw(addr); 125 125 count--; 126 126 } 127 127 pi = (u32 *)ps; 128 128 while (count >= 2) { 129 129 u32 w; 130 130 131 - w = (le16_to_cpu(inw(addr)) << 16); 132 - w |= (le16_to_cpu(inw(addr)) << 0); 131 + w = __raw_readw(addr) << 16; 132 + w |= __raw_readw(addr) << 0; 133 133 *pi++ = w; 134 134 count -= 2; 135 135 } 136 136 ps = (u16 *)pi; 137 137 if (count) 138 - *ps = le16_to_cpu(inw(addr)); 138 + *ps = __raw_readw(addr); 139 139 } 140 140 } 141 141 EXPORT_SYMBOL(insw); ··· 148 148 if ((((unsigned long)dst) & 0x3) == 0) { 149 149 u32 *pi = dst; 150 150 while (count--) 151 - *pi++ = le32_to_cpu(inl(addr)); 151 + *pi++ = __raw_readl(addr); 152 152 } else { 153 153 u32 l = 0, l2, *pi; 154 154 u16 *ps; ··· 158 158 case 0x2: 159 159 ps = dst; 160 160 count -= 1; 161 - l = le32_to_cpu(inl(addr)); 161 + l = __raw_readl(addr); 162 162 *ps++ = l; 163 163 pi = (u32 *)ps; 164 164 while (count--) { 165 - l2 = le32_to_cpu(inl(addr)); 165 + l2 = __raw_readl(addr); 166 166 *pi++ = (l << 16) | (l2 >> 16); 167 167 l = l2; 168 168 } ··· 
173 173 case 0x1: 174 174 pb = dst; 175 175 count -= 1; 176 - l = le32_to_cpu(inl(addr)); 176 + l = __raw_readl(addr); 177 177 *pb++ = l >> 24; 178 178 ps = (u16 *)pb; 179 179 *ps++ = ((l >> 8) & 0xffff); 180 180 pi = (u32 *)ps; 181 181 while (count--) { 182 - l2 = le32_to_cpu(inl(addr)); 182 + l2 = __raw_readl(addr); 183 183 *pi++ = (l << 24) | (l2 >> 8); 184 184 l = l2; 185 185 } ··· 190 190 case 0x3: 191 191 pb = (u8 *)dst; 192 192 count -= 1; 193 - l = le32_to_cpu(inl(addr)); 193 + l = __raw_readl(addr); 194 194 *pb++ = l >> 24; 195 195 pi = (u32 *)pb; 196 196 while (count--) { 197 - l2 = le32_to_cpu(inl(addr)); 197 + l2 = __raw_readl(addr); 198 198 *pi++ = (l << 8) | (l2 >> 24); 199 199 l = l2; 200 200 }
+1 -1
arch/sparc/math-emu/math_32.c
··· 499 499 case 0: fsr = *pfsr; 500 500 if (IR == -1) IR = 2; 501 501 /* fcc is always fcc0 */ 502 - fsr &= ~0xc00; fsr |= (IR << 10); break; 502 + fsr &= ~0xc00; fsr |= (IR << 10); 503 503 *pfsr = fsr; 504 504 break; 505 505 case 1: rd->s = IR; break;
+96
arch/sparc/mm/init_64.c
··· 22 22 #include <linux/kprobes.h> 23 23 #include <linux/cache.h> 24 24 #include <linux/sort.h> 25 + #include <linux/ioport.h> 25 26 #include <linux/percpu.h> 26 27 #include <linux/memblock.h> 27 28 #include <linux/mmzone.h> ··· 351 350 } 352 351 353 352 mm = vma->vm_mm; 353 + 354 + /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ 355 + if (!pte_accessible(mm, pte)) 356 + return; 354 357 355 358 spin_lock_irqsave(&mm->context.lock, flags); 356 359 ··· 2624 2619 2625 2620 pte = pmd_val(entry); 2626 2621 2622 + /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ 2623 + if (!(pte & _PAGE_VALID)) 2624 + return; 2625 + 2627 2626 /* We are fabricating 8MB pages using 4MB real hw pages. */ 2628 2627 pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); 2629 2628 ··· 2708 2699 } 2709 2700 } 2710 2701 #endif 2702 + 2703 + static struct resource code_resource = { 2704 + .name = "Kernel code", 2705 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2706 + }; 2707 + 2708 + static struct resource data_resource = { 2709 + .name = "Kernel data", 2710 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2711 + }; 2712 + 2713 + static struct resource bss_resource = { 2714 + .name = "Kernel bss", 2715 + .flags = IORESOURCE_BUSY | IORESOURCE_MEM 2716 + }; 2717 + 2718 + static inline resource_size_t compute_kern_paddr(void *addr) 2719 + { 2720 + return (resource_size_t) (addr - KERNBASE + kern_base); 2721 + } 2722 + 2723 + static void __init kernel_lds_init(void) 2724 + { 2725 + code_resource.start = compute_kern_paddr(_text); 2726 + code_resource.end = compute_kern_paddr(_etext - 1); 2727 + data_resource.start = compute_kern_paddr(_etext); 2728 + data_resource.end = compute_kern_paddr(_edata - 1); 2729 + bss_resource.start = compute_kern_paddr(__bss_start); 2730 + bss_resource.end = compute_kern_paddr(_end - 1); 2731 + } 2732 + 2733 + static int __init report_memory(void) 2734 + { 2735 + int i; 2736 + struct resource *res; 2737 + 2738 + kernel_lds_init(); 2739 + 2740 + for (i = 
0; i < pavail_ents; i++) { 2741 + res = kzalloc(sizeof(struct resource), GFP_KERNEL); 2742 + 2743 + if (!res) { 2744 + pr_warn("Failed to allocate resource.\n"); 2745 + break; 2746 + } 2747 + 2748 + res->name = "System RAM"; 2749 + res->start = pavail[i].phys_addr; 2750 + res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; 2751 + res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 2752 + 2753 + if (insert_resource(&iomem_resource, res) < 0) { 2754 + pr_warn("Resource insertion failed.\n"); 2755 + break; 2756 + } 2757 + 2758 + insert_resource(res, &code_resource); 2759 + insert_resource(res, &data_resource); 2760 + insert_resource(res, &bss_resource); 2761 + } 2762 + 2763 + return 0; 2764 + } 2765 + device_initcall(report_memory); 2766 + 2767 + #ifdef CONFIG_SMP 2768 + #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range 2769 + #else 2770 + #define do_flush_tlb_kernel_range __flush_tlb_kernel_range 2771 + #endif 2772 + 2773 + /* Split flushes that straddle the OBP range so the firmware's locked TLB entry is never evicted. */ 2774 + void flush_tlb_kernel_range(unsigned long start, unsigned long end) 2775 + { 2776 + if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { 2777 + if (start < LOW_OBP_ADDRESS) { 2778 + flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); 2779 + do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); 2780 + } 2781 + if (end > HI_OBP_ADDRESS) { 2782 + flush_tsb_kernel_range(HI_OBP_ADDRESS, end); 2783 + do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); 2784 + } 2785 + } else { 2786 + flush_tsb_kernel_range(start, end); 2787 + do_flush_tlb_kernel_range(start, end); 2788 + } 2789 + }
+11 -11
drivers/input/misc/sparcspkr.c
··· 86 86 spin_lock_irqsave(&state->lock, flags); 87 87 88 88 if (count) { 89 - outb(0x01, info->regs + 0); 90 - outb(0x00, info->regs + 2); 91 - outb((count >> 16) & 0xff, info->regs + 3); 92 - outb((count >> 8) & 0xff, info->regs + 4); 93 - outb(0x00, info->regs + 5); 89 + sbus_writeb(0x01, info->regs + 0); 90 + sbus_writeb(0x00, info->regs + 2); 91 + sbus_writeb((count >> 16) & 0xff, info->regs + 3); 92 + sbus_writeb((count >> 8) & 0xff, info->regs + 4); 93 + sbus_writeb(0x00, info->regs + 5); 94 94 } else { 95 - outb(0x00, info->regs + 0); 95 + sbus_writeb(0x00, info->regs + 0); 96 96 } 97 97 98 98 spin_unlock_irqrestore(&state->lock, flags); ··· 123 123 124 124 if (count) { 125 125 /* enable counter 2 */ 126 - outb(inb(info->enable_reg) | 3, info->enable_reg); 126 + sbus_writeb(sbus_readb(info->enable_reg) | 3, info->enable_reg); 127 127 /* set command for counter 2, 2 byte write */ 128 - outb(0xB6, info->freq_regs + 1); 128 + sbus_writeb(0xB6, info->freq_regs + 1); 129 129 /* select desired HZ */ 130 - outb(count & 0xff, info->freq_regs + 0); 131 - outb((count >> 8) & 0xff, info->freq_regs + 0); 130 + sbus_writeb(count & 0xff, info->freq_regs + 0); 131 + sbus_writeb((count >> 8) & 0xff, info->freq_regs + 0); 132 132 } else { 133 133 /* disable counter 2 */ 134 - outb(inb_p(info->enable_reg) & 0xFC, info->enable_reg); 134 + sbus_writeb(sbus_readb(info->enable_reg) & 0xFC, info->enable_reg); 135 135 } 136 136 137 137 spin_unlock_irqrestore(&state->lock, flags);
+6
drivers/sbus/char/bbc_envctrl.c
··· 452 452 if (!tp) 453 453 return; 454 454 455 + INIT_LIST_HEAD(&tp->bp_list); 456 + INIT_LIST_HEAD(&tp->glob_list); 457 + 455 458 tp->client = bbc_i2c_attach(bp, op); 456 459 if (!tp->client) { 457 460 kfree(tp); ··· 499 496 fp = kzalloc(sizeof(*fp), GFP_KERNEL); 500 497 if (!fp) 501 498 return; 499 + 500 + INIT_LIST_HEAD(&fp->bp_list); 501 + INIT_LIST_HEAD(&fp->glob_list); 502 502 503 503 fp->client = bbc_i2c_attach(bp, op); 504 504 if (!fp->client) {
+8 -3
drivers/sbus/char/bbc_i2c.c
··· 300 300 if (!bp) 301 301 return NULL; 302 302 303 + INIT_LIST_HEAD(&bp->temps); 304 + INIT_LIST_HEAD(&bp->fans); 305 + 303 306 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); 304 307 if (!bp->i2c_control_regs) 305 308 goto fail; 306 309 307 - bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); 308 - if (!bp->i2c_bussel_reg) 309 - goto fail; 310 + if (op->num_resources == 2) { 311 + bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); 312 + if (!bp->i2c_bussel_reg) 313 + goto fail; 314 + } 310 315 311 316 bp->waiting = 0; 312 317 init_waitqueue_head(&bp->wq);
+3 -7
drivers/sbus/char/display7seg.c
··· 4 4 * Copyright (c) 2000 Eric Brower (ebrower@usa.net) 5 5 */ 6 6 7 + #include <linux/device.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/module.h> 9 10 #include <linux/fs.h> ··· 144 143 145 144 case D7SIOCTM: 146 145 /* toggle device mode-- flip display orientation */ 147 - if (regs & D7S_FLIP) 148 - regs &= ~D7S_FLIP; 149 - else 150 - regs |= D7S_FLIP; 146 + regs ^= D7S_FLIP; 151 147 writeb(regs, p->regs); 152 148 break; 153 149 } ··· 178 180 if (d7s_device) 179 181 goto out; 180 182 181 - p = kzalloc(sizeof(*p), GFP_KERNEL); 183 + p = devm_kzalloc(&op->dev, sizeof(*p), GFP_KERNEL); 182 184 err = -ENOMEM; 183 185 if (!p) 184 186 goto out; ··· 229 231 of_iounmap(&op->resource[0], p->regs, sizeof(u8)); 230 232 231 233 out_free: 232 - kfree(p); 233 234 goto out; 234 235 } 235 236 ··· 248 251 249 252 misc_deregister(&d7s_miscdev); 250 253 of_iounmap(&op->resource[0], p->regs, sizeof(u8)); 251 - kfree(p); 252 254 253 255 return 0; 254 256 }
+9
drivers/tty/serial/sunsab.c
··· 157 157 (up->port.line == up->port.cons->index)) 158 158 saw_console_brk = 1; 159 159 160 + if (count == 0) { 161 + if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { 162 + stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | 163 + SAB82532_ISR0_FERR); 164 + up->port.icount.brk++; 165 + uart_handle_break(&up->port); 166 + } 167 + } 168 + 160 169 for (i = 0; i < count; i++) { 161 170 unsigned char ch = buf[i], flag; 162 171