Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib: remove fastcall from lib/*

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Harvey Harrison and committed by Linus Torvalds
9f741cb8 7ad5b3a5

+28 -28
+16 -16
lib/iomap.c
··· 69 69 #define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr)) 70 70 #endif 71 71 72 - unsigned int fastcall ioread8(void __iomem *addr) 72 + unsigned int ioread8(void __iomem *addr) 73 73 { 74 74 IO_COND(addr, return inb(port), return readb(addr)); 75 75 return 0xff; 76 76 } 77 - unsigned int fastcall ioread16(void __iomem *addr) 77 + unsigned int ioread16(void __iomem *addr) 78 78 { 79 79 IO_COND(addr, return inw(port), return readw(addr)); 80 80 return 0xffff; 81 81 } 82 - unsigned int fastcall ioread16be(void __iomem *addr) 82 + unsigned int ioread16be(void __iomem *addr) 83 83 { 84 84 IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr)); 85 85 return 0xffff; 86 86 } 87 - unsigned int fastcall ioread32(void __iomem *addr) 87 + unsigned int ioread32(void __iomem *addr) 88 88 { 89 89 IO_COND(addr, return inl(port), return readl(addr)); 90 90 return 0xffffffff; 91 91 } 92 - unsigned int fastcall ioread32be(void __iomem *addr) 92 + unsigned int ioread32be(void __iomem *addr) 93 93 { 94 94 IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr)); 95 95 return 0xffffffff; ··· 110 110 #define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port) 111 111 #endif 112 112 113 - void fastcall iowrite8(u8 val, void __iomem *addr) 113 + void iowrite8(u8 val, void __iomem *addr) 114 114 { 115 115 IO_COND(addr, outb(val,port), writeb(val, addr)); 116 116 } 117 - void fastcall iowrite16(u16 val, void __iomem *addr) 117 + void iowrite16(u16 val, void __iomem *addr) 118 118 { 119 119 IO_COND(addr, outw(val,port), writew(val, addr)); 120 120 } 121 - void fastcall iowrite16be(u16 val, void __iomem *addr) 121 + void iowrite16be(u16 val, void __iomem *addr) 122 122 { 123 123 IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr)); 124 124 } 125 - void fastcall iowrite32(u32 val, void __iomem *addr) 125 + void iowrite32(u32 val, void __iomem *addr) 126 126 { 127 127 IO_COND(addr, outl(val,port), writel(val, addr)); 128 128 } 129 - void fastcall iowrite32be(u32 val, void __iomem *addr) 129 + void iowrite32be(u32 val, void __iomem *addr) 130 130 { 131 131 IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr)); 132 132 } ··· 193 193 } 194 194 #endif 195 195 196 - void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 196 + void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 197 197 { 198 198 IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count)); 199 199 } 200 - void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count) 200 + void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) 201 201 { 202 202 IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count)); 203 203 } 204 - void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count) 204 + void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) 205 205 { 206 206 IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count)); 207 207 } ··· 209 209 EXPORT_SYMBOL(ioread16_rep); 210 210 EXPORT_SYMBOL(ioread32_rep); 211 211 212 - void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) 212 + void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) 213 213 { 214 214 IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count)); 215 215 } 216 - void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) 216 + void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) 217 217 { 218 218 IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count)); 219 219 } 220 - void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) 220 + void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) 221 221 { 222 222 IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count)); 223 223 }
+8 -8
lib/rwsem-spinlock.c
··· 125 125 /* 126 126 * get a read lock on the semaphore 127 127 */ 128 - void fastcall __sched __down_read(struct rw_semaphore *sem) 128 + void __sched __down_read(struct rw_semaphore *sem) 129 129 { 130 130 struct rwsem_waiter waiter; 131 131 struct task_struct *tsk; ··· 168 168 /* 169 169 * trylock for reading -- returns 1 if successful, 0 if contention 170 170 */ 171 - int fastcall __down_read_trylock(struct rw_semaphore *sem) 171 + int __down_read_trylock(struct rw_semaphore *sem) 172 172 { 173 173 unsigned long flags; 174 174 int ret = 0; ··· 191 191 * get a write lock on the semaphore 192 192 * - we increment the waiting count anyway to indicate an exclusive lock 193 193 */ 194 - void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass) 194 + void __sched __down_write_nested(struct rw_semaphore *sem, int subclass) 195 195 { 196 196 struct rwsem_waiter waiter; 197 197 struct task_struct *tsk; ··· 231 231 ; 232 232 } 233 233 234 - void fastcall __sched __down_write(struct rw_semaphore *sem) 234 + void __sched __down_write(struct rw_semaphore *sem) 235 235 { 236 236 __down_write_nested(sem, 0); 237 237 } ··· 239 239 /* 240 240 * trylock for writing -- returns 1 if successful, 0 if contention 241 241 */ 242 - int fastcall __down_write_trylock(struct rw_semaphore *sem) 242 + int __down_write_trylock(struct rw_semaphore *sem) 243 243 { 244 244 unsigned long flags; 245 245 int ret = 0; ··· 260 260 /* 261 261 * release a read lock on the semaphore 262 262 */ 263 - void fastcall __up_read(struct rw_semaphore *sem) 263 + void __up_read(struct rw_semaphore *sem) 264 264 { 265 265 unsigned long flags; 266 266 ··· 275 275 /* 276 276 * release a write lock on the semaphore 277 277 */ 278 - void fastcall __up_write(struct rw_semaphore *sem) 278 + void __up_write(struct rw_semaphore *sem) 279 279 { 280 280 unsigned long flags; 281 281 ··· 292 292 * downgrade a write lock into a read lock 293 293 * - just wake up any readers at the front of the queue 294 294 */ 295 - void fastcall __downgrade_write(struct rw_semaphore *sem) 295 + void __downgrade_write(struct rw_semaphore *sem) 296 296 { 297 297 unsigned long flags; 298 298
+4 -4
lib/semaphore-sleepers.c
··· 48 48 * we cannot lose wakeup events. 49 49 */ 50 50 51 - fastcall void __up(struct semaphore *sem) 51 + void __up(struct semaphore *sem) 52 52 { 53 53 wake_up(&sem->wait); 54 54 } 55 55 56 - fastcall void __sched __down(struct semaphore * sem) 56 + void __sched __down(struct semaphore *sem) 57 57 { 58 58 struct task_struct *tsk = current; 59 59 DECLARE_WAITQUEUE(wait, tsk); ··· 90 90 tsk->state = TASK_RUNNING; 91 91 } 92 92 93 - fastcall int __sched __down_interruptible(struct semaphore * sem) 93 + int __sched __down_interruptible(struct semaphore *sem) 94 94 { 95 95 int retval = 0; 96 96 struct task_struct *tsk = current; ··· 153 153 * single "cmpxchg" without failure cases, 154 154 * but then it wouldn't work on a 386. 155 155 */ 156 - fastcall int __down_trylock(struct semaphore * sem) 156 + int __down_trylock(struct semaphore *sem) 157 157 { 158 158 int sleepers; 159 159 unsigned long flags;