#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

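/*
 * Illustrative note: the asm above leans on two m68k/ColdFire details.
 * With the bit number in a data register and a memory operand, the
 * bset/bclr/bchg/btst instructions operate on a single byte and use only
 * the low three bits of the register, and the (nr ^ 31) >> 3 index picks
 * the byte that holds bit 'nr' in an array of big-endian longs, so
 * set_bit() behaves roughly like:
 *
 *	volatile char *p = (volatile char *)addr + ((nr ^ 31) >> 3);
 *	*p |= 1 << (nr & 7);
 *
 * The test_and_*() variants then read the Z flag back with sne, so they
 * return non-zero when the bit was already set before the operation.
 */
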
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int	* a = (int *) addr;
	int	mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

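/*
 * Illustrative note: the ext2_*_bit() helpers above index little-endian
 * bitmaps, where bit 'nr' lives in byte nr >> 3 at position nr & 7, so no
 * (nr ^ 31) byte swizzling is needed; ext2_set_bit() is roughly:
 *
 *	volatile char *p = (volatile char *)addr + (nr >> 3);
 *	int old = (*p >> (nr & 7)) & 1;
 *	*p |= 1 << (nr & 7);
 *	return old;
 *
 * ext2_find_next_zero_bit() below still loads native big-endian longs and
 * byte-swaps them with __swab32() so that ffz() sees the bits in
 * little-endian order.
 */
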
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */
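
/*
 * Illustrative note on the __swab32() trick in ext2_find_next_zero_bit()
 * above: suppose the bitmap starts with the bytes ff 00 00 00, i.e.
 * little-endian bits 0..7 are set and bit 8 is the first zero bit.  Loaded
 * as a native big-endian long that is 0xff000000; __swab32() turns it into
 * 0x000000ff, and ffz(0x000000ff) == 8, so the search reports bit 8, which
 * matches the little-endian bitmap layout.
 */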