#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */
#include <linux/config.h>
#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
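/*
 * Editorial illustration (not part of the original header), assuming the
 * __BITOPS_WORDSIZE macro defined below: under these formats, bit nr of
 * a bitmap lives in byte ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3) of its
 * word, at bit position (nr & 7) within that byte. For example, in the
 * 32 bit format bit 0 maps to byte offset (0 ^ 24) >> 3 == 3 with mask
 * 1 << 0 == 0x01, i.e. the least significant bit of the big endian word,
 * exactly as the diagram above promises. The non-SMP routines further
 * down use this calculation verbatim.
 */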
/*
 * set ALIGN_CS to 1 if the SMP safe bit operations should
 * align the address to 4 byte boundary. It seems to work
 * without the alignment.
 */
#ifdef __KERNEL__
#define ALIGN_CS 0
#else
#define ALIGN_CS 1
#ifndef CONFIG_SMP
#error "bitops won't work without CONFIG_SMP"
#endif
#endif

/* bitmap tables from arch/S390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string " %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )
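/*
 * Editorial sketch (not from the original header) of the retry loop that
 * __BITOPS_LOOP implements; cas() is a hypothetical stand-in for the
 * CS/CSG instruction, which on mismatch reloads the current memory value
 * into "old" and sets the condition code that "jl 0b" tests:
 *
 *	old = *addr;
 *	do {
 *		new = old <op> val;		// <op> is OR, AND or XOR
 *	} while (!cas(addr, &old, new));	// another CPU changed the
 *						// word: retry with new old
 */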
#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
#if ALIGN_CS == 1
	nr += (addr & __BITOPS_ALIGN) << 3;	/* add alignment to bit number */
	addr ^= addr & __BITOPS_ALIGN;		/* align address to 8 */
#endif
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
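/*
 * Editorial usage sketch (not from the original header): the
 * test-and-set primitives return the previous bit value, which gives a
 * cheap "run once" guard; init_done and do_setup() are hypothetical:
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_setup();	// only the first caller gets here
 */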
/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
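/*
 * Editorial note: when nr is a compile-time constant, set_bit_simple
 * resolves to __constant_set_bit and a single OI instruction with an
 * immediate mask. For example, set_bit_simple(3, ptr) in the 32 bit
 * format selects case 3 above: byte offset (3 ^ 24) >> 3 == 3 and mask
 * 1 << (3 & 7) == 0x08, i.e. one OI on byte ptr+3. The clear and change
 * variants below follow the same dispatch pattern with NI and XI.
 */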
/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )
/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	switch (nr & 7) {
	case 0:
		asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 1:
		asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 2:
		asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 3:
		asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 4:
		asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 5:
		asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 6:
		asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	case 7:
		asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
			      : "a" (addr), "m" (*(char *) addr) : "cc" );
		break;
	}
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )
/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("oc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)	test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("nc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile("xc 0(1,%1),0(%2)"
		     : "=m" (*(char *) addr)
		     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
		       "m" (*(char *) addr) : "cc", "memory" );
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */
static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
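/*
 * Editorial note (not from the original header): the *_simple
 * test-and-ops above read the byte with a separate load before the
 * OC/NC/XC update, so another CPU could flip the bit in between; they
 * are therefore only used when CONFIG_SMP is off (see the defines
 * above), while SMP kernels get the CS-based variants.
 */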
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0xffffffff)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0xffff)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0xff)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _zb_findmap[word & 0xff];
}

/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	unsigned long bit = 0;

#ifdef __s390x__
	if (likely((word & 0xffffffff) == 0)) {
		word >>= 32;
		bit += 32;
	}
#endif
	if (likely((word & 0xffff) == 0)) {
		word >>= 16;
		bit += 16;
	}
	if (likely((word & 0xff) == 0)) {
		word >>= 8;
		bit += 8;
	}
	return bit + _sb_findmap[word & 0xff];
}
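/*
 * Editorial worked examples: ffz(0x0000ffffUL) sees an all-ones low
 * halfword, shifts it away (bit = 16) and returns
 * 16 + _zb_findmap[0] == 16; __ffs(0x00010000UL) shifts away the
 * all-zero low halfword and returns 16 + _sb_findmap[1] == 16. The byte
 * tables from bitmap.S supply the final 0..7 position within the
 * remaining byte.
 */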
/*
 * Find-bit routines.
 */

#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jno  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jno  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long *addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   slr  %1,%1\n"
		"   lr   %2,%3\n"
		"   slr  %0,%0\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"0: c    %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,4(%0)\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   lhi  %1,0xff\n"
		"   tml  %2,0xffff\n"
		"   jnz  2f\n"
		"   ahi  %0,16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0x00ff\n"
		"   jnz  3f\n"
		"   ahi  %0,8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   la   %0,8(%0)\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jno  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jno  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   slgr %1,%1\n"
		"   lgr  %2,%3\n"
		"   slgr %0,%0\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"0: cg   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: lg   %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   clr  %2,%1\n"
		"   jne  2f\n"
		"   aghi %0,32\n"
		"   srlg %2,%2,32\n"
		"2: lghi %1,0xff\n"
		"   tmll %2,0xffff\n"
		"   jnz  3f\n"
		"   aghi %0,16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0x00ff\n"
		"   jnz  4f\n"
		"   aghi %0,8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (addr), "a" (&_sb_findmap),
		  "m" (*(addrtype *) addr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit(const unsigned long *addr, unsigned long size,
		   unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(*p >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

static inline int
find_next_bit(const unsigned long *addr, unsigned long size,
	      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs(*p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
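/*
 * Editorial usage sketch (not from the original header): a typical walk
 * over all set bits of a bitmap with the routines above; handle() is a
 * hypothetical callback:
 *
 *	for (i = find_first_bit(bm, size); i < size;
 *	     i = find_next_bit(bm, size, i + 1))
 *		handle(i);
 */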
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight64(x)						\
({								\
	unsigned long __x = (x);				\
	unsigned int __w;					\
	__w = generic_hweight32((unsigned int) __x);		\
	__w += generic_hweight32((unsigned int) (__x>>32));	\
	__w;							\
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)


#ifdef __KERNEL__

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

#define ext2_set_bit(nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
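/*
 * Editorial illustration: the nr ^ (__BITOPS_WORDSIZE - 8) translation
 * above converts an ext2 (little endian) bit number into a native s390
 * bit number. E.g. in the 32 bit format ext2 bit 0 becomes native bit
 * 0 ^ 24 == 24, which lands in byte (24 ^ 24) >> 3 == 0 at bit
 * position 0, exactly the LSB of byte 0 that the little endian
 * convention expects.
 */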
#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long cmp, count;
	unsigned int res;

	if (!size)
		return 0;
	__asm__("   lhi  %1,-1\n"
		"   lr   %2,%3\n"
		"   ahi  %2,31\n"
		"   srl  %2,5\n"
		"   slr  %0,%0\n"
		"0: cl   %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   ahi  %0,4\n"
		"   brct %2,0b\n"
		"   lr   %0,%3\n"
		"   j    4f\n"
		"1: l    %2,0(%0,%4)\n"
		"   sll  %0,3\n"
		"   ahi  %0,24\n"
		"   lhi  %1,0xff\n"
		"   tmh  %2,0xffff\n"
		"   jo   2f\n"
		"   ahi  %0,-16\n"
		"   srl  %2,16\n"
		"2: tml  %2,0xff00\n"
		"   jo   3f\n"
		"   ahi  %0,-8\n"
		"   srl  %2,8\n"
		"3: nr   %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   alr  %0,%2\n"
		"4:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long res, cmp, count;

	if (!size)
		return 0;
	__asm__("   lghi %1,-1\n"
		"   lgr  %2,%3\n"
		"   aghi %2,63\n"
		"   srlg %2,%2,6\n"
		"   slgr %0,%0\n"
		"0: clg  %1,0(%0,%4)\n"
		"   jne  1f\n"
		"   aghi %0,8\n"
		"   brct %2,0b\n"
		"   lgr  %0,%3\n"
		"   j    5f\n"
		"1: cl   %1,0(%0,%4)\n"
		"   jne  2f\n"
		"   aghi %0,4\n"
		"2: l    %2,0(%0,%4)\n"
		"   sllg %0,%0,3\n"
		"   aghi %0,24\n"
		"   lghi %1,0xff\n"
		"   tmlh %2,0xffff\n"
		"   jo   3f\n"
		"   aghi %0,-16\n"
		"   srl  %2,16\n"
		"3: tmll %2,0xff00\n"
		"   jo   4f\n"
		"   aghi %0,-8\n"
		"   srl  %2,8\n"
		"4: ngr  %2,%1\n"
		"   ic   %2,0(%2,%5)\n"
		"   algr %0,%2\n"
		"5:"
		: "=&a" (res), "=&d" (cmp), "=&a" (count)
		: "a" (size), "a" (vaddr), "a" (&_zb_findmap),
		  "m" (*(addrtype *) vaddr) : "cc" );
	return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long word, bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
#ifndef __s390x__
		asm("   ic   %0,0(%1)\n"
		    "   icm  %0,2,1(%1)\n"
		    "   icm  %0,4,2(%1)\n"
		    "   icm  %0,8,3(%1)"
		    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
		asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = ffz(word >> bit) + bit;
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_zero_bit(p, size);
}

/* Bitmap functions for the minix filesystem. */
/* FIXME !!! */
#define minix_test_and_set_bit(nr,addr) \
	test_and_set_bit(nr,(unsigned long *)addr)
#define minix_set_bit(nr,addr) \
	set_bit(nr,(unsigned long *)addr)
#define minix_test_and_clear_bit(nr,addr) \
	test_and_clear_bit(nr,(unsigned long *)addr)
#define minix_test_bit(nr,addr) \
	test_bit(nr,(unsigned long *)addr)
#define minix_find_first_zero_bit(addr,size) \
	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */