#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 * include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
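
/*
 * Worked example for the 64 bit format: bit 0 is the LSB of the
 * first unsigned long, which because of the big endian byte order
 * is stored in the byte at addr + 7:
 *
 *	unsigned long flags = 0;
 *	set_bit(0, &flags);	now flags == 0x0000000000000001
 *	set_bit(8, &flags);	now flags == 0x0000000000000101
 *
 * "flags |= 1UL << 8" would set exactly the same bit as
 * set_bit(8, &flags), which is why the two styles can be mixed.
 */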

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
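
/*
 * The __BITOPS_LOOP macro used above is the usual compare and swap
 * retry sequence. In pseudo C it reads roughly as
 *
 *	old = *addr;
 *	do
 *		new = old <op> val;
 *	while (cs(&old, new, addr) != 0);
 *
 * where cs() stores new at addr only if addr still contains the
 * expected old value. On a mismatch (condition code 1, hence the
 * "jl 0b") CS/CSG reloads the current contents into the old operand
 * and the loop is retried.
 */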

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )
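
/*
 * The (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3 expression used by the
 * non-SMP routines converts a bit number into the offset of the
 * byte that contains the bit: nr >> 3 would be the byte offset on a
 * little endian machine, and xor'ing nr with __BITOPS_WORDSIZE - 8
 * first mirrors the byte index within each word for big endian.
 * With 64 bit words bit 0 maps to (0 ^ 56) >> 3 = byte 7 and bit 8
 * maps to (8 ^ 56) >> 3 = byte 6.
 */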

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
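
/*
 * Example: with a compile time constant bit number test_bit()
 * expands to __constant_test_bit(), which compiles to a direct
 * byte access:
 *
 *	DECLARE_BITMAP(map, 128);
 *
 *	set_bit(42, map);
 *	if (test_bit(42, map))
 *		...
 */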

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
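
/*
 * Worked example (64 bit): __ffz_word(0, 0xffffffffffff7fffUL)
 * sees that neither the low 32 nor the low 16 bits are all ones,
 * but the low byte is 0xff, so it shifts the word right by 8 and
 * adds 8 to nr; the new low byte 0x7f has its first zero at bit 7,
 * so _zb_findmap[0x7f] == 7 and the result is 8 + 7 = 15, the
 * number of the first zero bit in the original word.
 */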

/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit
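
/*
 * Example (64 bit): for unsigned long map[2] = { 0UL, 0x100UL },
 * find_first_bit(map, 128) skips the all zero first word
 * (__ffs_word_loop returns byte offset 8) and finds bit 8 of the
 * second word, i.e. it returns 64 + 8 = 72.
 */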

/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
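
/*
 * Example: in this bit numbering bit 9 is bit 1 of the second byte,
 * so for a bitmap whose first byte is 0x00 and whose second byte is
 * 0x02, find_first_bit_le(bitmap, 16) returns 9.
 */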

static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * s390 version of __ffs returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */