#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifdef __KERNEL__

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
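
/*
 * Worked example (added for exposition, not part of the original header;
 * the helper name is made up): under the 32 bit format above, bit nr of
 * the bitmap lives in memory byte (nr ^ 24) >> 3, counted from the start
 * of the bitmap. XORing with 24 (== 32 - 8) reverses the byte index
 * inside each big endian word without disturbing the word index.
 */
static inline unsigned long __bit32_byte_offset_example(unsigned long nr)
{
        return (nr ^ 24) >> 3; /* bit 0x00 -> byte 3, bit 0x1f -> byte 0,
                                  bit 0x20 -> byte 7, bit 0x3f -> byte 4 */
}
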
/* bitmap tables from arch/s390/kernel/bitmap.S */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   l   %0,0(%4)\n"			\
			     "0: lr  %1,%0\n"				\
			     __op_string "  %1,%3\n"			\
			     "   cs  %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	__asm__ __volatile__("   lg  %0,0(%4)\n"			\
			     "0: lgr %1,%0\n"				\
			     __op_string "  %1,%3\n"			\
			     "   csg %0,%1,0(%4)\n"			\
			     "   jl  0b"				\
			     : "=&d" (__old), "=&d" (__new),		\
			       "=m" (*(unsigned long *) __addr)		\
			     : "d" (__val), "a" (__addr),		\
			       "m" (*(unsigned long *) __addr) : "cc" );

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() __asm__ __volatile__ ( "" : : : "memory" )

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}
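
/*
 * Plain C model of __BITOPS_LOOP (a sketch for exposition only; these
 * helper names do not exist in the kernel). CS/CSG stores the new value
 * only if the word still holds the old value, and the loop retries until
 * no other CPU modified the word in between. __cs_model shows the compare
 * and swap logic, minus the atomicity the real instruction provides.
 */
static inline unsigned long
__cs_model(unsigned long *p, unsigned long old, unsigned long new)
{
        unsigned long cur = *p;         /* CS compares %0 with 0(%4) ... */
        if (cur == old)
                *p = new;               /* ... and stores %1 on a match */
        return cur;
}

static inline void __bitops_loop_model(unsigned long *word, unsigned long mask)
{
        unsigned long old, new;

        do {
                old = *word;            /* l/lg: snapshot the word */
                new = old | mask;       /* __op_string, here __BITOPS_OR */
        } while (__cs_model(word, old, new) != old);    /* jl 0b: retry */
}
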
/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make OR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make AND/test mask */
        mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
        __BITOPS_BARRIER();
        return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

        addr = (unsigned long) ptr;
        /* calculate address for CS */
        addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
        /* make XOR/test mask */
        mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
        /* Do the atomic update. */
        __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
        __BITOPS_BARRIER();
        return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("oc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr&7) {
        case 0:
                asm volatile ("oi 0(%1),0x01" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("oi 0(%1),0x02" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("oi 0(%1),0x04" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("oi 0(%1),0x08" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("oi 0(%1),0x10" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("oi 0(%1),0x20" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("oi 0(%1),0x40" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("oi 0(%1),0x80" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
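
/*
 * Sketch (exposition only, helper name made up): what the OC instruction
 * in __set_bit does, written in plain C. The XOR in the byte offset folds
 * the big endian byte reversal into the address, and _oi_bitmap supplies
 * the single-bit mask byte; __constant_set_bit collapses the same
 * operation into one OI instruction with an immediate mask when nr is a
 * compile time constant.
 */
static inline void
__set_bit_model(unsigned long nr, volatile unsigned long *ptr)
{
        volatile unsigned char *cp = (volatile unsigned char *) ptr +
                ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);

        *cp |= _oi_bitmap[nr & 7];      /* what "oc 0(1,%1),0(%2)" does */
}
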
/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("nc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr&7) {
        case 0:
                asm volatile ("ni 0(%1),0xFE" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("ni 0(%1),0xFD" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("ni 0(%1),0xFB" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("ni 0(%1),0xF7" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("ni 0(%1),0xEF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("ni 0(%1),0xDF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("ni 0(%1),0xBF" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("ni 0(%1),0x7F" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )
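
/*
 * Inferred contents of the mask tables (a sketch added for exposition;
 * the real tables are assembled in bitmap.S). The immediates used by the
 * constant-nr variants above pin them down: _oi_bitmap[i] == 1 << i and
 * _ni_bitmap[i] == (unsigned char) ~(1 << i).
 */
static const unsigned char __oi_bitmap_sketch[8] =
        { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
static const unsigned char __ni_bitmap_sketch[8] =
        { 0xFE, 0xFD, 0xFB, 0xF7, 0xEF, 0xDF, 0xBF, 0x7F };
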
/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        asm volatile("xc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        switch (nr&7) {
        case 0:
                asm volatile ("xi 0(%1),0x01" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 1:
                asm volatile ("xi 0(%1),0x02" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 2:
                asm volatile ("xi 0(%1),0x04" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 3:
                asm volatile ("xi 0(%1),0x08" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 4:
                asm volatile ("xi 0(%1),0x10" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 5:
                asm volatile ("xi 0(%1),0x20" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 6:
                asm volatile ("xi 0(%1),0x40" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        case 7:
                asm volatile ("xi 0(%1),0x80" : "=m" (*(char *) addr)
                              : "a" (addr), "m" (*(char *) addr) : "cc" );
                break;
        }
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )
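
/*
 * Note (exposition only, helper name made up): the change variants reuse
 * _oi_bitmap instead of needing a third table, because XORing with
 * (1 << bit) toggles exactly that bit. One-line C model of the XC in
 * __change_bit:
 */
static inline void
__change_bit_model(unsigned long nr, volatile unsigned long *ptr)
{
        ((volatile unsigned char *) ptr)
                [(nr ^ (__BITOPS_WORDSIZE - 8)) >> 3] ^= _oi_bitmap[nr & 7];
}
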
/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("oc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("nc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_ni_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(unsigned char *) addr;
        asm volatile("xc 0(1,%1),0(%2)"
                     : "=m" (*(char *) addr)
                     : "a" (addr), "a" (_oi_bitmap + (nr & 7)),
                       "m" (*(char *) addr) : "cc", "memory" );
        return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
        unsigned long addr;
        unsigned char ch;

        addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
        ch = *(volatile unsigned char *) addr;
        return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
        return (((volatile char *) addr)
                [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
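
/*
 * Usage sketch (hypothetical caller, exposition only): the test_and_*
 * primitives return the previous bit value, which is what makes the
 * classic try-lock idiom work; on SMP the CS based variants above make
 * the read-modify-write atomic.
 */
static inline void __example_bit_lock(volatile unsigned long *flags)
{
        while (test_and_set_bit(0, flags))
                ;       /* spin until the bit was previously clear */
}
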
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static inline unsigned long ffz(unsigned long word)
{
        unsigned long bit = 0;

#ifdef __s390x__
        if (likely((word & 0xffffffff) == 0xffffffff)) {
                word >>= 32;
                bit += 32;
        }
#endif
        if (likely((word & 0xffff) == 0xffff)) {
                word >>= 16;
                bit += 16;
        }
        if (likely((word & 0xff) == 0xff)) {
                word >>= 8;
                bit += 8;
        }
        return bit + _zb_findmap[word & 0xff];
}

/*
 * __ffs = find first bit in word. Undefined if no bit exists,
 * so code should check against 0UL first..
 */
static inline unsigned long __ffs (unsigned long word)
{
        unsigned long bit = 0;

#ifdef __s390x__
        if (likely((word & 0xffffffff) == 0)) {
                word >>= 32;
                bit += 32;
        }
#endif
        if (likely((word & 0xffff) == 0)) {
                word >>= 16;
                bit += 16;
        }
        if (likely((word & 0xff) == 0)) {
                word >>= 8;
                bit += 8;
        }
        return bit + _sb_findmap[word & 0xff];
}
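
/*
 * Worked example (exposition only, helper name made up): ffz() narrows
 * the search by halving, then finishes with one table lookup. For
 * word == 0x0000ffff the low 16 bits are all ones, so bit becomes 16 and
 * _zb_findmap[0x00] supplies the remaining 0: ffz() == 16. Symmetrically,
 * __ffs(0x00010000) == 16 via _sb_findmap.
 */
static inline unsigned long __ffz_example(void)
{
        return ffz(0x0000ffffUL);       /* == 16 */
}
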
/*
 * Find-bit routines..
 */

#ifndef __s390x__

static inline int
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   slr  %0,%0\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "0: c    %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,4(%0)\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   lhi  %1,0xff\n"
                "   tml  %2,0xffff\n"
                "   jno  2f\n"
                "   ahi  %0,16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0x00ff\n"
                "   jno  3f\n"
                "   ahi  %0,8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

static inline int
find_first_bit(const unsigned long * addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   slr  %1,%1\n"
                "   lr   %2,%3\n"
                "   slr  %0,%0\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "0: c    %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,4(%0)\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   lhi  %1,0xff\n"
                "   tml  %2,0xffff\n"
                "   jnz  2f\n"
                "   ahi  %0,16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0x00ff\n"
                "   jnz  3f\n"
                "   ahi  %0,8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_sb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
find_first_zero_bit(const unsigned long * addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi %1,-1\n"
                "   lgr  %2,%3\n"
                "   slgr %0,%0\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "0: cg   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   la   %0,8(%0)\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: lg   %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   clr  %2,%1\n"
                "   jne  2f\n"
                "   aghi %0,32\n"
                "   srlg %2,%2,32\n"
                "2: lghi %1,0xff\n"
                "   tmll %2,0xffff\n"
                "   jno  3f\n"
                "   aghi %0,16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0x00ff\n"
                "   jno  4f\n"
                "   aghi %0,8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

static inline unsigned long
find_first_bit(const unsigned long * addr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   slgr %1,%1\n"
                "   lgr  %2,%3\n"
                "   slgr %0,%0\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "0: cg   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   aghi %0,8\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: lg   %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   clr  %2,%1\n"
                "   jne  2f\n"
                "   aghi %0,32\n"
                "   srlg %2,%2,32\n"
                "2: lghi %1,0xff\n"
                "   tmll %2,0xffff\n"
                "   jnz  3f\n"
                "   aghi %0,16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0x00ff\n"
                "   jnz  4f\n"
                "   aghi %0,8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (addr), "a" (&_sb_findmap),
                  "m" (*(addrtype *) addr) : "cc" );
        return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
find_next_zero_bit (const unsigned long * addr, unsigned long size,
                    unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * s390 version of ffz returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = ffz(*p >> bit) + bit;
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_zero_bit(p, size);
}
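
/*
 * Usage sketch (hypothetical caller, exposition only): the find_first/
 * find_next pair supports the usual "iterate over all clear bits" loop;
 * both return size when nothing is found, which terminates the loop.
 */
static inline unsigned long
__count_zero_bits_example(const unsigned long *map, unsigned long size)
{
        unsigned long bit, zeros = 0;

        for (bit = find_first_zero_bit(map, size); bit < size;
             bit = find_next_zero_bit(map, size, bit + 1))
                zeros++;
        return zeros;
}
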
static inline int
find_next_bit (const unsigned long * addr, unsigned long size,
               unsigned long offset)
{
        const unsigned long *p;
        unsigned long bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
                /*
                 * s390 version of __ffs returns __BITOPS_WORDSIZE
                 * if no one bit is present in the word.
                 */
                set = __ffs(*p & (~0UL << bit));
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + find_first_bit(p, size);
}

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        return find_first_bit(b, 140);
}

#include <asm-generic/bitops/ffs.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>

/*
 * ATTENTION: Intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

#define ext2_set_bit(nr, addr)       \
	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
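
/*
 * Sketch (exposition only, helper name made up): what the
 * nr ^ (__BITOPS_WORDSIZE - 8) translation in the ext2 macros achieves.
 * XORing the bit number before the native bitop cancels the big endian
 * byte reversal, so ext2_test_bit(nr, addr) reads bit (nr & 7) of byte
 * (nr >> 3) -- exactly the i386 on-disk layout.
 */
static inline int __ext2_test_bit_model(unsigned long nr, const void *vaddr)
{
        const unsigned char *p = vaddr;

        return (p[nr >> 3] >> (nr & 7)) & 1;
}
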
#ifndef __s390x__

static inline int
ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long cmp, count;
        unsigned int res;

        if (!size)
                return 0;
        __asm__("   lhi  %1,-1\n"
                "   lr   %2,%3\n"
                "   ahi  %2,31\n"
                "   srl  %2,5\n"
                "   slr  %0,%0\n"
                "0: cl   %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   ahi  %0,4\n"
                "   brct %2,0b\n"
                "   lr   %0,%3\n"
                "   j    4f\n"
                "1: l    %2,0(%0,%4)\n"
                "   sll  %0,3\n"
                "   ahi  %0,24\n"
                "   lhi  %1,0xff\n"
                "   tmh  %2,0xffff\n"
                "   jo   2f\n"
                "   ahi  %0,-16\n"
                "   srl  %2,16\n"
                "2: tml  %2,0xff00\n"
                "   jo   3f\n"
                "   ahi  %0,-8\n"
                "   srl  %2,8\n"
                "3: nr   %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   alr  %0,%2\n"
                "4:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) vaddr) : "cc" );
        return (res < size) ? res : size;
}

#else /* __s390x__ */

static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
        typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
        unsigned long res, cmp, count;

        if (!size)
                return 0;
        __asm__("   lghi %1,-1\n"
                "   lgr  %2,%3\n"
                "   aghi %2,63\n"
                "   srlg %2,%2,6\n"
                "   slgr %0,%0\n"
                "0: clg  %1,0(%0,%4)\n"
                "   jne  1f\n"
                "   aghi %0,8\n"
                "   brct %2,0b\n"
                "   lgr  %0,%3\n"
                "   j    5f\n"
                "1: cl   %1,0(%0,%4)\n"
                "   jne  2f\n"
                "   aghi %0,4\n"
                "2: l    %2,0(%0,%4)\n"
                "   sllg %0,%0,3\n"
                "   aghi %0,24\n"
                "   lghi %1,0xff\n"
                "   tmlh %2,0xffff\n"
                "   jo   3f\n"
                "   aghi %0,-16\n"
                "   srl  %2,16\n"
                "3: tmll %2,0xff00\n"
                "   jo   4f\n"
                "   aghi %0,-8\n"
                "   srl  %2,8\n"
                "4: ngr  %2,%1\n"
                "   ic   %2,0(%2,%5)\n"
                "   algr %0,%2\n"
                "5:"
                : "=&a" (res), "=&d" (cmp), "=&a" (count)
                : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
                  "m" (*(addrtype *) vaddr) : "cc" );
        return (res < size) ? res : size;
}

#endif /* __s390x__ */

static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
        unsigned long *addr = vaddr, *p;
        unsigned long word, bit, set;

        if (offset >= size)
                return size;
        bit = offset & (__BITOPS_WORDSIZE - 1);
        offset -= bit;
        size -= offset;
        p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
#ifndef __s390x__
                asm("   ic   %0,0(%1)\n"
                    "   icm  %0,2,1(%1)\n"
                    "   icm  %0,4,2(%1)\n"
                    "   icm  %0,8,3(%1)"
                    : "=&a" (word) : "a" (p), "m" (*p) : "cc" );
#else
                asm("   lrvg %0,%1" : "=a" (word) : "m" (*p) );
#endif
                /*
                 * s390 version of ffz returns __BITOPS_WORDSIZE
                 * if no zero bit is present in the word.
                 */
                set = ffz(word >> bit) + bit;
                if (set >= size)
                        return size + offset;
                if (set < __BITOPS_WORDSIZE)
                        return set + offset;
                offset += __BITOPS_WORDSIZE;
                size -= __BITOPS_WORDSIZE;
                p++;
        }
        return offset + ext2_find_first_zero_bit(p, size);
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */