Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: delete checks for xor_unlock_is_negative_byte()

Architectures which don't define their own version use the one in
asm-generic/bitops/lock.h. Get rid of all the ifdefs around "maybe we
don't have it".

Link: https://lkml.kernel.org/r/20231004165317.1061855-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Matthew Wilcox (Oracle) and committed by
Andrew Morton
f12fb73b 12010aa8

+1 -48
-1
arch/alpha/include/asm/bitops.h
··· 305 305 306 306 return (old & BIT(7)) != 0; 307 307 } 308 - #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte 309 308 310 309 /* 311 310 * ffz = Find First Zero in word. Undefined if no zero exists,
-1
arch/m68k/include/asm/bitops.h
··· 339 339 return result; 340 340 #endif 341 341 } 342 - #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte 343 342 344 343 /* 345 344 * The true 68020 and more advanced processors support the "bfffo"
-1
arch/mips/include/asm/bitops.h
··· 301 301 302 302 return res; 303 303 } 304 - #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte 305 304 306 305 #undef __bit_op 307 306 #undef __test_bit_op
-1
arch/riscv/include/asm/bitops.h
··· 202 202 : "memory"); 203 203 return (res & BIT(7)) != 0; 204 204 } 205 - #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte 206 205 207 206 #undef __test_and_op_bit 208 207 #undef __op_bit
-5
include/asm-generic/bitops/instrumented-lock.h
··· 58 58 return arch_test_and_set_bit_lock(nr, addr); 59 59 } 60 60 61 - #if defined(arch_xor_unlock_is_negative_byte) 62 61 /** 63 62 * xor_unlock_is_negative_byte - XOR a single byte in memory and test if 64 63 * it is negative, for unlock. ··· 79 80 instrument_atomic_write(addr, sizeof(long)); 80 81 return arch_xor_unlock_is_negative_byte(mask, addr); 81 82 } 82 - /* Let everybody know we have it. */ 83 - #define xor_unlock_is_negative_byte xor_unlock_is_negative_byte 84 - #endif 85 - 86 83 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
-1
include/asm-generic/bitops/lock.h
··· 75 75 old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p); 76 76 return !!(old & BIT(7)); 77 77 } 78 - #define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte 79 78 #endif 80 79 81 80 #include <asm-generic/bitops/instrumented-lock.h>
-3
kernel/kcsan/kcsan_test.c
··· 699 699 KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true); 700 700 KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); 701 701 KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); 702 - 703 - #ifdef xor_unlock_is_negative_byte 704 702 KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); 705 703 KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); 706 704 KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); 707 - #endif 708 705 kcsan_nestable_atomic_end(); 709 706 } 710 707
-3
kernel/kcsan/selftest.c
··· 227 227 KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); 228 228 spin_lock(&test_spinlock); 229 229 KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); 230 - 231 - #ifdef xor_unlock_is_negative_byte 232 230 KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); 233 231 KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); 234 232 KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); 235 - #endif 236 233 kcsan_nestable_atomic_end(); 237 234 238 235 return ret;
+1 -29
mm/filemap.c
··· 1482 1482 } 1483 1483 EXPORT_SYMBOL_GPL(folio_add_wait_queue); 1484 1484 1485 - #ifdef xor_unlock_is_negative_byte 1486 - #define clear_bit_unlock_is_negative_byte(nr, p) \ 1487 - xor_unlock_is_negative_byte(1 << nr, p) 1488 - #endif 1489 - 1490 - #ifndef clear_bit_unlock_is_negative_byte 1491 - 1492 - /* 1493 - * PG_waiters is the high bit in the same byte as PG_lock. 1494 - * 1495 - * On x86 (and on many other architectures), we can clear PG_lock and 1496 - * test the sign bit at the same time. But if the architecture does 1497 - * not support that special operation, we just do this all by hand 1498 - * instead. 1499 - * 1500 - * The read of PG_waiters has to be after (or concurrently with) PG_locked 1501 - * being cleared, but a memory barrier should be unnecessary since it is 1502 - * in the same byte as PG_locked. 1503 - */ 1504 - static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) 1505 - { 1506 - clear_bit_unlock(nr, mem); 1507 - /* smp_mb__after_atomic(); */ 1508 - return test_bit(PG_waiters, mem); 1509 - } 1510 - 1511 - #endif 1512 - 1513 1485 /** 1514 1486 * folio_unlock - Unlock a locked folio. 1515 1487 * @folio: The folio. ··· 1497 1525 BUILD_BUG_ON(PG_waiters != 7); 1498 1526 BUILD_BUG_ON(PG_locked > 7); 1499 1527 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1500 - if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) 1528 + if (xor_unlock_is_negative_byte(1 << PG_locked, folio_flags(folio, 0))) 1501 1529 folio_wake_bit(folio, PG_locked); 1502 1530 } 1503 1531 EXPORT_SYMBOL(folio_unlock);
-3
mm/kasan/kasan_test.c
··· 1098 1098 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr)); 1099 1099 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr)); 1100 1100 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); 1101 - 1102 - #if defined(xor_unlock_is_negative_byte) 1103 1101 if (nr < 7) 1104 1102 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = 1105 1103 xor_unlock_is_negative_byte(1 << nr, addr)); 1106 - #endif 1107 1104 } 1108 1105 1109 1106 static void kasan_bitops_generic(struct kunit *test)