Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[MIPS] Make support for weakly ordered LL/SC a config option.

None of the weakly ordered processors supported in-tree need this, but it
seems like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
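
For context, a simplified sketch of the pattern this patch is about: a MIPS
LL/SC read-modify-write with a barrier on either side.  It is modelled on the
atomic_add_return() hunks below, not verbatim kernel code; after this patch
the smp_llsc_mb() calls emit a "sync" only when
CONFIG_WEAK_REORDERING_BEYOND_LLSC is set, and collapse to a pure compiler
barrier otherwise.

static inline int sketch_add_return(int i, atomic_t *v)
{
	unsigned long result, temp;

	smp_llsc_mb();			/* order earlier accesses before the LL */

	__asm__ __volatile__(
	"	.set	mips3			\n"
	"1:	ll	%1, %2			\n"	/* load-linked old value */
	"	addu	%0, %1, %3		\n"	/* compute new value */
	"	sc	%0, %2			\n"	/* store-conditional */
	"	beqz	%0, 1b			\n"	/* lost the reservation? retry */
	"	addu	%0, %1, %3		\n"	/* recompute the return value */
	"	.set	mips0			\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");

	smp_llsc_mb();			/* order the SC before later accesses */

	return result;
}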

+59 -38
+11
arch/mips/Kconfig
···
 config SYS_HAS_CPU_SB1
 	bool
 
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
 config WEAK_ORDERING
+	bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
 	bool
 endmenu
 
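
Neither option is user-selectable; a CPU or platform entry is expected to
select them.  A hypothetical port whose CPU reorders around LL/SC would wire
itself up along these lines (CPU_FOO is invented for illustration):

config CPU_FOO
	bool "Support for the hypothetical FOO CPU"
	select WEAK_ORDERING
	select WEAK_REORDERING_BEYOND_LLSC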
+17 -16
include/asm-mips/atomic.h
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
···
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__after_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
+
 #endif /* _ASM_ATOMIC_H */
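
The smp_mb__{before,after}_atomic_{inc,dec}() wrappers let callers order a
plain access against an atomic operation.  Since MIPS atomics are LL/SC
sequences, the cheaper barrier suffices.  A hypothetical caller (names
invented, not from this patch):

/* Make a plain store visible before dropping a reference.  Both wrappers
 * now expand to smp_llsc_mb(), so this costs nothing on CPUs that do not
 * reorder beyond LL/SC. */
static atomic_t pending = ATOMIC_INIT(1);
static int status;

static void finish(void)
{
	status = 1;			/* plain store ...            */
	smp_mb__before_atomic_dec();	/* ... ordered before the dec */
	atomic_dec(&pending);		/* observers that see pending == 0
					   also see status == 1 */
}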
+9
include/asm-mips/barrier.h
···
 #else
 #define __WEAK_ORDERING_MB	"		\n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB		"	sync	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
 
 #define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
···
 
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
+
+#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #endif /* __ASM_BARRIER_H */
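
A standalone illustration (userspace C, not kernel code) of the selection
logic above; flip the three mock config macros to see what each barrier
flavour would emit:

#include <stdio.h>

#define CONFIG_SMP				1
#define CONFIG_WEAK_ORDERING			1
#define CONFIG_WEAK_REORDERING_BEYOND_LLSC	0	/* no in-tree CPU sets this yet */

#if CONFIG_WEAK_ORDERING && CONFIG_SMP
#define __WEAK_ORDERING_MB	"sync"
#else
#define __WEAK_ORDERING_MB	"(nothing)"
#endif

#if CONFIG_WEAK_REORDERING_BEYOND_LLSC && CONFIG_SMP
#define __WEAK_LLSC_MB		"sync"
#else
#define __WEAK_LLSC_MB		"(nothing)"
#endif

int main(void)
{
	printf("smp_mb()      emits: %s\n", __WEAK_ORDERING_MB);
	printf("smp_llsc_mb() emits: %s\n", __WEAK_LLSC_MB);
	return 0;
}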
+5 -5
include/asm-mips/bitops.h
···
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
···
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
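
The test_and_*() bitops are the ones that return a value, so they are the
ones that must serialize.  A hypothetical caller (invented, not from this
patch) that relies on the trailing smp_llsc_mb():

static unsigned long device_state;
#define STATE_BUSY	0

static int device_try_claim(void)
{
	if (test_and_set_bit(STATE_BUSY, &device_state))
		return 0;	/* somebody else holds the device */
	/* the barrier inside test_and_set_bit() keeps the critical
	 * section below from floating above the successful claim */
	return 1;
}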
+4 -4
include/asm-mips/futex.h
···
 "	.set	mips3				\n"		\
 "2:	sc	$1, %2				\n"		\
 "	beqzl	$1, 1b				\n"		\
-	__WEAK_ORDERING_MB					\
+	__WEAK_LLSC_MB						\
 "3:						\n"		\
 "	.set	pop				\n"		\
 "	.set	mips0				\n"		\
···
 "	.set	mips3				\n"		\
 "2:	sc	$1, %2				\n"		\
 "	beqz	$1, 1b				\n"		\
-	__WEAK_ORDERING_MB					\
+	__WEAK_LLSC_MB						\
 "3:						\n"		\
 "	.set	pop				\n"		\
 "	.set	mips0				\n"		\
···
 "	.set	mips3				\n"
 "2:	sc	$1, %1				\n"
 "	beqzl	$1, 1b				\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 "3:						\n"
 "	.set	pop				\n"
 "	.section .fixup,\"ax\"			\n"
···
 "	.set	mips3				\n"
 "2:	sc	$1, %1				\n"
 "	beqz	$1, 1b				\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 "3:						\n"
 "	.set	pop				\n"
 "	.section .fixup,\"ax\"			\n"
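
__WEAK_LLSC_MB works here because it is a string literal: the preprocessor
splices it into the asm template by adjacent-literal concatenation.  A small
userspace demonstration (not kernel code) of the mechanism:

#include <stdio.h>

#define __WEAK_LLSC_MB	"	sync	\n"

int main(void)
{
	/* adjacent string literals merge at compile time, so the "sync"
	 * becomes one line of the final asm template */
	fputs("2:	sc	$1, %2	\n"
	      "	beqz	$1, 1b	\n"
	      __WEAK_LLSC_MB
	      "3:		\n", stdout);
	return 0;
}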
+9 -9
include/asm-mips/spinlock.h
···
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
···
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res == 0;
 }
···
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
···
 {
 	unsigned int tmp;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
···
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
···
 	"	.set	reorder				\n"
 	"	beqzl	%1, 1b				\n"
 	"	 nop					\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1				\n"
 	"2:						\n"
 	: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
···
 	"	beqz	%1, 1b				\n"
 	"	 nop					\n"
 	"	.set	reorder				\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1				\n"
 	"2:						\n"
 	: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
···
 	"	sc	%1, %0				\n"
 	"	beqzl	%1, 1b				\n"
 	"	 nop					\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1				\n"
 	"	.set	reorder				\n"
 	"2:						\n"
···
 	"	beqz	%1, 3f				\n"
 	"	 li	%2, 1				\n"
 	"2:						\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	.subsection 2				\n"
 	"3:	b	1b				\n"
 	"	 li	%2, 0				\n"
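
The acquire paths place smp_llsc_mb() after their LL/SC loops so the critical
section cannot float above the lock.  __raw_spin_unlock() and
__raw_write_unlock() are untouched because releasing there is a plain store,
while __raw_read_unlock() must atomically drop the reader count and is
therefore itself an LL/SC sequence preceded by smp_llsc_mb().  A hypothetical
caller (invented, not from this patch):

static raw_spinlock_t lock = __RAW_SPIN_LOCK_UNLOCKED;
static int shared;

static void bump(void)
{
	__raw_spin_lock(&lock);		/* LL/SC loop, then smp_llsc_mb() */
	shared++;			/* cannot be hoisted above the acquire */
	__raw_spin_unlock(&lock);	/* full smp_mb(), then the releasing store */
}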
+4 -4
include/asm-mips/system.h
···
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
···
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
···
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
···
 		raw_local_irq_restore(flags);	/* implies memory barrier  */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
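
Likewise for xchg()/cmpxchg(): they are LL/SC read-modify-writes, so the
trailing barrier can be the cheaper one.  A hypothetical caller (invented,
not from this patch):

static unsigned long owner;

static int try_become_owner(unsigned long me)
{
	if (cmpxchg(&owner, 0UL, me) != 0UL)
		return 0;	/* lost the race */
	/* the barrier after the LL/SC sequence orders ownership
	 * before any subsequent use of the owned object */
	return 1;
}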