
alpha: Replace smp_read_barrier_depends() usage with smp_[r]mb()

In preparation for removing smp_read_barrier_depends() altogether,
move the Alpha code over to using smp_rmb() and smp_mb() directly.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
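
For context, here is a minimal sketch of the dependent-load pattern these
barriers exist for. It is not part of the patch: struct foo, publish() and
consume() are hypothetical names, while smp_wmb()/smp_rmb(), READ_ONCE() and
WRITE_ONCE() are the real kernel primitives.

#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
#include <asm/barrier.h>	/* smp_wmb(), smp_rmb() */

struct foo {
	int data;
};

struct foo *gp;

void publish(struct foo *p)
{
	p->data = 42;
	smp_wmb();		/* order the init above before the pointer store */
	WRITE_ONCE(gp, p);
}

int consume(void)
{
	struct foo *p = READ_ONCE(gp);

	if (!p)
		return -1;
	/*
	 * The load of p->data is data-dependent on the load of gp, yet
	 * Alpha may still return stale data for it unless a barrier sits
	 * between the two loads. smp_read_barrier_depends() used to
	 * provide exactly this ordering; smp_rmb() provides it as well
	 * (and more), which is why the conversion below is safe.
	 */
	smp_rmb();
	return p->data;
}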

3 files changed: +14 -14

arch/alpha/include/asm/atomic.h  +8 -8
···
 
 /*
  * To ensure dependency ordering is preserved for the _relaxed and
- * _release atomics, an smp_read_barrier_depends() is unconditionally
- * inserted into the _relaxed variants, which are used to build the
- * barriered versions. Avoid redundant back-to-back fences in the
- * _acquire and _fence versions.
+ * _release atomics, an smp_mb() is unconditionally inserted into the
+ * _relaxed variants, which are used to build the barriered versions.
+ * Avoid redundant back-to-back fences in the _acquire and _fence
+ * versions.
  */
 #define __atomic_acquire_fence()
 #define __atomic_post_full_fence()
···
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
-	smp_read_barrier_depends();				\
+	smp_mb();						\
 	return result;						\
 }
···
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
-	smp_read_barrier_depends();				\
+	smp_mb();						\
 	return result;						\
 }
···
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
-	smp_read_barrier_depends();				\
+	smp_mb();						\
 	return result;						\
 }
···
 	".previous"						\
 	:"=&r" (temp), "=m" (v->counter), "=&r" (result)	\
 	:"Ir" (i), "m" (v->counter) : "memory");		\
-	smp_read_barrier_depends();				\
+	smp_mb();						\
 	return result;						\
 }
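
The trailing smp_mb() in the _relaxed variants is what lets the fence hooks
above stay empty: the generic layer composes the ordered forms from the
_relaxed ones. A simplified sketch, modelled on include/linux/atomic.h (the
exact macros vary by kernel version):

/*
 * Build an acquire-ordered op from its _relaxed form. On Alpha the
 * _relaxed op already ends with a full smp_mb(), so the acquire fence
 * hook expands to nothing and no back-to-back barrier is emitted.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})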
arch/alpha/include/asm/pgtable.h  +5 -5
···
 extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
 
 /*
- * The smp_read_barrier_depends() in the following functions are required to
- * order the load of *dir (the pointer in the top level page table) with any
- * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
+ * The smp_rmb() in the following functions are required to order the load of
+ * *dir (the pointer in the top level page table) with any subsequent load of
+ * the returned pmd_t *ret (ret is data dependent on *dir).
  *
  * If this ordering is not enforced, the CPU might load an older value of
  * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
···
 extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
 {
 	pmd_t *ret = (pmd_t *) pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	smp_rmb(); /* see above */
 	return ret;
 }
 #define pmd_offset pmd_offset
···
 {
 	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
 		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
-	smp_read_barrier_depends(); /* see above */
+	smp_rmb(); /* see above */
 	return ret;
 }
 #define pte_offset_kernel pte_offset_kernel
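
To see where those barriers fire, consider a hypothetical lockless walk down
to the pte level. walk_to_pte() is a made-up helper; the *_offset() accessors
are the real ones, two of which are patched above:

#include <linux/mm.h>

/* Upper-level error checking is trimmed for brevity. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd;

	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	/* On Alpha, the smp_rmb() inside pmd_offset() orders the load of
	 * *pud against later loads through the returned pmd pointer. */
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	/* Likewise for the load of *pmd vs. loads from the pte page. */
	return pte_offset_kernel(pmd, addr);
}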
mm/memory.c  +1 -1
···
 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
 	 * being the notable exception) will already guarantee loads are
 	 * seen in-order. See the alpha page table accessors for the
-	 * smp_read_barrier_depends() barriers in page table walking code.
+	 * smp_rmb() barriers in page table walking code.
 	 */
 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
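
The writer side that this smp_wmb() and the Alpha smp_rmb() pair with looks
roughly as follows: a condensed sketch of mm/memory.c:__pte_alloc, not the
verbatim source.

#include <linux/mm.h>
#include <asm/pgalloc.h>

int __pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm);	/* freshly initialised pte page */

	if (!new)
		return -ENOMEM;
	/*
	 * Make the pte page's initialisation visible before the pmd entry
	 * that publishes it. Pairs with the reader-side smp_rmb() in the
	 * page table accessors patched above.
	 */
	smp_wmb();

	ptl = pmd_lock(mm, pmd);
	if (pmd_none(*pmd)) {		/* nobody beat us to it */
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)			/* lost the race: free our copy */
		pte_free(mm, new);
	return 0;
}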