Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch: Mass conversion of smp_mb__*()

Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra and committed by Ingo Molnar.
4e857c58 1b15611e

+284 -288
+2 -2
block/blk-iopoll.c
··· 49 49 void __blk_iopoll_complete(struct blk_iopoll *iop) 50 50 { 51 51 list_del(&iop->list); 52 - smp_mb__before_clear_bit(); 52 + smp_mb__before_atomic(); 53 53 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); 54 54 } 55 55 EXPORT_SYMBOL(__blk_iopoll_complete); ··· 161 161 void blk_iopoll_enable(struct blk_iopoll *iop) 162 162 { 163 163 BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state)); 164 - smp_mb__before_clear_bit(); 164 + smp_mb__before_atomic(); 165 165 clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); 166 166 } 167 167 EXPORT_SYMBOL(blk_iopoll_enable);
+1 -1
crypto/chainiv.c
··· 126 126 int err = ctx->err; 127 127 128 128 if (!ctx->queue.qlen) { 129 - smp_mb__before_clear_bit(); 129 + smp_mb__before_atomic(); 130 130 clear_bit(CHAINIV_STATE_INUSE, &ctx->state); 131 131 132 132 if (!ctx->queue.qlen ||
+1 -1
drivers/base/power/domain.c
··· 105 105 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) 106 106 { 107 107 atomic_inc(&genpd->sd_count); 108 - smp_mb__after_atomic_inc(); 108 + smp_mb__after_atomic(); 109 109 } 110 110 111 111 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+2 -2
drivers/block/mtip32xx/mtip32xx.c
··· 224 224 */ 225 225 static inline void release_slot(struct mtip_port *port, int tag) 226 226 { 227 - smp_mb__before_clear_bit(); 227 + smp_mb__before_atomic(); 228 228 clear_bit(tag, port->allocated); 229 - smp_mb__after_clear_bit(); 229 + smp_mb__after_atomic(); 230 230 } 231 231 232 232 /*
+1 -1
drivers/cpuidle/coupled.c
··· 159 159 { 160 160 int n = dev->coupled->online_count; 161 161 162 - smp_mb__before_atomic_inc(); 162 + smp_mb__before_atomic(); 163 163 atomic_inc(a); 164 164 165 165 while (atomic_read(a) < n)
+1 -1
drivers/firewire/ohci.c
··· 3498 3498 } 3499 3499 3500 3500 clear_bit_unlock(0, &ctx->flushing_completions); 3501 - smp_mb__after_clear_bit(); 3501 + smp_mb__after_atomic(); 3502 3502 } 3503 3503 3504 3504 tasklet_enable(&ctx->context.tasklet);
+5 -5
drivers/gpu/drm/drm_irq.c
··· 156 156 */ 157 157 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { 158 158 atomic_inc(&dev->vblank[crtc].count); 159 - smp_mb__after_atomic_inc(); 159 + smp_mb__after_atomic(); 160 160 } 161 161 162 162 /* Invalidate all timestamps while vblank irq's are off. */ ··· 864 864 vblanktimestamp(dev, crtc, tslot) = t_vblank; 865 865 } 866 866 867 - smp_mb__before_atomic_inc(); 867 + smp_mb__before_atomic(); 868 868 atomic_add(diff, &dev->vblank[crtc].count); 869 - smp_mb__after_atomic_inc(); 869 + smp_mb__after_atomic(); 870 870 } 871 871 872 872 /** ··· 1330 1330 /* Increment cooked vblank count. This also atomically commits 1331 1331 * the timestamp computed above. 1332 1332 */ 1333 - smp_mb__before_atomic_inc(); 1333 + smp_mb__before_atomic(); 1334 1334 atomic_inc(&dev->vblank[crtc].count); 1335 - smp_mb__after_atomic_inc(); 1335 + smp_mb__after_atomic(); 1336 1336 } else { 1337 1337 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1338 1338 crtc, (int) diff_ns);
+1 -1
drivers/gpu/drm/i915/i915_irq.c
··· 2147 2147 * updates before 2148 2148 * the counter increment. 2149 2149 */ 2150 - smp_mb__before_atomic_inc(); 2150 + smp_mb__before_atomic(); 2151 2151 atomic_inc(&dev_priv->gpu_error.reset_counter); 2152 2152 2153 2153 kobject_uevent_env(&dev->primary->kdev->kobj,
+1 -1
drivers/md/bcache/bcache.h
··· 828 828 return false; 829 829 830 830 /* Paired with the mb in cached_dev_attach */ 831 - smp_mb__after_atomic_inc(); 831 + smp_mb__after_atomic(); 832 832 return true; 833 833 } 834 834
+1 -1
drivers/md/bcache/closure.h
··· 243 243 cl->fn = fn; 244 244 cl->wq = wq; 245 245 /* between atomic_dec() in closure_put() */ 246 - smp_mb__before_atomic_dec(); 246 + smp_mb__before_atomic(); 247 247 } 248 248 249 249 static inline void closure_queue(struct closure *cl)
+4 -4
drivers/md/dm-bufio.c
··· 607 607 608 608 BUG_ON(!test_bit(B_WRITING, &b->state)); 609 609 610 - smp_mb__before_clear_bit(); 610 + smp_mb__before_atomic(); 611 611 clear_bit(B_WRITING, &b->state); 612 - smp_mb__after_clear_bit(); 612 + smp_mb__after_atomic(); 613 613 614 614 wake_up_bit(&b->state, B_WRITING); 615 615 } ··· 997 997 998 998 BUG_ON(!test_bit(B_READING, &b->state)); 999 999 1000 - smp_mb__before_clear_bit(); 1000 + smp_mb__before_atomic(); 1001 1001 clear_bit(B_READING, &b->state); 1002 - smp_mb__after_clear_bit(); 1002 + smp_mb__after_atomic(); 1003 1003 1004 1004 wake_up_bit(&b->state, B_READING); 1005 1005 }
+2 -2
drivers/md/dm-snap.c
··· 642 642 struct dm_snapshot *s = pe->snap; 643 643 644 644 mempool_free(pe, s->pending_pool); 645 - smp_mb__before_atomic_dec(); 645 + smp_mb__before_atomic(); 646 646 atomic_dec(&s->pending_exceptions_count); 647 647 } 648 648 ··· 783 783 static void merge_shutdown(struct dm_snapshot *s) 784 784 { 785 785 clear_bit_unlock(RUNNING_MERGE, &s->state_bits); 786 - smp_mb__after_clear_bit(); 786 + smp_mb__after_atomic(); 787 787 wake_up_bit(&s->state_bits, RUNNING_MERGE); 788 788 } 789 789
+1 -1
drivers/md/dm.c
··· 2447 2447 static void dm_queue_flush(struct mapped_device *md) 2448 2448 { 2449 2449 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); 2450 - smp_mb__after_clear_bit(); 2450 + smp_mb__after_atomic(); 2451 2451 queue_work(md->wq, &md->work); 2452 2452 } 2453 2453
+1 -1
drivers/md/raid5.c
··· 4400 4400 * STRIPE_ON_UNPLUG_LIST clear but the stripe 4401 4401 * is still in our list 4402 4402 */ 4403 - smp_mb__before_clear_bit(); 4403 + smp_mb__before_atomic(); 4404 4404 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 4405 4405 /* 4406 4406 * STRIPE_ON_RELEASE_LIST could be set here. In that
+3 -3
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
··· 399 399 400 400 /* clear 'streaming' status bit */ 401 401 clear_bit(ADAP_STREAMING, &adap->state_bits); 402 - smp_mb__after_clear_bit(); 402 + smp_mb__after_atomic(); 403 403 wake_up_bit(&adap->state_bits, ADAP_STREAMING); 404 404 skip_feed_stop: 405 405 ··· 550 550 err: 551 551 if (!adap->suspend_resume_active) { 552 552 clear_bit(ADAP_INIT, &adap->state_bits); 553 - smp_mb__after_clear_bit(); 553 + smp_mb__after_atomic(); 554 554 wake_up_bit(&adap->state_bits, ADAP_INIT); 555 555 } 556 556 ··· 591 591 if (!adap->suspend_resume_active) { 592 592 adap->active_fe = -1; 593 593 clear_bit(ADAP_SLEEP, &adap->state_bits); 594 - smp_mb__after_clear_bit(); 594 + smp_mb__after_atomic(); 595 595 wake_up_bit(&adap->state_bits, ADAP_SLEEP); 596 596 } 597 597
+3 -3
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 2781 2781 2782 2782 case LOAD_OPEN: 2783 2783 netif_tx_start_all_queues(bp->dev); 2784 - smp_mb__after_clear_bit(); 2784 + smp_mb__after_atomic(); 2785 2785 break; 2786 2786 2787 2787 case LOAD_DIAG: ··· 4939 4939 void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, 4940 4940 u32 verbose) 4941 4941 { 4942 - smp_mb__before_clear_bit(); 4942 + smp_mb__before_atomic(); 4943 4943 set_bit(flag, &bp->sp_rtnl_state); 4944 - smp_mb__after_clear_bit(); 4944 + smp_mb__after_atomic(); 4945 4945 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", 4946 4946 flag); 4947 4947 schedule_delayed_work(&bp->sp_rtnl_task, 0);
+9 -9
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 1858 1858 return; 1859 1859 #endif 1860 1860 1861 - smp_mb__before_atomic_inc(); 1861 + smp_mb__before_atomic(); 1862 1862 atomic_inc(&bp->cq_spq_left); 1863 1863 /* push the change in bp->spq_left and towards the memory */ 1864 - smp_mb__after_atomic_inc(); 1864 + smp_mb__after_atomic(); 1865 1865 1866 1866 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1867 1867 ··· 1876 1876 * sp_state is cleared, and this order prevents 1877 1877 * races 1878 1878 */ 1879 - smp_mb__before_clear_bit(); 1879 + smp_mb__before_atomic(); 1880 1880 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); 1881 1881 wmb(); 1882 1882 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 1883 - smp_mb__after_clear_bit(); 1883 + smp_mb__after_atomic(); 1884 1884 1885 1885 /* schedule the sp task as mcp ack is required */ 1886 1886 bnx2x_schedule_sp_task(bp); ··· 5272 5272 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); 5273 5273 5274 5274 /* mark latest Q bit */ 5275 - smp_mb__before_clear_bit(); 5275 + smp_mb__before_atomic(); 5276 5276 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); 5277 - smp_mb__after_clear_bit(); 5277 + smp_mb__after_atomic(); 5278 5278 5279 5279 /* send Q update ramrod for FCoE Q */ 5280 5280 rc = bnx2x_queue_state_change(bp, &queue_params); ··· 5500 5500 spqe_cnt++; 5501 5501 } /* for */ 5502 5502 5503 - smp_mb__before_atomic_inc(); 5503 + smp_mb__before_atomic(); 5504 5504 atomic_add(spqe_cnt, &bp->eq_spq_left); 5505 5505 5506 5506 bp->eq_cons = sw_cons; ··· 13869 13869 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { 13870 13870 int count = ctl->data.credit.credit_count; 13871 13871 13872 - smp_mb__before_atomic_inc(); 13872 + smp_mb__before_atomic(); 13873 13873 atomic_add(count, &bp->cq_spq_left); 13874 - smp_mb__after_atomic_inc(); 13874 + smp_mb__after_atomic(); 13875 13875 break; 13876 13876 } 13877 13877 case DRV_CTL_ULP_REGISTER_CMD: {
+13 -13
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
··· 258 258 259 259 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) 260 260 { 261 - smp_mb__before_clear_bit(); 261 + smp_mb__before_atomic(); 262 262 clear_bit(o->state, o->pstate); 263 - smp_mb__after_clear_bit(); 263 + smp_mb__after_atomic(); 264 264 } 265 265 266 266 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) 267 267 { 268 - smp_mb__before_clear_bit(); 268 + smp_mb__before_atomic(); 269 269 set_bit(o->state, o->pstate); 270 - smp_mb__after_clear_bit(); 270 + smp_mb__after_atomic(); 271 271 } 272 272 273 273 /** ··· 2131 2131 2132 2132 /* The operation is completed */ 2133 2133 clear_bit(p->state, p->pstate); 2134 - smp_mb__after_clear_bit(); 2134 + smp_mb__after_atomic(); 2135 2135 2136 2136 return 0; 2137 2137 } ··· 3576 3576 3577 3577 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) 3578 3578 { 3579 - smp_mb__before_clear_bit(); 3579 + smp_mb__before_atomic(); 3580 3580 clear_bit(o->sched_state, o->raw.pstate); 3581 - smp_mb__after_clear_bit(); 3581 + smp_mb__after_atomic(); 3582 3582 } 3583 3583 3584 3584 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) 3585 3585 { 3586 - smp_mb__before_clear_bit(); 3586 + smp_mb__before_atomic(); 3587 3587 set_bit(o->sched_state, o->raw.pstate); 3588 - smp_mb__after_clear_bit(); 3588 + smp_mb__after_atomic(); 3589 3589 } 3590 3590 3591 3591 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) ··· 4200 4200 if (rc) { 4201 4201 o->next_state = BNX2X_Q_STATE_MAX; 4202 4202 clear_bit(pending_bit, pending); 4203 - smp_mb__after_clear_bit(); 4203 + smp_mb__after_atomic(); 4204 4204 return rc; 4205 4205 } 4206 4206 ··· 4288 4288 wmb(); 4289 4289 4290 4290 clear_bit(cmd, &o->pending); 4291 - smp_mb__after_clear_bit(); 4291 + smp_mb__after_atomic(); 4292 4292 4293 4293 return 0; 4294 4294 } ··· 5279 5279 wmb(); 5280 5280 5281 5281 clear_bit(cmd, &o->pending); 5282 - smp_mb__after_clear_bit(); 5282 + smp_mb__after_atomic(); 5283 5283 5284 5284 return 0; 5285 5285 } ··· 
5926 5926 if (rc) { 5927 5927 o->next_state = BNX2X_F_STATE_MAX; 5928 5928 clear_bit(cmd, pending); 5929 - smp_mb__after_clear_bit(); 5929 + smp_mb__after_atomic(); 5930 5930 return rc; 5931 5931 } 5932 5932
+4 -4
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 1626 1626 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1627 1627 struct bnx2x_virtf *vf) 1628 1628 { 1629 - smp_mb__before_clear_bit(); 1629 + smp_mb__before_atomic(); 1630 1630 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1631 - smp_mb__after_clear_bit(); 1631 + smp_mb__after_atomic(); 1632 1632 } 1633 1633 1634 1634 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, ··· 2960 2960 2961 2961 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 2962 2962 { 2963 - smp_mb__before_clear_bit(); 2963 + smp_mb__before_atomic(); 2964 2964 set_bit(flag, &bp->iov_task_state); 2965 - smp_mb__after_clear_bit(); 2965 + smp_mb__after_atomic(); 2966 2966 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 2967 2967 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 2968 2968 }
+4 -4
drivers/net/ethernet/broadcom/cnic.c
··· 436 436 static int cnic_close_prep(struct cnic_sock *csk) 437 437 { 438 438 clear_bit(SK_F_CONNECT_START, &csk->flags); 439 - smp_mb__after_clear_bit(); 439 + smp_mb__after_atomic(); 440 440 441 441 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 442 442 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) ··· 450 450 static int cnic_abort_prep(struct cnic_sock *csk) 451 451 { 452 452 clear_bit(SK_F_CONNECT_START, &csk->flags); 453 - smp_mb__after_clear_bit(); 453 + smp_mb__after_atomic(); 454 454 455 455 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 456 456 msleep(1); ··· 3646 3646 3647 3647 csk_hold(csk); 3648 3648 clear_bit(SK_F_INUSE, &csk->flags); 3649 - smp_mb__after_clear_bit(); 3649 + smp_mb__after_atomic(); 3650 3650 while (atomic_read(&csk->ref_count) != 1) 3651 3651 msleep(1); 3652 3652 cnic_cm_cleanup(csk); ··· 4026 4026 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) 4027 4027 set_bit(SK_F_HW_ERR, &csk->flags); 4028 4028 4029 - smp_mb__before_clear_bit(); 4029 + smp_mb__before_atomic(); 4030 4030 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 4031 4031 cnic_cm_upcall(cp, csk, opcode); 4032 4032 break;
+3 -3
drivers/net/ethernet/brocade/bna/bnad.c
··· 249 249 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 250 250 bna_ib_ack(tcb->i_dbell, sent); 251 251 252 - smp_mb__before_clear_bit(); 252 + smp_mb__before_atomic(); 253 253 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 254 254 255 255 return sent; ··· 1126 1126 1127 1127 bnad_txq_cleanup(bnad, tcb); 1128 1128 1129 - smp_mb__before_clear_bit(); 1129 + smp_mb__before_atomic(); 1130 1130 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 1131 1131 } 1132 1132 ··· 2992 2992 sent = bnad_txcmpl_process(bnad, tcb); 2993 2993 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2994 2994 bna_ib_ack(tcb->i_dbell, sent); 2995 - smp_mb__before_clear_bit(); 2995 + smp_mb__before_atomic(); 2996 2996 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 2997 2997 } else { 2998 2998 netif_stop_queue(netdev);
+1 -1
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
··· 281 281 if (adapter->params.stats_update_period && 282 282 !(adapter->open_device_map & PORT_MASK)) { 283 283 /* Stop statistics accumulation. */ 284 - smp_mb__after_clear_bit(); 284 + smp_mb__after_atomic(); 285 285 spin_lock(&adapter->work_lock); /* sync with update task */ 286 286 spin_unlock(&adapter->work_lock); 287 287 cancel_mac_stats_update(adapter);
+3 -3
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 1379 1379 struct sge_qset *qs = txq_to_qset(q, qid); 1380 1380 1381 1381 set_bit(qid, &qs->txq_stopped); 1382 - smp_mb__after_clear_bit(); 1382 + smp_mb__after_atomic(); 1383 1383 1384 1384 if (should_restart_tx(q) && 1385 1385 test_and_clear_bit(qid, &qs->txq_stopped)) ··· 1492 1492 1493 1493 if (!skb_queue_empty(&q->sendq)) { 1494 1494 set_bit(TXQ_CTRL, &qs->txq_stopped); 1495 - smp_mb__after_clear_bit(); 1495 + smp_mb__after_atomic(); 1496 1496 1497 1497 if (should_restart_tx(q) && 1498 1498 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) ··· 1697 1697 1698 1698 if (unlikely(q->size - q->in_use < ndesc)) { 1699 1699 set_bit(TXQ_OFLD, &qs->txq_stopped); 1700 - smp_mb__after_clear_bit(); 1700 + smp_mb__after_atomic(); 1701 1701 1702 1702 if (should_restart_tx(q) && 1703 1703 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
+1 -1
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2031 2031 struct sge_fl *fl = s->egr_map[id]; 2032 2032 2033 2033 clear_bit(id, s->starving_fl); 2034 - smp_mb__after_clear_bit(); 2034 + smp_mb__after_atomic(); 2035 2035 2036 2036 if (fl_starving(fl)) { 2037 2037 rxq = container_of(fl, struct sge_eth_rxq, fl);
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 1951 1951 struct sge_fl *fl = s->egr_map[id]; 1952 1952 1953 1953 clear_bit(id, s->starving_fl); 1954 - smp_mb__after_clear_bit(); 1954 + smp_mb__after_atomic(); 1955 1955 1956 1956 /* 1957 1957 * Since we are accessing fl without a lock there's a
+4 -4
drivers/net/ethernet/freescale/gianfar.c
··· 1797 1797 1798 1798 netif_tx_stop_all_queues(dev); 1799 1799 1800 - smp_mb__before_clear_bit(); 1800 + smp_mb__before_atomic(); 1801 1801 set_bit(GFAR_DOWN, &priv->state); 1802 - smp_mb__after_clear_bit(); 1802 + smp_mb__after_atomic(); 1803 1803 1804 1804 disable_napi(priv); 1805 1805 ··· 2042 2042 2043 2043 gfar_init_tx_rx_base(priv); 2044 2044 2045 - smp_mb__before_clear_bit(); 2045 + smp_mb__before_atomic(); 2046 2046 clear_bit(GFAR_DOWN, &priv->state); 2047 - smp_mb__after_clear_bit(); 2047 + smp_mb__after_atomic(); 2048 2048 2049 2049 /* Start Rx/Tx DMA and enable the interrupts */ 2050 2050 gfar_start(priv);
+1 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 4671 4671 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); 4672 4672 4673 4673 /* flush memory to make sure state is correct before next watchog */ 4674 - smp_mb__before_clear_bit(); 4674 + smp_mb__before_atomic(); 4675 4675 clear_bit(__I40E_SERVICE_SCHED, &pf->state); 4676 4676 } 4677 4677
+4 -4
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 376 376 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); 377 377 378 378 /* flush memory to make sure state is correct before next watchdog */ 379 - smp_mb__before_clear_bit(); 379 + smp_mb__before_atomic(); 380 380 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); 381 381 } 382 382 ··· 4671 4671 if (hw->mac.ops.enable_tx_laser) 4672 4672 hw->mac.ops.enable_tx_laser(hw); 4673 4673 4674 - smp_mb__before_clear_bit(); 4674 + smp_mb__before_atomic(); 4675 4675 clear_bit(__IXGBE_DOWN, &adapter->state); 4676 4676 ixgbe_napi_enable_all(adapter); 4677 4677 ··· 5567 5567 e_dev_err("Cannot enable PCI device from suspend\n"); 5568 5568 return err; 5569 5569 } 5570 - smp_mb__before_clear_bit(); 5570 + smp_mb__before_atomic(); 5571 5571 clear_bit(__IXGBE_DISABLED, &adapter->state); 5572 5572 pci_set_master(pdev); 5573 5573 ··· 8541 8541 e_err(probe, "Cannot re-enable PCI device after reset.\n"); 8542 8542 result = PCI_ERS_RESULT_DISCONNECT; 8543 8543 } else { 8544 - smp_mb__before_clear_bit(); 8544 + smp_mb__before_atomic(); 8545 8545 clear_bit(__IXGBE_DISABLED, &adapter->state); 8546 8546 adapter->hw.hw_addr = adapter->io_addr; 8547 8547 pci_set_master(pdev);
+3 -3
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 1668 1668 1669 1669 spin_unlock_bh(&adapter->mbx_lock); 1670 1670 1671 - smp_mb__before_clear_bit(); 1671 + smp_mb__before_atomic(); 1672 1672 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1673 1673 ixgbevf_napi_enable_all(adapter); 1674 1674 ··· 3354 3354 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 3355 3355 return err; 3356 3356 } 3357 - smp_mb__before_clear_bit(); 3357 + smp_mb__before_atomic(); 3358 3358 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 3359 3359 pci_set_master(pdev); 3360 3360 ··· 3712 3712 return PCI_ERS_RESULT_DISCONNECT; 3713 3713 } 3714 3714 3715 - smp_mb__before_clear_bit(); 3715 + smp_mb__before_atomic(); 3716 3716 clear_bit(__IXGBEVF_DISABLED, &adapter->state); 3717 3717 pci_set_master(pdev); 3718 3718
+1 -1
drivers/net/wireless/ti/wlcore/main.c
··· 543 543 * wl1271_ps_elp_wakeup cannot be called concurrently. 544 544 */ 545 545 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 546 - smp_mb__after_clear_bit(); 546 + smp_mb__after_atomic(); 547 547 548 548 ret = wlcore_fw_status(wl, wl->fw_status); 549 549 if (ret < 0)
+2 -2
drivers/pci/xen-pcifront.c
··· 662 662 notify_remote_via_evtchn(pdev->evtchn); 663 663 664 664 /*in case of we lost an aer request in four lines time_window*/ 665 - smp_mb__before_clear_bit(); 665 + smp_mb__before_atomic(); 666 666 clear_bit(_PDEVB_op_active, &pdev->flags); 667 - smp_mb__after_clear_bit(); 667 + smp_mb__after_atomic(); 668 668 669 669 schedule_pcifront_aer_op(pdev); 670 670
+1 -1
drivers/scsi/isci/remote_device.c
··· 1541 1541 clear_bit(IDEV_STOP_PENDING, &idev->flags); 1542 1542 clear_bit(IDEV_IO_READY, &idev->flags); 1543 1543 clear_bit(IDEV_GONE, &idev->flags); 1544 - smp_mb__before_clear_bit(); 1544 + smp_mb__before_atomic(); 1545 1545 clear_bit(IDEV_ALLOCATED, &idev->flags); 1546 1546 wake_up(&ihost->eventq); 1547 1547 }
+2 -2
drivers/target/loopback/tcm_loop.c
··· 951 951 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 952 952 953 953 atomic_inc(&tl_tpg->tl_tpg_port_count); 954 - smp_mb__after_atomic_inc(); 954 + smp_mb__after_atomic(); 955 955 /* 956 956 * Add Linux/SCSI struct scsi_device by HCTL 957 957 */ ··· 986 986 scsi_device_put(sd); 987 987 988 988 atomic_dec(&tl_tpg->tl_tpg_port_count); 989 - smp_mb__after_atomic_dec(); 989 + smp_mb__after_atomic(); 990 990 991 991 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); 992 992 }
+13 -13
drivers/target/target_core_alua.c
··· 393 393 continue; 394 394 395 395 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 396 - smp_mb__after_atomic_inc(); 396 + smp_mb__after_atomic(); 397 397 398 398 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 399 399 ··· 404 404 405 405 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 406 406 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 407 - smp_mb__after_atomic_dec(); 407 + smp_mb__after_atomic(); 408 408 break; 409 409 } 410 410 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); ··· 990 990 * TARGET PORT GROUPS command 991 991 */ 992 992 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 993 - smp_mb__after_atomic_inc(); 993 + smp_mb__after_atomic(); 994 994 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 995 995 996 996 spin_lock_bh(&port->sep_alua_lock); ··· 1020 1020 1021 1021 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1022 1022 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 1023 - smp_mb__after_atomic_dec(); 1023 + smp_mb__after_atomic(); 1024 1024 } 1025 1025 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1026 1026 /* ··· 1054 1054 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); 1055 1055 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1056 1056 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1057 - smp_mb__after_atomic_dec(); 1057 + smp_mb__after_atomic(); 1058 1058 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1059 1059 1060 1060 if (tg_pt_gp->tg_pt_gp_transition_complete) ··· 1116 1116 */ 1117 1117 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1118 1118 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1119 - smp_mb__after_atomic_inc(); 1119 + smp_mb__after_atomic(); 1120 1120 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1121 1121 1122 1122 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { ··· 1159 1159 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1160 1160 lu_gp = local_lu_gp_mem->lu_gp; 1161 1161 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1162 - smp_mb__after_atomic_inc(); 1162 + smp_mb__after_atomic(); 1163 1163 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1164 1164 /* 1165 1165 * For storage objects that are members of the 
'default_lu_gp', ··· 1176 1176 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1177 1177 new_state, explicit); 1178 1178 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1179 - smp_mb__after_atomic_dec(); 1179 + smp_mb__after_atomic(); 1180 1180 return rc; 1181 1181 } 1182 1182 /* ··· 1190 1190 1191 1191 dev = lu_gp_mem->lu_gp_mem_dev; 1192 1192 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 1193 - smp_mb__after_atomic_inc(); 1193 + smp_mb__after_atomic(); 1194 1194 spin_unlock(&lu_gp->lu_gp_lock); 1195 1195 1196 1196 spin_lock(&dev->t10_alua.tg_pt_gps_lock); ··· 1219 1219 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1220 1220 } 1221 1221 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1222 - smp_mb__after_atomic_inc(); 1222 + smp_mb__after_atomic(); 1223 1223 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1224 1224 /* 1225 1225 * core_alua_do_transition_tg_pt() will always return ··· 1230 1230 1231 1231 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1232 1232 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1233 - smp_mb__after_atomic_dec(); 1233 + smp_mb__after_atomic(); 1234 1234 if (rc) 1235 1235 break; 1236 1236 } ··· 1238 1238 1239 1239 spin_lock(&lu_gp->lu_gp_lock); 1240 1240 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 1241 - smp_mb__after_atomic_dec(); 1241 + smp_mb__after_atomic(); 1242 1242 } 1243 1243 spin_unlock(&lu_gp->lu_gp_lock); 1244 1244 ··· 1252 1252 } 1253 1253 1254 1254 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1255 - smp_mb__after_atomic_dec(); 1255 + smp_mb__after_atomic(); 1256 1256 return rc; 1257 1257 } 1258 1258
+3 -3
drivers/target/target_core_device.c
··· 225 225 continue; 226 226 227 227 atomic_inc(&deve->pr_ref_count); 228 - smp_mb__after_atomic_inc(); 228 + smp_mb__after_atomic(); 229 229 spin_unlock_irq(&nacl->device_list_lock); 230 230 231 231 return deve; ··· 1392 1392 spin_lock(&lun->lun_acl_lock); 1393 1393 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1394 1394 atomic_inc(&lun->lun_acl_count); 1395 - smp_mb__after_atomic_inc(); 1395 + smp_mb__after_atomic(); 1396 1396 spin_unlock(&lun->lun_acl_lock); 1397 1397 1398 1398 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " ··· 1426 1426 spin_lock(&lun->lun_acl_lock); 1427 1427 list_del(&lacl->lacl_list); 1428 1428 atomic_dec(&lun->lun_acl_count); 1429 - smp_mb__after_atomic_dec(); 1429 + smp_mb__after_atomic(); 1430 1430 spin_unlock(&lun->lun_acl_lock); 1431 1431 1432 1432 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+1 -1
drivers/target/target_core_iblock.c
··· 323 323 * Bump the ib_bio_err_cnt and release bio. 324 324 */ 325 325 atomic_inc(&ibr->ib_bio_err_cnt); 326 - smp_mb__after_atomic_inc(); 326 + smp_mb__after_atomic(); 327 327 } 328 328 329 329 bio_put(bio);
+28 -28
drivers/target/target_core_pr.c
··· 675 675 spin_lock(&dev->se_port_lock); 676 676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 677 677 atomic_inc(&port->sep_tg_pt_ref_cnt); 678 - smp_mb__after_atomic_inc(); 678 + smp_mb__after_atomic(); 679 679 spin_unlock(&dev->se_port_lock); 680 680 681 681 spin_lock_bh(&port->sep_alua_lock); ··· 710 710 continue; 711 711 712 712 atomic_inc(&deve_tmp->pr_ref_count); 713 - smp_mb__after_atomic_inc(); 713 + smp_mb__after_atomic(); 714 714 spin_unlock_bh(&port->sep_alua_lock); 715 715 /* 716 716 * Grab a configfs group dependency that is released ··· 723 723 pr_err("core_scsi3_lunacl_depend" 724 724 "_item() failed\n"); 725 725 atomic_dec(&port->sep_tg_pt_ref_cnt); 726 - smp_mb__after_atomic_dec(); 726 + smp_mb__after_atomic(); 727 727 atomic_dec(&deve_tmp->pr_ref_count); 728 - smp_mb__after_atomic_dec(); 728 + smp_mb__after_atomic(); 729 729 goto out; 730 730 } 731 731 /* ··· 740 740 sa_res_key, all_tg_pt, aptpl); 741 741 if (!pr_reg_atp) { 742 742 atomic_dec(&port->sep_tg_pt_ref_cnt); 743 - smp_mb__after_atomic_dec(); 743 + smp_mb__after_atomic(); 744 744 atomic_dec(&deve_tmp->pr_ref_count); 745 - smp_mb__after_atomic_dec(); 745 + smp_mb__after_atomic(); 746 746 core_scsi3_lunacl_undepend_item(deve_tmp); 747 747 goto out; 748 748 } ··· 755 755 756 756 spin_lock(&dev->se_port_lock); 757 757 atomic_dec(&port->sep_tg_pt_ref_cnt); 758 - smp_mb__after_atomic_dec(); 758 + smp_mb__after_atomic(); 759 759 } 760 760 spin_unlock(&dev->se_port_lock); 761 761 ··· 1110 1110 continue; 1111 1111 } 1112 1112 atomic_inc(&pr_reg->pr_res_holders); 1113 - smp_mb__after_atomic_inc(); 1113 + smp_mb__after_atomic(); 1114 1114 spin_unlock(&pr_tmpl->registration_lock); 1115 1115 return pr_reg; 1116 1116 } ··· 1125 1125 continue; 1126 1126 1127 1127 atomic_inc(&pr_reg->pr_res_holders); 1128 - smp_mb__after_atomic_inc(); 1128 + smp_mb__after_atomic(); 1129 1129 spin_unlock(&pr_tmpl->registration_lock); 1130 1130 return pr_reg; 1131 1131 } ··· 1155 1155 static 
void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) 1156 1156 { 1157 1157 atomic_dec(&pr_reg->pr_res_holders); 1158 - smp_mb__after_atomic_dec(); 1158 + smp_mb__after_atomic(); 1159 1159 } 1160 1160 1161 1161 static int core_scsi3_check_implicit_release( ··· 1349 1349 &tpg->tpg_group.cg_item); 1350 1350 1351 1351 atomic_dec(&tpg->tpg_pr_ref_count); 1352 - smp_mb__after_atomic_dec(); 1352 + smp_mb__after_atomic(); 1353 1353 } 1354 1354 1355 1355 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) ··· 1369 1369 1370 1370 if (nacl->dynamic_node_acl) { 1371 1371 atomic_dec(&nacl->acl_pr_ref_count); 1372 - smp_mb__after_atomic_dec(); 1372 + smp_mb__after_atomic(); 1373 1373 return; 1374 1374 } 1375 1375 ··· 1377 1377 &nacl->acl_group.cg_item); 1378 1378 1379 1379 atomic_dec(&nacl->acl_pr_ref_count); 1380 - smp_mb__after_atomic_dec(); 1380 + smp_mb__after_atomic(); 1381 1381 } 1382 1382 1383 1383 static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) ··· 1408 1408 */ 1409 1409 if (!lun_acl) { 1410 1410 atomic_dec(&se_deve->pr_ref_count); 1411 - smp_mb__after_atomic_dec(); 1411 + smp_mb__after_atomic(); 1412 1412 return; 1413 1413 } 1414 1414 nacl = lun_acl->se_lun_nacl; ··· 1418 1418 &lun_acl->se_lun_group.cg_item); 1419 1419 1420 1420 atomic_dec(&se_deve->pr_ref_count); 1421 - smp_mb__after_atomic_dec(); 1421 + smp_mb__after_atomic(); 1422 1422 } 1423 1423 1424 1424 static sense_reason_t ··· 1552 1552 continue; 1553 1553 1554 1554 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1555 - smp_mb__after_atomic_inc(); 1555 + smp_mb__after_atomic(); 1556 1556 spin_unlock(&dev->se_port_lock); 1557 1557 1558 1558 if (core_scsi3_tpg_depend_item(tmp_tpg)) { 1559 1559 pr_err(" core_scsi3_tpg_depend_item()" 1560 1560 " for tmp_tpg\n"); 1561 1561 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1562 - smp_mb__after_atomic_dec(); 1562 + smp_mb__after_atomic(); 1563 1563 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1564 1564 goto out_unmap; 1565 1565 } ··· 
1573 1573 tmp_tpg, i_str); 1574 1574 if (dest_node_acl) { 1575 1575 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1576 - smp_mb__after_atomic_inc(); 1576 + smp_mb__after_atomic(); 1577 1577 } 1578 1578 spin_unlock_irq(&tmp_tpg->acl_node_lock); 1579 1579 ··· 1587 1587 pr_err("configfs_depend_item() failed" 1588 1588 " for dest_node_acl->acl_group\n"); 1589 1589 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1590 - smp_mb__after_atomic_dec(); 1590 + smp_mb__after_atomic(); 1591 1591 core_scsi3_tpg_undepend_item(tmp_tpg); 1592 1592 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1593 1593 goto out_unmap; ··· 1647 1647 pr_err("core_scsi3_lunacl_depend_item()" 1648 1648 " failed\n"); 1649 1649 atomic_dec(&dest_se_deve->pr_ref_count); 1650 - smp_mb__after_atomic_dec(); 1650 + smp_mb__after_atomic(); 1651 1651 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1652 1652 core_scsi3_tpg_undepend_item(dest_tpg); 1653 1653 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; ··· 3168 3168 continue; 3169 3169 3170 3170 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3171 - smp_mb__after_atomic_inc(); 3171 + smp_mb__after_atomic(); 3172 3172 spin_unlock(&dev->se_port_lock); 3173 3173 3174 3174 if (core_scsi3_tpg_depend_item(dest_se_tpg)) { 3175 3175 pr_err("core_scsi3_tpg_depend_item() failed" 3176 3176 " for dest_se_tpg\n"); 3177 3177 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3178 - smp_mb__after_atomic_dec(); 3178 + smp_mb__after_atomic(); 3179 3179 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3180 3180 goto out_put_pr_reg; 3181 3181 } ··· 3273 3273 initiator_str); 3274 3274 if (dest_node_acl) { 3275 3275 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3276 - smp_mb__after_atomic_inc(); 3276 + smp_mb__after_atomic(); 3277 3277 } 3278 3278 spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3279 3279 ··· 3289 3289 pr_err("core_scsi3_nodeacl_depend_item() for" 3290 3290 " dest_node_acl\n"); 3291 3291 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3292 - smp_mb__after_atomic_dec(); 3292 + 
smp_mb__after_atomic(); 3293 3293 dest_node_acl = NULL; 3294 3294 ret = TCM_INVALID_PARAMETER_LIST; 3295 3295 goto out; ··· 3314 3314 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 3315 3315 pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3316 3316 atomic_dec(&dest_se_deve->pr_ref_count); 3317 - smp_mb__after_atomic_dec(); 3317 + smp_mb__after_atomic(); 3318 3318 dest_se_deve = NULL; 3319 3319 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3320 3320 goto out; ··· 3880 3880 add_desc_len = 0; 3881 3881 3882 3882 atomic_inc(&pr_reg->pr_res_holders); 3883 - smp_mb__after_atomic_inc(); 3883 + smp_mb__after_atomic(); 3884 3884 spin_unlock(&pr_tmpl->registration_lock); 3885 3885 /* 3886 3886 * Determine expected length of $FABRIC_MOD specific ··· 3894 3894 " out of buffer: %d\n", cmd->data_length); 3895 3895 spin_lock(&pr_tmpl->registration_lock); 3896 3896 atomic_dec(&pr_reg->pr_res_holders); 3897 - smp_mb__after_atomic_dec(); 3897 + smp_mb__after_atomic(); 3898 3898 break; 3899 3899 } 3900 3900 /* ··· 3956 3956 3957 3957 spin_lock(&pr_tmpl->registration_lock); 3958 3958 atomic_dec(&pr_reg->pr_res_holders); 3959 - smp_mb__after_atomic_dec(); 3959 + smp_mb__after_atomic(); 3960 3960 /* 3961 3961 * Set the ADDITIONAL DESCRIPTOR LENGTH 3962 3962 */
+8 -8
drivers/target/target_core_transport.c
··· 736 736 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 737 737 list_del(&cmd->se_qf_node); 738 738 atomic_dec(&dev->dev_qf_count); 739 - smp_mb__after_atomic_dec(); 739 + smp_mb__after_atomic(); 740 740 741 741 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 742 742 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, ··· 1148 1148 * Dormant to Active status. 1149 1149 */ 1150 1150 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1151 - smp_mb__after_atomic_inc(); 1151 + smp_mb__after_atomic(); 1152 1152 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1153 1153 cmd->se_ordered_id, cmd->sam_task_attr, 1154 1154 dev->transport->name); ··· 1705 1705 return false; 1706 1706 case MSG_ORDERED_TAG: 1707 1707 atomic_inc(&dev->dev_ordered_sync); 1708 - smp_mb__after_atomic_inc(); 1708 + smp_mb__after_atomic(); 1709 1709 1710 1710 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1711 1711 " se_ordered_id: %u\n", ··· 1723 1723 * For SIMPLE and UNTAGGED Task Attribute commands 1724 1724 */ 1725 1725 atomic_inc(&dev->simple_cmds); 1726 - smp_mb__after_atomic_inc(); 1726 + smp_mb__after_atomic(); 1727 1727 break; 1728 1728 } 1729 1729 ··· 1828 1828 1829 1829 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1830 1830 atomic_dec(&dev->simple_cmds); 1831 - smp_mb__after_atomic_dec(); 1831 + smp_mb__after_atomic(); 1832 1832 dev->dev_cur_ordered_id++; 1833 1833 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1834 1834 " SIMPLE: %u\n", dev->dev_cur_ordered_id, ··· 1840 1840 cmd->se_ordered_id); 1841 1841 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1842 1842 atomic_dec(&dev->dev_ordered_sync); 1843 - smp_mb__after_atomic_dec(); 1843 + smp_mb__after_atomic(); 1844 1844 1845 1845 dev->dev_cur_ordered_id++; 1846 1846 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" ··· 1899 1899 spin_lock_irq(&dev->qf_cmd_lock); 1900 1900 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1901 
1901 atomic_inc(&dev->dev_qf_count); 1902 - smp_mb__after_atomic_inc(); 1902 + smp_mb__after_atomic(); 1903 1903 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1904 1904 1905 1905 schedule_work(&cmd->se_dev->qf_work_queue); ··· 2875 2875 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2876 2876 cmd->transport_state |= CMD_T_ABORTED; 2877 2877 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2878 - smp_mb__after_atomic_inc(); 2878 + smp_mb__after_atomic(); 2879 2879 return; 2880 2880 } 2881 2881 }
+5 -5
drivers/target/target_core_ua.c
··· 162 162 spin_unlock_irq(&nacl->device_list_lock); 163 163 164 164 atomic_inc(&deve->ua_count); 165 - smp_mb__after_atomic_inc(); 165 + smp_mb__after_atomic(); 166 166 return 0; 167 167 } 168 168 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); ··· 175 175 asc, ascq); 176 176 177 177 atomic_inc(&deve->ua_count); 178 - smp_mb__after_atomic_inc(); 178 + smp_mb__after_atomic(); 179 179 return 0; 180 180 } 181 181 ··· 190 190 kmem_cache_free(se_ua_cache, ua); 191 191 192 192 atomic_dec(&deve->ua_count); 193 - smp_mb__after_atomic_dec(); 193 + smp_mb__after_atomic(); 194 194 } 195 195 spin_unlock(&deve->ua_lock); 196 196 } ··· 251 251 kmem_cache_free(se_ua_cache, ua); 252 252 253 253 atomic_dec(&deve->ua_count); 254 - smp_mb__after_atomic_dec(); 254 + smp_mb__after_atomic(); 255 255 } 256 256 spin_unlock(&deve->ua_lock); 257 257 spin_unlock_irq(&nacl->device_list_lock); ··· 310 310 kmem_cache_free(se_ua_cache, ua); 311 311 312 312 atomic_dec(&deve->ua_count); 313 - smp_mb__after_atomic_dec(); 313 + smp_mb__after_atomic(); 314 314 } 315 315 spin_unlock(&deve->ua_lock); 316 316 spin_unlock_irq(&nacl->device_list_lock);
+1 -1
drivers/tty/n_tty.c
··· 2041 2041 2042 2042 if (found) 2043 2043 clear_bit(eol, ldata->read_flags); 2044 - smp_mb__after_clear_bit(); 2044 + smp_mb__after_atomic(); 2045 2045 ldata->read_tail += c; 2046 2046 2047 2047 if (found) {
+2 -2
drivers/tty/serial/mxs-auart.c
··· 200 200 201 201 /* clear the bit used to serialize the DMA tx. */ 202 202 clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); 203 - smp_mb__after_clear_bit(); 203 + smp_mb__after_atomic(); 204 204 205 205 /* wake up the possible processes. */ 206 206 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ··· 275 275 mxs_auart_dma_tx(s, i); 276 276 } else { 277 277 clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); 278 - smp_mb__after_clear_bit(); 278 + smp_mb__after_atomic(); 279 279 } 280 280 return; 281 281 }
+2 -2
drivers/usb/gadget/tcm_usb_gadget.c
··· 1851 1851 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); 1852 1852 1853 1853 atomic_inc(&tpg->tpg_port_count); 1854 - smp_mb__after_atomic_inc(); 1854 + smp_mb__after_atomic(); 1855 1855 return 0; 1856 1856 } 1857 1857 ··· 1861 1861 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg); 1862 1862 1863 1863 atomic_dec(&tpg->tpg_port_count); 1864 - smp_mb__after_atomic_dec(); 1864 + smp_mb__after_atomic(); 1865 1865 } 1866 1866 1867 1867 static int usbg_check_stop_free(struct se_cmd *se_cmd)
+1 -1
drivers/usb/serial/usb_wwan.c
··· 325 325 326 326 for (i = 0; i < N_OUT_URB; ++i) { 327 327 if (portdata->out_urbs[i] == urb) { 328 - smp_mb__before_clear_bit(); 328 + smp_mb__before_atomic(); 329 329 clear_bit(i, &portdata->out_busy); 330 330 break; 331 331 }
+1 -1
drivers/vhost/scsi.c
··· 1255 1255 tpg->tv_tpg_vhost_count++; 1256 1256 tpg->vhost_scsi = vs; 1257 1257 vs_tpg[tpg->tport_tpgt] = tpg; 1258 - smp_mb__after_atomic_inc(); 1258 + smp_mb__after_atomic(); 1259 1259 match = true; 1260 1260 } 1261 1261 mutex_unlock(&tpg->tv_tpg_mutex);
+2 -2
drivers/w1/w1_family.c
··· 139 139 140 140 void __w1_family_get(struct w1_family *f) 141 141 { 142 - smp_mb__before_atomic_inc(); 142 + smp_mb__before_atomic(); 143 143 atomic_inc(&f->refcnt); 144 - smp_mb__after_atomic_inc(); 144 + smp_mb__after_atomic(); 145 145 } 146 146 147 147 EXPORT_SYMBOL(w1_unregister_family);
+2 -2
drivers/xen/xen-pciback/pciback_ops.c
··· 348 348 notify_remote_via_irq(pdev->evtchn_irq); 349 349 350 350 /* Mark that we're done. */ 351 - smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */ 351 + smp_mb__before_atomic(); /* /after/ clearing PCIF_active */ 352 352 clear_bit(_PDEVF_op_active, &pdev->flags); 353 - smp_mb__after_clear_bit(); /* /before/ final check for work */ 353 + smp_mb__after_atomic(); /* /before/ final check for work */ 354 354 355 355 /* Check to see if the driver domain tried to start another request in 356 356 * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
+1 -1
fs/btrfs/btrfs_inode.h
··· 279 279 280 280 static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode) 281 281 { 282 - smp_mb__before_clear_bit(); 282 + smp_mb__before_atomic(); 283 283 clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, 284 284 &BTRFS_I(inode)->runtime_flags); 285 285 }
+1 -1
fs/btrfs/extent_io.c
··· 3458 3458 static void end_extent_buffer_writeback(struct extent_buffer *eb) 3459 3459 { 3460 3460 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3461 - smp_mb__after_clear_bit(); 3461 + smp_mb__after_atomic(); 3462 3462 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); 3463 3463 } 3464 3464
+3 -3
fs/btrfs/inode.c
··· 7126 7126 * before atomic variable goto zero, we must make sure 7127 7127 * dip->errors is perceived to be set. 7128 7128 */ 7129 - smp_mb__before_atomic_dec(); 7129 + smp_mb__before_atomic(); 7130 7130 } 7131 7131 7132 7132 /* if there are more bios still pending for this dio, just exit */ ··· 7306 7306 * before atomic variable goto zero, we must 7307 7307 * make sure dip->errors is perceived to be set. 7308 7308 */ 7309 - smp_mb__before_atomic_dec(); 7309 + smp_mb__before_atomic(); 7310 7310 if (atomic_dec_and_test(&dip->pending_bios)) 7311 7311 bio_io_error(dip->orig_bio); 7312 7312 ··· 7449 7449 return 0; 7450 7450 7451 7451 atomic_inc(&inode->i_dio_count); 7452 - smp_mb__after_atomic_inc(); 7452 + smp_mb__after_atomic(); 7453 7453 7454 7454 /* 7455 7455 * The generic stuff only does filemap_write_and_wait_range, which
+1 -1
fs/btrfs/ioctl.c
··· 642 642 return -EINVAL; 643 643 644 644 atomic_inc(&root->will_be_snapshoted); 645 - smp_mb__after_atomic_inc(); 645 + smp_mb__after_atomic(); 646 646 btrfs_wait_nocow_write(root); 647 647 648 648 ret = btrfs_start_delalloc_inodes(root, 0);
+1 -1
fs/buffer.c
··· 77 77 void unlock_buffer(struct buffer_head *bh) 78 78 { 79 79 clear_bit_unlock(BH_Lock, &bh->b_state); 80 - smp_mb__after_clear_bit(); 80 + smp_mb__after_atomic(); 81 81 wake_up_bit(&bh->b_state, BH_Lock); 82 82 } 83 83 EXPORT_SYMBOL(unlock_buffer);
+1 -1
fs/ext4/resize.c
··· 42 42 void ext4_resize_end(struct super_block *sb) 43 43 { 44 44 clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags); 45 - smp_mb__after_clear_bit(); 45 + smp_mb__after_atomic(); 46 46 } 47 47 48 48 static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
+4 -4
fs/gfs2/glock.c
··· 277 277 static void gfs2_holder_wake(struct gfs2_holder *gh) 278 278 { 279 279 clear_bit(HIF_WAIT, &gh->gh_iflags); 280 - smp_mb__after_clear_bit(); 280 + smp_mb__after_atomic(); 281 281 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 282 282 } 283 283 ··· 411 411 { 412 412 gl->gl_demote_state = LM_ST_EXCLUSIVE; 413 413 clear_bit(GLF_DEMOTE, &gl->gl_flags); 414 - smp_mb__after_clear_bit(); 414 + smp_mb__after_atomic(); 415 415 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 416 416 } 417 417 ··· 620 620 621 621 out_sched: 622 622 clear_bit(GLF_LOCK, &gl->gl_flags); 623 - smp_mb__after_clear_bit(); 623 + smp_mb__after_atomic(); 624 624 gl->gl_lockref.count++; 625 625 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 626 626 gl->gl_lockref.count--; ··· 628 628 629 629 out_unlock: 630 630 clear_bit(GLF_LOCK, &gl->gl_flags); 631 - smp_mb__after_clear_bit(); 631 + smp_mb__after_atomic(); 632 632 return; 633 633 } 634 634
+1 -1
fs/gfs2/glops.c
··· 221 221 * Writeback of the data mapping may cause the dirty flag to be set 222 222 * so we have to clear it again here. 223 223 */ 224 - smp_mb__before_clear_bit(); 224 + smp_mb__before_atomic(); 225 225 clear_bit(GLF_DIRTY, &gl->gl_flags); 226 226 } 227 227
+2 -2
fs/gfs2/lock_dlm.c
··· 1134 1134 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0); 1135 1135 1136 1136 clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); 1137 - smp_mb__after_clear_bit(); 1137 + smp_mb__after_atomic(); 1138 1138 wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY); 1139 1139 spin_unlock(&ls->ls_recover_spin); 1140 1140 } ··· 1271 1271 1272 1272 ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags); 1273 1273 clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); 1274 - smp_mb__after_clear_bit(); 1274 + smp_mb__after_atomic(); 1275 1275 wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); 1276 1276 return 0; 1277 1277
+1 -1
fs/gfs2/recovery.c
··· 587 587 gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); 588 588 done: 589 589 clear_bit(JDF_RECOVERY, &jd->jd_flags); 590 - smp_mb__after_clear_bit(); 590 + smp_mb__after_atomic(); 591 591 wake_up_bit(&jd->jd_flags, JDF_RECOVERY); 592 592 } 593 593
+2 -2
fs/gfs2/sys.c
··· 333 333 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); 334 334 else if (val == 0) { 335 335 clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); 336 - smp_mb__after_clear_bit(); 336 + smp_mb__after_atomic(); 337 337 gfs2_glock_thaw(sdp); 338 338 } else { 339 339 ret = -EINVAL; ··· 482 482 rv = jid = -EINVAL; 483 483 sdp->sd_lockstruct.ls_jid = jid; 484 484 clear_bit(SDF_NOJOURNALID, &sdp->sd_flags); 485 - smp_mb__after_clear_bit(); 485 + smp_mb__after_atomic(); 486 486 wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID); 487 487 out: 488 488 spin_unlock(&sdp->sd_jindex_spin);
+3 -3
fs/jbd2/commit.c
··· 43 43 clear_buffer_uptodate(bh); 44 44 if (orig_bh) { 45 45 clear_bit_unlock(BH_Shadow, &orig_bh->b_state); 46 - smp_mb__after_clear_bit(); 46 + smp_mb__after_atomic(); 47 47 wake_up_bit(&orig_bh->b_state, BH_Shadow); 48 48 } 49 49 unlock_buffer(bh); ··· 239 239 spin_lock(&journal->j_list_lock); 240 240 J_ASSERT(jinode->i_transaction == commit_transaction); 241 241 clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); 242 - smp_mb__after_clear_bit(); 242 + smp_mb__after_atomic(); 243 243 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); 244 244 } 245 245 spin_unlock(&journal->j_list_lock); ··· 277 277 } 278 278 spin_lock(&journal->j_list_lock); 279 279 clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); 280 - smp_mb__after_clear_bit(); 280 + smp_mb__after_atomic(); 281 281 wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); 282 282 } 283 283
+6 -6
fs/nfs/dir.c
··· 2032 2032 { 2033 2033 put_rpccred(entry->cred); 2034 2034 kfree(entry); 2035 - smp_mb__before_atomic_dec(); 2035 + smp_mb__before_atomic(); 2036 2036 atomic_long_dec(&nfs_access_nr_entries); 2037 - smp_mb__after_atomic_dec(); 2037 + smp_mb__after_atomic(); 2038 2038 } 2039 2039 2040 2040 static void nfs_access_free_list(struct list_head *head) ··· 2082 2082 else { 2083 2083 remove_lru_entry: 2084 2084 list_del_init(&nfsi->access_cache_inode_lru); 2085 - smp_mb__before_clear_bit(); 2085 + smp_mb__before_atomic(); 2086 2086 clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags); 2087 - smp_mb__after_clear_bit(); 2087 + smp_mb__after_atomic(); 2088 2088 } 2089 2089 spin_unlock(&inode->i_lock); 2090 2090 } ··· 2232 2232 nfs_access_add_rbtree(inode, cache); 2233 2233 2234 2234 /* Update accounting */ 2235 - smp_mb__before_atomic_inc(); 2235 + smp_mb__before_atomic(); 2236 2236 atomic_long_inc(&nfs_access_nr_entries); 2237 - smp_mb__after_atomic_inc(); 2237 + smp_mb__after_atomic(); 2238 2238 2239 2239 /* Add inode to global LRU list */ 2240 2240 if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
+1 -1
fs/nfs/inode.c
··· 1085 1085 trace_nfs_invalidate_mapping_exit(inode, ret); 1086 1086 1087 1087 clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); 1088 - smp_mb__after_clear_bit(); 1088 + smp_mb__after_atomic(); 1089 1089 wake_up_bit(bitlock, NFS_INO_INVALIDATING); 1090 1090 out: 1091 1091 return ret;
+2 -2
fs/nfs/nfs4filelayoutdev.c
··· 789 789 790 790 static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) 791 791 { 792 - smp_mb__before_clear_bit(); 792 + smp_mb__before_atomic(); 793 793 clear_bit(NFS4DS_CONNECTING, &ds->ds_state); 794 - smp_mb__after_clear_bit(); 794 + smp_mb__after_atomic(); 795 795 wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING); 796 796 } 797 797
+2 -2
fs/nfs/nfs4state.c
··· 1140 1140 1141 1141 static void nfs4_clear_state_manager_bit(struct nfs_client *clp) 1142 1142 { 1143 - smp_mb__before_clear_bit(); 1143 + smp_mb__before_atomic(); 1144 1144 clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); 1145 - smp_mb__after_clear_bit(); 1145 + smp_mb__after_atomic(); 1146 1146 wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); 1147 1147 rpc_wake_up(&clp->cl_rpcwaitq); 1148 1148 }
+3 -3
fs/nfs/pagelist.c
··· 95 95 { 96 96 if (atomic_dec_and_test(&c->io_count)) { 97 97 clear_bit(NFS_IO_INPROGRESS, &c->flags); 98 - smp_mb__after_clear_bit(); 98 + smp_mb__after_atomic(); 99 99 wake_up_bit(&c->flags, NFS_IO_INPROGRESS); 100 100 } 101 101 } ··· 193 193 printk(KERN_ERR "NFS: Invalid unlock attempted\n"); 194 194 BUG(); 195 195 } 196 - smp_mb__before_clear_bit(); 196 + smp_mb__before_atomic(); 197 197 clear_bit(PG_BUSY, &req->wb_flags); 198 - smp_mb__after_clear_bit(); 198 + smp_mb__after_atomic(); 199 199 wake_up_bit(&req->wb_flags, PG_BUSY); 200 200 } 201 201
+1 -1
fs/nfs/pnfs.c
··· 1810 1810 unsigned long *bitlock = &NFS_I(inode)->flags; 1811 1811 1812 1812 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); 1813 - smp_mb__after_clear_bit(); 1813 + smp_mb__after_atomic(); 1814 1814 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); 1815 1815 } 1816 1816
+1 -1
fs/nfs/pnfs.h
··· 275 275 { 276 276 if (lseg) { 277 277 atomic_inc(&lseg->pls_refcount); 278 - smp_mb__after_atomic_inc(); 278 + smp_mb__after_atomic(); 279 279 } 280 280 return lseg; 281 281 }
+2 -2
fs/nfs/write.c
··· 405 405 nfs_pageio_complete(&pgio); 406 406 407 407 clear_bit_unlock(NFS_INO_FLUSHING, bitlock); 408 - smp_mb__after_clear_bit(); 408 + smp_mb__after_atomic(); 409 409 wake_up_bit(bitlock, NFS_INO_FLUSHING); 410 410 411 411 if (err < 0) ··· 1458 1458 static void nfs_commit_clear_lock(struct nfs_inode *nfsi) 1459 1459 { 1460 1460 clear_bit(NFS_INO_COMMIT, &nfsi->flags); 1461 - smp_mb__after_clear_bit(); 1461 + smp_mb__after_atomic(); 1462 1462 wake_up_bit(&nfsi->flags, NFS_INO_COMMIT); 1463 1463 } 1464 1464
+2 -2
fs/ubifs/lpt_commit.c
··· 460 460 * important. 461 461 */ 462 462 clear_bit(DIRTY_CNODE, &cnode->flags); 463 - smp_mb__before_clear_bit(); 463 + smp_mb__before_atomic(); 464 464 clear_bit(COW_CNODE, &cnode->flags); 465 - smp_mb__after_clear_bit(); 465 + smp_mb__after_atomic(); 466 466 offs += len; 467 467 dbg_chk_lpt_sz(c, 1, len); 468 468 cnode = cnode->cnext;
+2 -2
fs/ubifs/tnc_commit.c
··· 895 895 * the reason for the second barrier. 896 896 */ 897 897 clear_bit(DIRTY_ZNODE, &znode->flags); 898 - smp_mb__before_clear_bit(); 898 + smp_mb__before_atomic(); 899 899 clear_bit(COW_ZNODE, &znode->flags); 900 - smp_mb__after_clear_bit(); 900 + smp_mb__after_atomic(); 901 901 902 902 /* 903 903 * We have marked the znode as clean but have not updated the
+1 -1
include/asm-generic/bitops/atomic.h
··· 80 80 * 81 81 * clear_bit() is atomic and may not be reordered. However, it does 82 82 * not contain a memory barrier, so if it is used for locking purposes, 83 - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() 83 + * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic() 84 84 * in order to ensure changes are visible on other processors. 85 85 */ 86 86 static inline void clear_bit(int nr, volatile unsigned long *addr)
+1 -1
include/asm-generic/bitops/lock.h
··· 20 20 */ 21 21 #define clear_bit_unlock(nr, addr) \ 22 22 do { \ 23 - smp_mb__before_clear_bit(); \ 23 + smp_mb__before_atomic(); \ 24 24 clear_bit(nr, addr); \ 25 25 } while (0) 26 26
+1 -1
include/linux/buffer_head.h
··· 278 278 279 279 static inline void put_bh(struct buffer_head *bh) 280 280 { 281 - smp_mb__before_atomic_dec(); 281 + smp_mb__before_atomic(); 282 282 atomic_dec(&bh->b_count); 283 283 } 284 284
+1 -1
include/linux/genhd.h
··· 649 649 static inline void hd_struct_get(struct hd_struct *part) 650 650 { 651 651 atomic_inc(&part->ref); 652 - smp_mb__after_atomic_inc(); 652 + smp_mb__after_atomic(); 653 653 } 654 654 655 655 static inline int hd_struct_try_get(struct hd_struct *part)
+4 -4
include/linux/interrupt.h
··· 453 453 454 454 static inline void tasklet_unlock(struct tasklet_struct *t) 455 455 { 456 - smp_mb__before_clear_bit(); 456 + smp_mb__before_atomic(); 457 457 clear_bit(TASKLET_STATE_RUN, &(t)->state); 458 458 } 459 459 ··· 501 501 static inline void tasklet_disable_nosync(struct tasklet_struct *t) 502 502 { 503 503 atomic_inc(&t->count); 504 - smp_mb__after_atomic_inc(); 504 + smp_mb__after_atomic(); 505 505 } 506 506 507 507 static inline void tasklet_disable(struct tasklet_struct *t) ··· 513 513 514 514 static inline void tasklet_enable(struct tasklet_struct *t) 515 515 { 516 - smp_mb__before_atomic_dec(); 516 + smp_mb__before_atomic(); 517 517 atomic_dec(&t->count); 518 518 } 519 519 520 520 static inline void tasklet_hi_enable(struct tasklet_struct *t) 521 521 { 522 - smp_mb__before_atomic_dec(); 522 + smp_mb__before_atomic(); 523 523 atomic_dec(&t->count); 524 524 } 525 525
+1 -1
include/linux/netdevice.h
··· 493 493 static inline void napi_enable(struct napi_struct *n) 494 494 { 495 495 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 496 - smp_mb__before_clear_bit(); 496 + smp_mb__before_atomic(); 497 497 clear_bit(NAPI_STATE_SCHED, &n->state); 498 498 } 499 499
+2 -4
include/linux/sched.h
··· 2782 2782 /* 2783 2783 * Polling state must be visible before we test NEED_RESCHED, 2784 2784 * paired by resched_task() 2785 - * 2786 - * XXX: assumes set/clear bit are identical barrier wise. 2787 2785 */ 2788 - smp_mb__after_clear_bit(); 2786 + smp_mb__after_atomic(); 2789 2787 2790 2788 return unlikely(tif_need_resched()); 2791 2789 } ··· 2801 2803 * Polling state must be visible before we test NEED_RESCHED, 2802 2804 * paired by resched_task() 2803 2805 */ 2804 - smp_mb__after_clear_bit(); 2806 + smp_mb__after_atomic(); 2805 2807 2806 2808 return unlikely(tif_need_resched()); 2807 2809 }
+4 -4
include/linux/sunrpc/sched.h
··· 142 142 test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) 143 143 #define rpc_clear_running(t) \ 144 144 do { \ 145 - smp_mb__before_clear_bit(); \ 145 + smp_mb__before_atomic(); \ 146 146 clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ 147 - smp_mb__after_clear_bit(); \ 147 + smp_mb__after_atomic(); \ 148 148 } while (0) 149 149 150 150 #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) 151 151 #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) 152 152 #define rpc_clear_queued(t) \ 153 153 do { \ 154 - smp_mb__before_clear_bit(); \ 154 + smp_mb__before_atomic(); \ 155 155 clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ 156 - smp_mb__after_clear_bit(); \ 156 + smp_mb__after_atomic(); \ 157 157 } while (0) 158 158 159 159 #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
+4 -4
include/linux/sunrpc/xprt.h
··· 379 379 380 380 static inline void xprt_clear_connecting(struct rpc_xprt *xprt) 381 381 { 382 - smp_mb__before_clear_bit(); 382 + smp_mb__before_atomic(); 383 383 clear_bit(XPRT_CONNECTING, &xprt->state); 384 - smp_mb__after_clear_bit(); 384 + smp_mb__after_atomic(); 385 385 } 386 386 387 387 static inline int xprt_connecting(struct rpc_xprt *xprt) ··· 411 411 412 412 static inline void xprt_clear_binding(struct rpc_xprt *xprt) 413 413 { 414 - smp_mb__before_clear_bit(); 414 + smp_mb__before_atomic(); 415 415 clear_bit(XPRT_BINDING, &xprt->state); 416 - smp_mb__after_clear_bit(); 416 + smp_mb__after_atomic(); 417 417 } 418 418 419 419 static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
+1 -1
include/linux/tracehook.h
··· 191 191 * pairs with task_work_add()->set_notify_resume() after 192 192 * hlist_add_head(task->task_works); 193 193 */ 194 - smp_mb__after_clear_bit(); 194 + smp_mb__after_atomic(); 195 195 if (unlikely(current->task_works)) 196 196 task_work_run(); 197 197 }
+2 -2
include/net/ip_vs.h
··· 1204 1204 /* put back the conn without restarting its timer */ 1205 1205 static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 1206 1206 { 1207 - smp_mb__before_atomic_dec(); 1207 + smp_mb__before_atomic(); 1208 1208 atomic_dec(&cp->refcnt); 1209 1209 } 1210 1210 void ip_vs_conn_put(struct ip_vs_conn *cp); ··· 1408 1408 1409 1409 static inline void ip_vs_dest_put(struct ip_vs_dest *dest) 1410 1410 { 1411 - smp_mb__before_atomic_dec(); 1411 + smp_mb__before_atomic(); 1412 1412 atomic_dec(&dest->refcnt); 1413 1413 } 1414 1414
+2 -2
kernel/debug/debug_core.c
··· 534 534 kgdb_info[cpu].exception_state &= 535 535 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); 536 536 kgdb_info[cpu].enter_kgdb--; 537 - smp_mb__before_atomic_dec(); 537 + smp_mb__before_atomic(); 538 538 atomic_dec(&slaves_in_kgdb); 539 539 dbg_touch_watchdogs(); 540 540 local_irq_restore(flags); ··· 662 662 kgdb_info[cpu].exception_state &= 663 663 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE); 664 664 kgdb_info[cpu].enter_kgdb--; 665 - smp_mb__before_atomic_dec(); 665 + smp_mb__before_atomic(); 666 666 atomic_dec(&masters_in_kgdb); 667 667 /* Free kgdb_active */ 668 668 atomic_set(&kgdb_active, -1);
+2 -2
kernel/futex.c
··· 267 267 * get_futex_key() implies a full barrier. This is relied upon 268 268 * as full barrier (B), see the ordering comment above. 269 269 */ 270 - smp_mb__after_atomic_inc(); 270 + smp_mb__after_atomic(); 271 271 } 272 272 273 273 /* ··· 280 280 /* 281 281 * Full barrier (A), see the ordering comment above. 282 282 */ 283 - smp_mb__after_atomic_inc(); 283 + smp_mb__after_atomic(); 284 284 #endif 285 285 } 286 286
+1 -1
kernel/kmod.c
··· 498 498 static void helper_lock(void) 499 499 { 500 500 atomic_inc(&running_helpers); 501 - smp_mb__after_atomic_inc(); 501 + smp_mb__after_atomic(); 502 502 } 503 503 504 504 static void helper_unlock(void)
+11 -11
kernel/rcu/tree.c
··· 387 387 } 388 388 rcu_prepare_for_idle(smp_processor_id()); 389 389 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 390 - smp_mb__before_atomic_inc(); /* See above. */ 390 + smp_mb__before_atomic(); /* See above. */ 391 391 atomic_inc(&rdtp->dynticks); 392 - smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */ 392 + smp_mb__after_atomic(); /* Force ordering with next sojourn. */ 393 393 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); 394 394 395 395 /* ··· 507 507 static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, 508 508 int user) 509 509 { 510 - smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ 510 + smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */ 511 511 atomic_inc(&rdtp->dynticks); 512 512 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 513 - smp_mb__after_atomic_inc(); /* See above. */ 513 + smp_mb__after_atomic(); /* See above. */ 514 514 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 515 515 rcu_cleanup_after_idle(smp_processor_id()); 516 516 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting); ··· 635 635 (atomic_read(&rdtp->dynticks) & 0x1)) 636 636 return; 637 637 rdtp->dynticks_nmi_nesting++; 638 - smp_mb__before_atomic_inc(); /* Force delay from prior write. */ 638 + smp_mb__before_atomic(); /* Force delay from prior write. */ 639 639 atomic_inc(&rdtp->dynticks); 640 640 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */ 641 - smp_mb__after_atomic_inc(); /* See above. */ 641 + smp_mb__after_atomic(); /* See above. */ 642 642 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); 643 643 } 644 644 ··· 657 657 --rdtp->dynticks_nmi_nesting != 0) 658 658 return; 659 659 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */ 660 - smp_mb__before_atomic_inc(); /* See above. */ 660 + smp_mb__before_atomic(); /* See above. 
*/ 661 661 atomic_inc(&rdtp->dynticks); 662 - smp_mb__after_atomic_inc(); /* Force delay to next write. */ 662 + smp_mb__after_atomic(); /* Force delay to next write. */ 663 663 WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); 664 664 } 665 665 ··· 2790 2790 s = atomic_long_read(&rsp->expedited_done); 2791 2791 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { 2792 2792 /* ensure test happens before caller kfree */ 2793 - smp_mb__before_atomic_inc(); /* ^^^ */ 2793 + smp_mb__before_atomic(); /* ^^^ */ 2794 2794 atomic_long_inc(&rsp->expedited_workdone1); 2795 2795 return; 2796 2796 } ··· 2808 2808 s = atomic_long_read(&rsp->expedited_done); 2809 2809 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) { 2810 2810 /* ensure test happens before caller kfree */ 2811 - smp_mb__before_atomic_inc(); /* ^^^ */ 2811 + smp_mb__before_atomic(); /* ^^^ */ 2812 2812 atomic_long_inc(&rsp->expedited_workdone2); 2813 2813 return; 2814 2814 } ··· 2837 2837 s = atomic_long_read(&rsp->expedited_done); 2838 2838 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) { 2839 2839 /* ensure test happens before caller kfree */ 2840 - smp_mb__before_atomic_inc(); /* ^^^ */ 2840 + smp_mb__before_atomic(); /* ^^^ */ 2841 2841 atomic_long_inc(&rsp->expedited_done_lost); 2842 2842 break; 2843 2843 }
+4 -4
kernel/rcu/tree_plugin.h
··· 2523 2523 /* Record start of fully idle period. */ 2524 2524 j = jiffies; 2525 2525 ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j; 2526 - smp_mb__before_atomic_inc(); 2526 + smp_mb__before_atomic(); 2527 2527 atomic_inc(&rdtp->dynticks_idle); 2528 - smp_mb__after_atomic_inc(); 2528 + smp_mb__after_atomic(); 2529 2529 WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1); 2530 2530 } 2531 2531 ··· 2590 2590 } 2591 2591 2592 2592 /* Record end of idle period. */ 2593 - smp_mb__before_atomic_inc(); 2593 + smp_mb__before_atomic(); 2594 2594 atomic_inc(&rdtp->dynticks_idle); 2595 - smp_mb__after_atomic_inc(); 2595 + smp_mb__after_atomic(); 2596 2596 WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1)); 2597 2597 2598 2598 /*
+3 -3
kernel/sched/cpupri.c
··· 165 165 * do a write memory barrier, and then update the count, to 166 166 * make sure the vector is visible when count is set. 167 167 */ 168 - smp_mb__before_atomic_inc(); 168 + smp_mb__before_atomic(); 169 169 atomic_inc(&(vec)->count); 170 170 do_mb = 1; 171 171 } ··· 185 185 * the new priority vec. 186 186 */ 187 187 if (do_mb) 188 - smp_mb__after_atomic_inc(); 188 + smp_mb__after_atomic(); 189 189 190 190 /* 191 191 * When removing from the vector, we decrement the counter first 192 192 * do a memory barrier and then clear the mask. 193 193 */ 194 194 atomic_dec(&(vec)->count); 195 - smp_mb__after_atomic_inc(); 195 + smp_mb__after_atomic(); 196 196 cpumask_clear_cpu(cpu, vec->mask); 197 197 } 198 198
+1 -1
kernel/sched/wait.c
··· 394 394 * 395 395 * In order for this to function properly, as it uses waitqueue_active() 396 396 * internally, some kind of memory barrier must be done prior to calling 397 - * this. Typically, this will be smp_mb__after_clear_bit(), but in some 397 + * this. Typically, this will be smp_mb__after_atomic(), but in some 398 398 * cases where bitflags are manipulated non-atomically under a lock, one 399 399 * may need to use a less regular barrier, such fs/inode.c's smp_mb(), 400 400 * because spin_unlock() does not guarantee a memory barrier.
+1 -1
mm/backing-dev.c
··· 557 557 bit = sync ? BDI_sync_congested : BDI_async_congested; 558 558 if (test_and_clear_bit(bit, &bdi->state)) 559 559 atomic_dec(&nr_bdi_congested[sync]); 560 - smp_mb__after_clear_bit(); 560 + smp_mb__after_atomic(); 561 561 if (waitqueue_active(wqh)) 562 562 wake_up(wqh); 563 563 }
+2 -2
mm/filemap.c
··· 740 740 { 741 741 VM_BUG_ON_PAGE(!PageLocked(page), page); 742 742 clear_bit_unlock(PG_locked, &page->flags); 743 - smp_mb__after_clear_bit(); 743 + smp_mb__after_atomic(); 744 744 wake_up_page(page, PG_locked); 745 745 } 746 746 EXPORT_SYMBOL(unlock_page); ··· 757 757 if (!test_clear_page_writeback(page)) 758 758 BUG(); 759 759 760 - smp_mb__after_clear_bit(); 760 + smp_mb__after_atomic(); 761 761 wake_up_page(page, PG_writeback); 762 762 } 763 763 EXPORT_SYMBOL(end_page_writeback);
+1 -1
net/atm/pppoatm.c
··· 252 252 * we need to ensure there's a memory barrier after it. The bit 253 253 * *must* be set before we do the atomic_inc() on pvcc->inflight. 254 254 * There's no smp_mb__after_set_bit(), so it's this or abuse 255 - * smp_mb__after_clear_bit(). 255 + * smp_mb__after_atomic(). 256 256 */ 257 257 test_and_set_bit(BLOCKED, &pvcc->blocked); 258 258
+2 -2
net/bluetooth/hci_event.c
··· 45 45 return; 46 46 47 47 clear_bit(HCI_INQUIRY, &hdev->flags); 48 - smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ 48 + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 49 49 wake_up_bit(&hdev->flags, HCI_INQUIRY); 50 50 51 51 hci_conn_check_pending(hdev); ··· 1768 1768 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) 1769 1769 return; 1770 1770 1771 - smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ 1771 + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 1772 1772 wake_up_bit(&hdev->flags, HCI_INQUIRY); 1773 1773 1774 1774 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
+4 -4
net/core/dev.c
··· 1326 1326 * dev->stop() will invoke napi_disable() on all of it's 1327 1327 * napi_struct instances on this device. 1328 1328 */ 1329 - smp_mb__after_clear_bit(); /* Commit netif_running(). */ 1329 + smp_mb__after_atomic(); /* Commit netif_running(). */ 1330 1330 } 1331 1331 1332 1332 dev_deactivate_many(head); ··· 3343 3343 3344 3344 root_lock = qdisc_lock(q); 3345 3345 if (spin_trylock(root_lock)) { 3346 - smp_mb__before_clear_bit(); 3346 + smp_mb__before_atomic(); 3347 3347 clear_bit(__QDISC_STATE_SCHED, 3348 3348 &q->state); 3349 3349 qdisc_run(q); ··· 3353 3353 &q->state)) { 3354 3354 __netif_reschedule(q); 3355 3355 } else { 3356 - smp_mb__before_clear_bit(); 3356 + smp_mb__before_atomic(); 3357 3357 clear_bit(__QDISC_STATE_SCHED, 3358 3358 &q->state); 3359 3359 } ··· 4244 4244 BUG_ON(n->gro_list); 4245 4245 4246 4246 list_del(&n->poll_list); 4247 - smp_mb__before_clear_bit(); 4247 + smp_mb__before_atomic(); 4248 4248 clear_bit(NAPI_STATE_SCHED, &n->state); 4249 4249 } 4250 4250 EXPORT_SYMBOL(__napi_complete);
+1 -1
net/ipv4/inetpeer.c
··· 522 522 void inet_putpeer(struct inet_peer *p) 523 523 { 524 524 p->dtime = (__u32)jiffies; 525 - smp_mb__before_atomic_dec(); 525 + smp_mb__before_atomic(); 526 526 atomic_dec(&p->refcnt); 527 527 } 528 528 EXPORT_SYMBOL_GPL(inet_putpeer);
+1 -3
net/ipv4/tcp_output.c
··· 1930 1930 /* It is possible TX completion already happened 1931 1931 * before we set TSQ_THROTTLED, so we must 1932 1932 * test again the condition. 1933 - * We abuse smp_mb__after_clear_bit() because 1934 - * there is no smp_mb__after_set_bit() yet 1935 1933 */ 1936 - smp_mb__after_clear_bit(); 1934 + smp_mb__after_atomic(); 1937 1935 if (atomic_read(&sk->sk_wmem_alloc) > limit) 1938 1936 break; 1939 1937 }
+1 -1
net/netfilter/nf_conntrack_core.c
··· 914 914 nf_ct_ext_destroy(ct); 915 915 nf_ct_ext_free(ct); 916 916 kmem_cache_free(net->ct.nf_conntrack_cachep, ct); 917 - smp_mb__before_atomic_dec(); 917 + smp_mb__before_atomic(); 918 918 atomic_dec(&net->ct.count); 919 919 } 920 920 EXPORT_SYMBOL_GPL(nf_conntrack_free);
+2 -2
net/rds/ib_recv.c
··· 598 598 { 599 599 atomic64_set(&ic->i_ack_next, seq); 600 600 if (ack_required) { 601 - smp_mb__before_clear_bit(); 601 + smp_mb__before_atomic(); 602 602 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 603 603 } 604 604 } ··· 606 606 static u64 rds_ib_get_ack(struct rds_ib_connection *ic) 607 607 { 608 608 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 609 - smp_mb__after_clear_bit(); 609 + smp_mb__after_atomic(); 610 610 611 611 return atomic64_read(&ic->i_ack_next); 612 612 }
+2 -2
net/rds/iw_recv.c
··· 429 429 { 430 430 atomic64_set(&ic->i_ack_next, seq); 431 431 if (ack_required) { 432 - smp_mb__before_clear_bit(); 432 + smp_mb__before_atomic(); 433 433 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 434 434 } 435 435 } ··· 437 437 static u64 rds_iw_get_ack(struct rds_iw_connection *ic) 438 438 { 439 439 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); 440 - smp_mb__after_clear_bit(); 440 + smp_mb__after_atomic(); 441 441 442 442 return atomic64_read(&ic->i_ack_next); 443 443 }
+3 -3
net/rds/send.c
··· 107 107 static void release_in_xmit(struct rds_connection *conn) 108 108 { 109 109 clear_bit(RDS_IN_XMIT, &conn->c_flags); 110 - smp_mb__after_clear_bit(); 110 + smp_mb__after_atomic(); 111 111 /* 112 112 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a 113 113 * hot path and finding waiters is very rare. We don't want to walk ··· 661 661 662 662 /* order flag updates with spin locks */ 663 663 if (!list_empty(&list)) 664 - smp_mb__after_clear_bit(); 664 + smp_mb__after_atomic(); 665 665 666 666 spin_unlock_irqrestore(&conn->c_lock, flags); 667 667 ··· 691 691 } 692 692 693 693 /* order flag updates with the rs lock */ 694 - smp_mb__after_clear_bit(); 694 + smp_mb__after_atomic(); 695 695 696 696 spin_unlock_irqrestore(&rs->rs_lock, flags); 697 697
+1 -1
net/rds/tcp_send.c
··· 93 93 rm->m_ack_seq = tc->t_last_sent_nxt + 94 94 sizeof(struct rds_header) + 95 95 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 96 - smp_mb__before_clear_bit(); 96 + smp_mb__before_atomic(); 97 97 set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags); 98 98 tc->t_last_expected_una = rm->m_ack_seq + 1; 99 99
+1 -1
net/sunrpc/auth.c
··· 296 296 rpcauth_unhash_cred_locked(struct rpc_cred *cred) 297 297 { 298 298 hlist_del_rcu(&cred->cr_hash); 299 - smp_mb__before_clear_bit(); 299 + smp_mb__before_atomic(); 300 300 clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags); 301 301 } 302 302
+1 -1
net/sunrpc/auth_gss/auth_gss.c
··· 143 143 gss_get_ctx(ctx); 144 144 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 145 145 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 146 - smp_mb__before_clear_bit(); 146 + smp_mb__before_atomic(); 147 147 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 148 148 } 149 149
+2 -2
net/sunrpc/backchannel_rqst.c
··· 244 244 dprintk("RPC: free backchannel req=%p\n", req); 245 245 246 246 req->rq_connect_cookie = xprt->connect_cookie - 1; 247 - smp_mb__before_clear_bit(); 247 + smp_mb__before_atomic(); 248 248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); 249 249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); 250 - smp_mb__after_clear_bit(); 250 + smp_mb__after_atomic(); 251 251 252 252 if (!xprt_need_to_requeue(xprt)) { 253 253 /*
+2 -2
net/sunrpc/xprt.c
··· 230 230 { 231 231 xprt->snd_task = NULL; 232 232 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { 233 - smp_mb__before_clear_bit(); 233 + smp_mb__before_atomic(); 234 234 clear_bit(XPRT_LOCKED, &xprt->state); 235 - smp_mb__after_clear_bit(); 235 + smp_mb__after_atomic(); 236 236 } else 237 237 queue_work(rpciod_workqueue, &xprt->task_cleanup); 238 238 }
+8 -8
net/sunrpc/xprtsock.c
··· 893 893 xs_reset_transport(transport); 894 894 xprt->reestablish_timeout = 0; 895 895 896 - smp_mb__before_clear_bit(); 896 + smp_mb__before_atomic(); 897 897 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 898 898 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 899 899 clear_bit(XPRT_CLOSING, &xprt->state); 900 - smp_mb__after_clear_bit(); 900 + smp_mb__after_atomic(); 901 901 xprt_disconnect_done(xprt); 902 902 } 903 903 ··· 1497 1497 1498 1498 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) 1499 1499 { 1500 - smp_mb__before_clear_bit(); 1500 + smp_mb__before_atomic(); 1501 1501 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); 1502 1502 clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); 1503 1503 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1504 1504 clear_bit(XPRT_CLOSING, &xprt->state); 1505 - smp_mb__after_clear_bit(); 1505 + smp_mb__after_atomic(); 1506 1506 } 1507 1507 1508 1508 static void xs_sock_mark_closed(struct rpc_xprt *xprt) ··· 1556 1556 xprt->connect_cookie++; 1557 1557 xprt->reestablish_timeout = 0; 1558 1558 set_bit(XPRT_CLOSING, &xprt->state); 1559 - smp_mb__before_clear_bit(); 1559 + smp_mb__before_atomic(); 1560 1560 clear_bit(XPRT_CONNECTED, &xprt->state); 1561 1561 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 1562 - smp_mb__after_clear_bit(); 1562 + smp_mb__after_atomic(); 1563 1563 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1564 1564 break; 1565 1565 case TCP_CLOSE_WAIT: ··· 1578 1578 case TCP_LAST_ACK: 1579 1579 set_bit(XPRT_CLOSING, &xprt->state); 1580 1580 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); 1581 - smp_mb__before_clear_bit(); 1581 + smp_mb__before_atomic(); 1582 1582 clear_bit(XPRT_CONNECTED, &xprt->state); 1583 - smp_mb__after_clear_bit(); 1583 + smp_mb__after_atomic(); 1584 1584 break; 1585 1585 case TCP_CLOSE: 1586 1586 xs_tcp_cancel_linger_timeout(xprt);
+1 -1
net/unix/af_unix.c
··· 1207 1207 sk->sk_state = TCP_ESTABLISHED; 1208 1208 sock_hold(newsk); 1209 1209 1210 - smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ 1210 + smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ 1211 1211 unix_peer(sk) = newsk; 1212 1212 1213 1213 unix_state_unlock(sk);
+2 -2
sound/pci/bt87x.c
··· 443 443 444 444 _error: 445 445 clear_bit(0, &chip->opened); 446 - smp_mb__after_clear_bit(); 446 + smp_mb__after_atomic(); 447 447 return err; 448 448 } 449 449 ··· 458 458 459 459 chip->substream = NULL; 460 460 clear_bit(0, &chip->opened); 461 - smp_mb__after_clear_bit(); 461 + smp_mb__after_atomic(); 462 462 return 0; 463 463 } 464 464