void __blk_iopoll_complete(struct blk_iopoll *iop)
{
list_del(&iop->list);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);
void blk_iopoll_enable(struct blk_iopoll *iop)
{
BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);
int err = ctx->err;
if (!ctx->queue.qlen) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
if (!ctx->queue.qlen ||
static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
atomic_inc(&genpd->sd_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static void genpd_acquire_lock(struct generic_pm_domain *genpd)
*/
static inline void release_slot(struct mtip_port *port, int tag)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(tag, port->allocated);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
/*
{
int n = dev->coupled->online_count;
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(a);
while (atomic_read(a) < n)
}
clear_bit_unlock(0, &ctx->flushing_completions);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
tasklet_enable(&ctx->context.tasklet);
*/
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
/* Invalidate all timestamps while vblank irq's are off. */
vblanktimestamp(dev, crtc, tslot) = t_vblank;
}
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(diff, &dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
/**
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&dev->vblank[crtc].count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
* updates before
* the counter increment.
*/
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&dev_priv->gpu_error.reset_counter);
kobject_uevent_env(&dev->primary->kdev->kobj,
return false;
/* Paired with the mb in cached_dev_attach */
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return true;
}
cl->fn = fn;
cl->wq = wq;
/* between atomic_dec() in closure_put() */
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
}
static inline void closure_queue(struct closure *cl)
BUG_ON(!test_bit(B_WRITING, &b->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(B_WRITING, &b->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&b->state, B_WRITING);
}
BUG_ON(!test_bit(B_READING, &b->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(B_READING, &b->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&b->state, B_READING);
}
struct dm_snapshot *s = pe->snap;
mempool_free(pe, s->pending_pool);
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&s->pending_exceptions_count);
}
static void merge_shutdown(struct dm_snapshot *s)
{
clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static void dm_queue_flush(struct mapped_device *md)
{
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
queue_work(md->wq, &md->work);
}
* STRIPE_ON_UNPLUG_LIST clear but the stripe
* is still in our list
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
/*
* STRIPE_ON_RELEASE_LIST could be set here. In that
/* clear 'streaming' status bit */
clear_bit(ADAP_STREAMING, &adap->state_bits);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_STREAMING);
skip_feed_stop:
err:
if (!adap->suspend_resume_active) {
clear_bit(ADAP_INIT, &adap->state_bits);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_INIT);
}
if (!adap->suspend_resume_active) {
adap->active_fe = -1;
clear_bit(ADAP_SLEEP, &adap->state_bits);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_SLEEP);
}
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
break;
case LOAD_DIAG:
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
u32 verbose)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(flag, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
flag);
schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
#endif
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&bp->cq_spq_left);
/* push the change in bp->spq_left towards memory */
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
* sp_state is cleared, and this order prevents
* races
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
wmb();
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* schedule the sp task as mcp ack is required */
bnx2x_schedule_sp_task(bp);
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
/* mark latest Q bit */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* send Q update ramrod for FCoE Q */
rc = bnx2x_queue_state_change(bp, &queue_params);
spqe_cnt++;
} /* for */
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
int count = ctl->data.credit.credit_count;
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_add(count, &bp->cq_spq_left);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->state, o->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
/**
/* The operation is completed */
clear_bit(p->state, p->pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(o->sched_state, o->raw.pstate);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
if (rc) {
o->next_state = BNX2X_Q_STATE_MAX;
clear_bit(pending_bit, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
wmb();
clear_bit(cmd, &o->pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}
if (rc) {
o->next_state = BNX2X_F_STATE_MAX;
clear_bit(cmd, pending);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return rc;
}
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
struct bnx2x_virtf *vf)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(flag, &bp->iov_task_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
static int cnic_close_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
static int cnic_abort_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
csk_hold(csk);
clear_bit(SK_F_INUSE, &csk->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
while (atomic_read(&csk->ref_count) != 1)
msleep(1);
cnic_cm_cleanup(csk);
L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
set_bit(SK_F_HW_ERR, &csk->flags);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
cnic_cm_upcall(cp, csk, opcode);
break;
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
bna_ib_ack(tcb->i_dbell, sent);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
return sent;
bnad_txq_cleanup(bnad, tcb);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}
sent = bnad_txcmpl_process(bnad, tcb);
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
bna_ib_ack(tcb->i_dbell, sent);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
} else {
netif_stop_queue(netdev);
if (adapter->params.stats_update_period &&
!(adapter->open_device_map & PORT_MASK)) {
/* Stop statistics accumulation. */
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
spin_lock(&adapter->work_lock); /* sync with update task */
spin_unlock(&adapter->work_lock);
cancel_mac_stats_update(adapter);
struct sge_qset *qs = txq_to_qset(q, qid);
set_bit(qid, &qs->txq_stopped);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (should_restart_tx(q) &&
test_and_clear_bit(qid, &qs->txq_stopped))
if (!skb_queue_empty(&q->sendq)) {
set_bit(TXQ_CTRL, &qs->txq_stopped);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
if (unlikely(q->size - q->in_use < ndesc)) {
set_bit(TXQ_OFLD, &qs->txq_stopped);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
struct sge_fl *fl = s->egr_map[id];
clear_bit(id, s->starving_fl);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (fl_starving(fl)) {
rxq = container_of(fl, struct sge_eth_rxq, fl);
struct sge_fl *fl = s->egr_map[id];
clear_bit(id, s->starving_fl);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/*
* Since we are accessing fl without a lock there's a
netif_tx_stop_all_queues(dev);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(GFAR_DOWN, &priv->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
disable_napi(priv);
gfar_init_tx_rx_base(priv);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(GFAR_DOWN, &priv->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
/* flush memory to make sure state is correct before next watchdog */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
/* flush memory to make sure state is correct before next watchdog */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
e_dev_err("Cannot enable PCI device from suspend\n");
return err;
}
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
pci_set_master(pdev);
e_err(probe, "Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBE_DISABLED, &adapter->state);
adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
spin_unlock_bh(&adapter->mbx_lock);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
return err;
}
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
return PCI_ERS_RESULT_DISCONNECT;
}
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__IXGBEVF_DISABLED, &adapter->state);
pci_set_master(pdev);
* wl1271_ps_elp_wakeup cannot be called concurrently.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
notify_remote_via_evtchn(pdev->evtchn);
/* in case we lost an aer request in the four-line time window above */
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(_PDEVB_op_active, &pdev->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
schedule_pcifront_aer_op(pdev);
clear_bit(IDEV_STOP_PENDING, &idev->flags);
clear_bit(IDEV_IO_READY, &idev->flags);
clear_bit(IDEV_GONE, &idev->flags);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(IDEV_ALLOCATED, &idev->flags);
wake_up(&ihost->eventq);
}
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
atomic_inc(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/*
* Add Linux/SCSI struct scsi_device by HCTL
*/
scsi_device_put(sd);
atomic_dec(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
continue;
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
* TARGET PORT GROUPS command
*/
atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
spin_lock_bh(&port->sep_alua_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (tg_pt_gp->tg_pt_gp_transition_complete)
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/*
* For storage objects that are members of the 'default_lu_gp',
rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
new_state, explicit);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return rc;
}
/*
dev = lu_gp_mem->lu_gp_mem_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&lu_gp->lu_gp_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
if (rc)
break;
}
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&lu_gp->lu_gp_lock);
}
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return rc;
}
continue;
atomic_inc(&deve->pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_irq(&nacl->device_list_lock);
return deve;
spin_lock(&lun->lun_acl_lock);
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
atomic_inc(&lun->lun_acl_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock);
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
spin_lock(&lun->lun_acl_lock);
list_del(&lacl->lacl_list);
atomic_dec(&lun->lun_acl_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
spin_unlock(&lun->lun_acl_lock);
core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
bio_put(bio);
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
atomic_inc(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
spin_lock_bh(&port->sep_alua_lock);
continue;
atomic_inc(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_bh(&port->sep_alua_lock);
/*
* Grab a configfs group dependency that is released
pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
goto out;
}
/*
sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_lunacl_undepend_item(deve_tmp);
goto out;
}
spin_lock(&dev->se_port_lock);
atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&dev->se_port_lock);
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
continue;
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
{
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int core_scsi3_check_implicit_release(
&tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
if (nacl->dynamic_node_acl) {
atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return;
}
&nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
*/
if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
return;
}
nacl = lun_acl->se_lun_nacl;
&lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static sense_reason_t
continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap;
}
tmp_tpg, i_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
spin_unlock_irq(&tmp_tpg->acl_node_lock);
pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_tpg_undepend_item(tmp_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap;
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_put_pr_reg;
}
initiator_str);
if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
spin_unlock_irq(&dest_se_tpg->acl_node_lock);
pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dest_node_acl = NULL;
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dest_se_deve = NULL;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
add_desc_len = 0;
atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock(&pr_tmpl->registration_lock);
/*
* Determine expected length of $FABRIC_MOD specific
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
break;
}
/*
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
/*
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
atomic_dec(&dev->dev_qf_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
* Dormant to Active status.
*/
cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
dev->transport->name);
return false;
case MSG_ORDERED_TAG:
atomic_inc(&dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
" se_ordered_id: %u\n",
* For SIMPLE and UNTAGGED Task Attribute commands
*/
atomic_inc(&dev->simple_cmds);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
break;
}
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
atomic_dec(&dev->dev_ordered_sync);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
atomic_inc(&dev->dev_qf_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue);
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
cmd->transport_state |= CMD_T_ABORTED;
cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return;
}
}
spin_unlock_irq(&nacl->device_list_lock);
atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
asc, ascq);
atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return 0;
}
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
}
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
if (found)
clear_bit(eol, ldata->read_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
ldata->read_tail += c;
if (found) {
/* clear the bit used to serialize the DMA tx. */
clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/* wake up the possible processes. */
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
mxs_auart_dma_tx(s, i);
} else {
clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
return;
}
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
atomic_inc(&tpg->tpg_port_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
return 0;
}
struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
atomic_dec(&tpg->tpg_port_count);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static int usbg_check_stop_free(struct se_cmd *se_cmd)
for (i = 0; i < N_OUT_URB; ++i) {
if (portdata->out_urbs[i] == urb) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(i, &portdata->out_busy);
break;
}
tpg->tv_tpg_vhost_count++;
tpg->vhost_scsi = vs;
vs_tpg[tpg->tport_tpgt] = tpg;
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
match = true;
}
mutex_unlock(&tpg->tv_tpg_mutex);
void __w1_family_get(struct w1_family *f)
{
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&f->refcnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
EXPORT_SYMBOL(w1_unregister_family);
notify_remote_via_irq(pdev->evtchn_irq);
/* Mark that we're done. */
- smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
+ smp_mb__before_atomic(); /* /after/ clearing PCIF_active */
clear_bit(_PDEVF_op_active, &pdev->flags);
- smp_mb__after_clear_bit(); /* /before/ final check for work */
+ smp_mb__after_atomic(); /* /before/ final check for work */
/* Check to see if the driver domain tried to start another request in
* between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags);
}
static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}
* before the atomic variable goes to zero, we must make sure
* dip->errors is perceived to be set.
*/
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
}
/* if there are more bios still pending for this dio, just exit */
* before the atomic variable goes to zero, we must
* make sure dip->errors is perceived to be set.
*/
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
if (atomic_dec_and_test(&dip->pending_bios))
bio_io_error(dip->orig_bio);
return 0;
atomic_inc(&inode->i_dio_count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/*
* The generic stuff only does filemap_write_and_wait_range, which
return -EINVAL;
atomic_inc(&root->will_be_snapshoted);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
btrfs_wait_nocow_write(root);
ret = btrfs_start_delalloc_inodes(root, 0);
void unlock_buffer(struct buffer_head *bh)
{
clear_bit_unlock(BH_Lock, &bh->b_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);
void ext4_resize_end(struct super_block *sb)
{
clear_bit_unlock(EXT4_RESIZING, &EXT4_SB(sb)->s_resize_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
clear_bit(HIF_WAIT, &gh->gh_iflags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
{
gl->gl_demote_state = LM_ST_EXCLUSIVE;
clear_bit(GLF_DEMOTE, &gl->gl_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
gl->gl_lockref.count++;
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gl->gl_lockref.count--;
out_unlock:
clear_bit(GLF_LOCK, &gl->gl_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return;
}
* Writeback of the data mapping may cause the dirty flag to be set
* so we have to clear it again here.
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(GLF_DIRTY, &gl->gl_flags);
}
queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
spin_unlock(&ls->ls_recover_spin);
}
ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
return 0;
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
done:
clear_bit(JDF_RECOVERY, &jd->jd_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
}
set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
else if (val == 0) {
clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
gfs2_glock_thaw(sdp);
} else {
ret = -EINVAL;
rv = jid = -EINVAL;
sdp->sd_lockstruct.ls_jid = jid;
clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
spin_unlock(&sdp->sd_jindex_spin);
clear_buffer_uptodate(bh);
if (orig_bh) {
clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&orig_bh->b_state, BH_Shadow);
}
unlock_buffer(bh);
spin_lock(&journal->j_list_lock);
J_ASSERT(jinode->i_transaction == commit_transaction);
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
spin_unlock(&journal->j_list_lock);
}
spin_lock(&journal->j_list_lock);
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
{
put_rpccred(entry->cred);
kfree(entry);
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_long_dec(&nfs_access_nr_entries);
- smp_mb__after_atomic_dec();
+ smp_mb__after_atomic();
}
static void nfs_access_free_list(struct list_head *head)
else {
remove_lru_entry:
list_del_init(&nfsi->access_cache_inode_lru);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
spin_unlock(&inode->i_lock);
}
nfs_access_add_rbtree(inode, cache);
/* Update accounting */
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_long_inc(&nfs_access_nr_entries);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/* Add inode to global LRU list */
if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
trace_nfs_invalidate_mapping_exit(inode, ret);
clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(bitlock, NFS_INO_INVALIDATING);
out:
return ret;
static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}
static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
rpc_wake_up(&clp->cl_rpcwaitq);
}
{
if (atomic_dec_and_test(&c->io_count)) {
clear_bit(NFS_IO_INPROGRESS, &c->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
}
}
printk(KERN_ERR "NFS: Invalid unlock attempted\n");
BUG();
}
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(PG_BUSY, &req->wb_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&req->wb_flags, PG_BUSY);
}
unsigned long *bitlock = &NFS_I(inode)->flags;
clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
{
if (lseg) {
atomic_inc(&lseg->pls_refcount);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
return lseg;
}
nfs_pageio_complete(&pgio);
clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(bitlock, NFS_INO_FLUSHING);
if (err < 0)
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
* important.
*/
clear_bit(DIRTY_CNODE, &cnode->flags);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(COW_CNODE, &cnode->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
offs += len;
dbg_chk_lpt_sz(c, 1, len);
cnode = cnode->cnext;
* the reason for the second barrier.
*/
clear_bit(DIRTY_ZNODE, &znode->flags);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(COW_ZNODE, &znode->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/*
* We have marked the znode as clean but have not updated the
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
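/*
 * Illustrative sketch, not part of this patch: the locking-style use the
 * comment above warns about.  A flag bit is cleared and a sleeper is woken;
 * without the explicit barrier the clear could be reordered against the
 * waitqueue check inside wake_up_bit().  MY_FLAG and obj are made-up names
 * for illustration only.
 */
	clear_bit(MY_FLAG, &obj->flags);
	smp_mb__after_atomic();	/* order the clear before wake_up_bit()'s waitqueue check */
	wake_up_bit(&obj->flags, MY_FLAG);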
*/
#define clear_bit_unlock(nr, addr) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(nr, addr); \
} while (0)
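/*
 * Usage sketch (illustrative, not part of this patch): a bit used as a
 * simple lock.  test_and_set_bit_lock() gives acquire semantics on the way
 * in; the generic clear_bit_unlock() fallback above provides the matching
 * release by issuing smp_mb__before_atomic() ahead of the plain clear_bit().
 * MY_LOCK_BIT and flags are made-up names.
 */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &flags))
		cpu_relax();
	/* ... critical section ... */
	clear_bit_unlock(MY_LOCK_BIT, &flags);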
static inline void put_bh(struct buffer_head *bh)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&bh->b_count);
}
static inline void hd_struct_get(struct hd_struct *part)
{
atomic_inc(&part->ref);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static inline int hd_struct_try_get(struct hd_struct *part)
static inline void tasklet_unlock(struct tasklet_struct *t)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static inline void tasklet_disable(struct tasklet_struct *t)
static inline void tasklet_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
static inline void napi_enable(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
/*
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_task()
- *
- * XXX: assumes set/clear bit are identical barrier wise.
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_task()
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_clear_bit(); \
+ smp_mb__after_atomic(); \
} while (0)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t) \
do { \
- smp_mb__before_clear_bit(); \
+ smp_mb__before_atomic(); \
clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_clear_bit(); \
+ smp_mb__after_atomic(); \
} while (0)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static inline int xprt_connecting(struct rpc_xprt *xprt)
static inline void xprt_clear_binding(struct rpc_xprt *xprt)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_BINDING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
* pairs with task_work_add()->set_notify_resume() after
* hlist_add_head(task->task_works);
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (unlikely(current->task_works))
task_work_run();
}
/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&cp->refcnt);
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&dest->refcnt);
}
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
local_irq_restore(flags);
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&masters_in_kgdb);
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
* get_futex_key() implies a full barrier. This is relied upon
* as full barrier (B), see the ordering comment above.
*/
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
/*
/*
* Full barrier (A), see the ordering comment above.
*/
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
#endif
}
static void helper_lock(void)
{
atomic_inc(&running_helpers);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static void helper_unlock(void)
}
rcu_prepare_for_idle(smp_processor_id());
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
+ smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
+ smp_mb__after_atomic(); /* Force ordering with next sojourn. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
/*
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
{
- smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
+ smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
+ smp_mb__after_atomic(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
(atomic_read(&rdtp->dynticks) & 0x1))
return;
rdtp->dynticks_nmi_nesting++;
- smp_mb__before_atomic_inc(); /* Force delay from prior write. */
+ smp_mb__before_atomic(); /* Force delay from prior write. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
- smp_mb__after_atomic_inc(); /* See above. */
+ smp_mb__after_atomic(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}
--rdtp->dynticks_nmi_nesting != 0)
return;
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
- smp_mb__before_atomic_inc(); /* See above. */
+ smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
- smp_mb__after_atomic_inc(); /* Force delay to next write. */
+ smp_mb__after_atomic(); /* Force delay to next write. */
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
/* ensure test happens before caller kfree */
- smp_mb__before_atomic_inc(); /* ^^^ */
+ smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone1);
return;
}
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
/* ensure test happens before caller kfree */
- smp_mb__before_atomic_inc(); /* ^^^ */
+ smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone2);
return;
}
s = atomic_long_read(&rsp->expedited_done);
if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
/* ensure test happens before caller kfree */
- smp_mb__before_atomic_inc(); /* ^^^ */
+ smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_done_lost);
break;
}
/* Record start of fully idle period. */
j = jiffies;
ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&rdtp->dynticks_idle);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
}
}
/* Record end of idle period. */
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&rdtp->dynticks_idle);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
/*
* do a write memory barrier, and then update the count, to
* make sure the vector is visible when count is set.
*/
- smp_mb__before_atomic_inc();
+ smp_mb__before_atomic();
atomic_inc(&(vec)->count);
do_mb = 1;
}
* the new priority vec.
*/
if (do_mb)
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/*
* When removing from the vector, we decrement the counter first
* do a memory barrier and then clear the mask.
*/
atomic_dec(&(vec)->count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
cpumask_clear_cpu(cpu, vec->mask);
}
*
* In order for this to function properly, as it uses waitqueue_active()
* internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_clear_bit(), but in some
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
* cases where bitflags are manipulated non-atomically under a lock, one
* may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
* because spin_unlock() does not guarantee a memory barrier.
bit = sync ? BDI_sync_congested : BDI_async_congested;
if (test_and_clear_bit(bit, &bdi->state))
atomic_dec(&nr_bdi_congested[sync]);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (waitqueue_active(wqh))
wake_up(wqh);
}
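/*
 * For context, a sketch of the sleeper this wakeup pairs with (illustrative
 * only, not from this patch).  The waiter queues itself before re-checking
 * the congested bit, so the smp_mb__after_atomic() above guarantees the
 * waker either finds it via waitqueue_active() or the waiter sees the bit
 * already cleared and does not sleep.
 */
	DEFINE_WAIT(wait);

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	if (test_bit(bit, &bdi->state))
		io_schedule();
	finish_wait(wqh, &wait);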
{
VM_BUG_ON_PAGE(!PageLocked(page), page);
clear_bit_unlock(PG_locked, &page->flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
if (!test_clear_page_writeback(page))
BUG();
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
* we need to ensure there's a memory barrier after it. The bit
* *must* be set before we do the atomic_inc() on pvcc->inflight.
* There's no smp_mb__after_set_bit(), so it's this or abuse
- * smp_mb__after_clear_bit().
+ * smp_mb__after_atomic().
*/
test_and_set_bit(BLOCKED, &pvcc->blocked);
return;
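/*
 * Illustrative sketch, not part of this patch: the two ways of ordering the
 * bit set against the later atomic_inc() on pvcc->inflight that the comment
 * above describes.  Either rely on the full ordering of the value-returning
 * RMW (what the code does today), or pair a plain set_bit() with
 * smp_mb__after_atomic(), which the rename makes usable after set_bit() as
 * well as clear_bit().
 */
	test_and_set_bit(BLOCKED, &pvcc->blocked);	/* fully ordered RMW */

	/* ...or, equivalently for the ordering: */
	set_bit(BLOCKED, &pvcc->blocked);
	smp_mb__after_atomic();		/* order the set before the inc */
	atomic_inc(&pvcc->inflight);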
clear_bit(HCI_INQUIRY, &hdev->flags);
- smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
hci_conn_check_pending(hdev);
if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
return;
- smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
+ smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
wake_up_bit(&hdev->flags, HCI_INQUIRY);
if (!test_bit(HCI_MGMT, &hdev->dev_flags))
* dev->stop() will invoke napi_disable() on all of its
* napi_struct instances on this device.
*/
- smp_mb__after_clear_bit(); /* Commit netif_running(). */
+ smp_mb__after_atomic(); /* Commit netif_running(). */
}
dev_deactivate_many(head);
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
qdisc_run(q);
&q->state)) {
__netif_reschedule(q);
} else {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(__QDISC_STATE_SCHED,
&q->state);
}
BUG_ON(n->gro_list);
list_del(&n->poll_list);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
* Make sure the above read is complete since it can be
* rewritten as soon as we clear the bit below.
*/
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
/* We are about to handle this device,
* so new events can be accepted
void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
- * We abuse smp_mb__after_clear_bit() because
- * there is no smp_mb__after_set_bit() yet
*/
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (atomic_read(&sk->sk_wmem_alloc) > limit)
break;
}
nf_ct_ext_destroy(ct);
nf_ct_ext_free(ct);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
{
atomic64_set(&ic->i_ack_next, seq);
if (ack_required) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
}
static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return atomic64_read(&ic->i_ack_next);
}
{
atomic64_set(&ic->i_ack_next, seq);
if (ack_required) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
}
static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
{
clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return atomic64_read(&ic->i_ack_next);
}
static void release_in_xmit(struct rds_connection *conn)
{
clear_bit(RDS_IN_XMIT, &conn->c_flags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
/* order flag updates with spin locks */
if (!list_empty(&list))
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
spin_unlock_irqrestore(&conn->c_lock, flags);
}
/* order flag updates with the rs lock */
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
spin_unlock_irqrestore(&rs->rs_lock, flags);
rm->m_ack_seq = tc->t_last_sent_nxt +
sizeof(struct rds_header) +
be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
tc->t_last_expected_una = rm->m_ack_seq + 1;
rpcauth_unhash_cred_locked(struct rpc_cred *cred)
{
hlist_del_rcu(&cred->cr_hash);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
}
gss_get_ctx(ctx);
rcu_assign_pointer(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}
dprintk("RPC: free backchannel req=%p\n", req);
req->rq_connect_cookie = xprt->connect_cookie - 1;
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
if (!xprt_need_to_requeue(xprt)) {
/*
{
xprt->snd_task = NULL;
if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
} else
queue_work(rpciod_workqueue, &xprt->task_cleanup);
}
xs_reset_transport(transport);
xprt->reestablish_timeout = 0;
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
xprt_disconnect_done(xprt);
}
static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
}
static void xs_sock_mark_closed(struct rpc_xprt *xprt)
xprt->connect_cookie++;
xprt->reestablish_timeout = 0;
set_bit(XPRT_CLOSING, &xprt->state);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTED, &xprt->state);
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
break;
case TCP_CLOSE_WAIT:
case TCP_LAST_ACK:
set_bit(XPRT_CLOSING, &xprt->state);
xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(XPRT_CONNECTED, &xprt->state);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
break;
case TCP_CLOSE:
xs_tcp_cancel_linger_timeout(xprt);
sk->sk_state = TCP_ESTABLISHED;
sock_hold(newsk);
- smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
+ smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
unix_peer(sk) = newsk;
unix_state_unlock(sk);
_error:
clear_bit(0, &chip->opened);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return err;
}
chip->substream = NULL;
clear_bit(0, &chip->opened);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
return 0;
}