* and read back old value
*/
do {
- new = old = ACCESS_ONCE(*ipi_data_ptr);
+ new = old = READ_ONCE(*ipi_data_ptr);
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
while (lockval.tickets.next != lockval.tickets.owner) {
wfe();
- lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
}
smp_mb();
bool entered_lp2 = false;
if (tegra_pending_sgi())
- ACCESS_ONCE(abort_flag) = true;
+ WRITE_ONCE(abort_flag, true);
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
{
u32 seq;
repeat:
- seq = ACCESS_ONCE(vdata->seq_count);
+ seq = READ_ONCE(vdata->seq_count);
if (seq & 1) {
cpu_relax();
goto repeat;
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- int tmp = ACCESS_ONCE(lock->lock);
+ int tmp = READ_ONCE(lock->lock);
if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+ WRITE_ONCE(*p, (tmp + 2) & ~1);
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock);
+ long tmp = READ_ONCE(lock->lock);
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
u32 seq;
while (true) {
- seq = ACCESS_ONCE(data->seq_count);
+ seq = READ_ONCE(data->seq_count);
if (likely(!(seq & 1))) {
/* Paired with smp_wmb() in vdso_data_write_*(). */
smp_rmb();
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
- ACCESS_ONCE(*nc_core_ready_count) = 0;
+ WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
try_again:
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
if (push && !tport->low_latency)
tty_flip_buffer_push(tport);
if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
do {
/* pull chars out of the hat */
- ix = ACCESS_ONCE(port->rx_outp);
+ ix = READ_ONCE(port->rx_outp);
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
static __inline__ s64
atomic64_read(const atomic64_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
#define atomic64_inc(v) (atomic64_add( 1,(v)))
if (!opal_memcons)
return -ENODEV;
- out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
+ out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
- int old = ACCESS_ONCE(rw->lock);
+ int old = READ_ONCE(rw->lock);
return likely(old >= 0 &&
__atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
- int old = ACCESS_ONCE(rw->lock);
+ int old = READ_ONCE(rw->lock);
return likely(old == 0 &&
__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
int old;
do {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
if (old < 0)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
int old;
while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
if (old < 0)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
smp_mb();
if (old >= 0) {
prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
smp_yield_cpu(~owner);
count = spin_retry;
}
- old = ACCESS_ONCE(rw->lock);
- owner = ACCESS_ONCE(rw->owner);
+ old = READ_ONCE(rw->lock);
+ owner = READ_ONCE(rw->owner);
if (old >= 0 &&
__atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
prev = old;
int old;
while (count-- > 0) {
- old = ACCESS_ONCE(rw->lock);
+ old = READ_ONCE(rw->lock);
if (old)
continue;
if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
#define atomic_set_release(v, i) atomic_set((v), (i))
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
int64_t completion_slot, int update)
{
if (update) {
- if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+ if (READ_ONCE(dma_queue->hw_complete_count) >
completion_slot)
return 1;
__gxio_dma_queue_update_credits(dma_queue);
}
- return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+ return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
}
EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
* if the result is LESS than "hw_complete_count".
*/
uint64_t complete;
- complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+ complete = READ_ONCE(dma_queue->hw_complete_count);
slot |= (complete & 0xffffffffff000000);
if (slot < complete)
slot += 0x1000000;
int do_syscall_trace_enter(struct pt_regs *regs)
{
- u32 work = ACCESS_ONCE(current_thread_info()->flags);
+ u32 work = READ_ONCE(current_thread_info()->flags);
if ((work & _TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
BUG_ON(regs != task_pt_regs(current));
- work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+ work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
if (unlikely(work & _TIF_SYSCALL_EMU))
emulated = true;
notrace time_t __vdso_time(time_t *t)
{
/* This is atomic on x86 so we don't need any locks. */
- time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+ time_t result = READ_ONCE(gtod->wall_time_sec);
if (t)
*t = result;
event->destroy(event);
}
- if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
+ if (READ_ONCE(x86_pmu.attr_rdpmc))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
return err;
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->seq);
+ ret = READ_ONCE(s->seq);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
page = cpu/ESPFIX_STACKS_PER_PAGE;
/* Did another CPU already set this up? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (likely(stack_page))
goto done;
mutex_lock(&espfix_init_mutex);
/* Did we race on the lock? */
- stack_page = ACCESS_ONCE(espfix_pages[page]);
+ stack_page = READ_ONCE(espfix_pages[page]);
if (stack_page)
goto unlock_done;
set_pte(&pte_p[n*PTE_STRIDE], pte);
/* Job is done for this CPU and any CPU which shares this page */
- ACCESS_ONCE(espfix_pages[page]) = stack_page;
+ WRITE_ONCE(espfix_pages[page], stack_page);
unlock_done:
mutex_unlock(&espfix_init_mutex);
{
struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
int remainder_ns, decimal_msecs;
- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+ u64 whole_msecs = READ_ONCE(a->max_duration);
remainder_ns = do_div(whole_msecs, (1000 * 1000));
decimal_msecs = remainder_ns / 1000;
static u64 __get_spte_lockless(u64 *sptep)
{
- return ACCESS_ONCE(*sptep);
+ return READ_ONCE(*sptep);
}
#else
union split_spte {
* If we don't have indirect shadow pages, it means no page is
* write-protected, so we can exit simply.
*/
- if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+ if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
remote_flush = local_flush = false;
return false;
index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
- return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}
void kvm_page_track_cleanup(struct kvm *kvm)
if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
topidx = p2m_top_index(pfn);
top_mfn_p = &p2m_top_mfn[topidx];
- mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
+ mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
static void lcd_put_byte(u8 *addr, u8 data)
{
#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*addr) = data;
+ WRITE_ONCE(*addr, data);
#else
- ACCESS_ONCE(*addr) = data & 0xf0;
- ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+ WRITE_ONCE(*addr, data & 0xf0);
+ WRITE_ONCE(*addr, (data << 4) & 0xf0);
#endif
}
static int __init lcd_init(void)
{
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
mdelay(5);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(200);
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
udelay(50);
#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
- ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
- u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+ u64 now, issue = READ_ONCE(rwb->sync_issue);
if (!issue || !rwb->sync_cookie)
return 0;
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
- drv = ACCESS_ONCE(dev->driver);
+ drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
return;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
/* Can we pull enough? */
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
- event_handler = ACCESS_ONCE(timer->evt.event_handler);
+ event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
- res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+ res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
- res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+ res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
if (sc->flags & SCF_FROZEN) {
wait_event_interruptible_timeout(
dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (dd->flags & HFI1_FROZEN)
return -ENOLCK;
goto done;
}
/* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
avail =
(unsigned long)sc->credits -
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
sc_release_update(sc);
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
trycount++;
goto retry;
}
/* call sent buffer callbacks */
code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
tail = sc->sr_tail;
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
swhead = sde->descq_head & sde->sdma_mask;
/* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
cnt = sde->descq_cnt;
if (swhead < swtail)
if ((status & sde->idle_mask) && !idle_check_done) {
u16 swtail;
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
idle_check_done = 1;
u16 len;
head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
sde->cpu,
sdma_state_name(sde->state.current_state),
return -EINVAL;
}
while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ nr = ffz(READ_ONCE(sde->ahg_bits));
if (nr > 31) {
trace_hfi1_ahg_allocate(sde, -ENOSPC);
return -ENOSPC;
{
return sde->descq_cnt -
(sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
+ READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
if (ret != -EBUSY) {
req->status = ret;
WRITE_ONCE(req->has_error, 1);
- if (ACCESS_ONCE(req->seqcomp) ==
+ if (READ_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
return ret;
*/
if (req->data_len) {
iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
} else {
if (status != SDMA_TXREQ_S_OK)
req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
(READ_ONCE(req->done) ||
READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
rdi->driver_f.notify_error_qp(qp);
/* Schedule the sending tasklet to drain the send work queue. */
- if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ if (READ_ONCE(qp->s_last) != qp->s_head)
rdi->driver_f.schedule_send(qp);
rvt_clear_mr_refs(qp, 0);
if (likely(qp->s_avail))
return 0;
smp_read_barrier_depends(); /* see rc.c */
- slast = ACCESS_ONCE(qp->s_last);
+ slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
else
* ahead and kick the send engine into gear. Otherwise we will always
* just schedule the send to happen later.
*/
- call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
for (; wr; wr = wr->next) {
err = rvt_post_one_wr(qp, wr, &call_send);
haptic->suspended = false;
- magnitude = ACCESS_ONCE(haptic->magnitude);
+ magnitude = READ_ONCE(haptic->magnitude);
if (magnitude)
regulator_haptic_set_voltage(haptic, magnitude);
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
switch_get_position(sctx, region_nr, ®ion_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
* the read barrier in scif_rb_count(..)
*/
wmb();
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si bug: For the case where a Core is performing an EXT_WR
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
* scif_rb_space(..)
*/
mb();
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si Bug: For the case where a Core is performing an EXT_WR
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
* Need to restart list traversal if there has been
* an asynchronous list entry deletion.
*/
- if (ACCESS_ONCE(ep->rma_info.async_list_del))
+ if (READ_ONCE(ep->rma_info.async_list_del))
goto retry;
}
mutex_unlock(&ep->rma_info.rma_lock);
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- recv_probe = ACCESS_ONCE(bond->recv_probe);
+ recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
if (wrapped)
newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
unsigned int count;
smp_rmb();
- count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
if (count == 0)
goto out;
dma_addr_t phys;
smp_rmb();
- count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
if (count == (TX_DESC_NUM - 1)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
}
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
u64 bytes, packets;
unsigned int start;
- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
i40e_get_netdev_stats_struct_tx(tx_ring, stats);
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
- p = ACCESS_ONCE(vsi->tx_rings[q]);
+ p = READ_ONCE(vsi->tx_rings[q]);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
}
smp_mb(); /* Force any pending update before accessing. */
- adj = ACCESS_ONCE(pf->ptp_base_adj);
+ adj = READ_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustement value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ WRITE_ONCE(pf->ptp_base_adj, incval);
smp_mb(); /* Force the above update. */
}
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!E1000_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
- u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (E1000_REMOVED(hw_addr))
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
*/
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
}
smp_mb();
- incval = ACCESS_ONCE(adapter->base_incval);
+ incval = READ_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
}
/* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ WRITE_ONCE(adapter->base_incval, incval);
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (IXGBE_REMOVED(reg_addr))
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (IXGBE_REMOVED(reg_addr))
return;
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done < budget;
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
ring = &vdev->vpaths[i].ring;
/* Truncated to machine word size number of frames */
- rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+ rx_frms = READ_ONCE(ring->stats.rx_frms);
/* Did this vpath received any packets */
if (ring->stats.prev_rx_frms == rx_frms) {
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
- if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
/* Note test interrupts */
if (context->index == efx->irq_level)
efx->last_irq_cpu = raw_smp_processor_id();
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
bool rx_cont;
u16 flags = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
/* Basic packet information */
unsigned int tx_ev_q_label;
int tx_descs = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (ACCESS_ONCE(table->entry[i].spec) &
+ if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
rc = efx_ef10_filter_remove_internal(efx,
1U << EFX_FILTER_PRI_AUTO, i, true);
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if (method == RESET_TYPE_MC_BIST)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
queue_work(reset_workqueue, &efx->reset_work);
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
ef4_oword_t reg;
int link_speed, isolate;
- isolate = !!ACCESS_ONCE(efx->reset_pending);
+ isolate = !!READ_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
struct ef4_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
struct ef4_rx_queue *rx_queue;
struct ef4_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
struct ef4_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct ef4_channel *channel;
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
/* Write host time for specified period or until MC is done */
while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
- ACCESS_ONCE(*mc_running)) {
+ READ_ONCE(*mc_running)) {
struct timespec64 update_time;
unsigned int host_time;
do {
pps_get_ts(&now);
} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
- ACCESS_ONCE(*mc_running));
+ READ_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
ptp->start.dma_addr);
/* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ WRITE_ONCE(*start, 0);
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
- while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
udelay(20); /* Usually start MCDI execution quickly */
loops++;
}
if (!time_before(jiffies, timeout))
++ptp->sync_timeouts;
- if (ACCESS_ONCE(*start))
+ if (READ_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
/* Collect results */
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
pkts = dropped = errors = bytes = 0;
- rx_rings = ACCESS_ONCE(np->rx_rings);
+ rx_rings = READ_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
pkts = errors = bytes = 0;
- tx_rings = ACCESS_ONCE(np->tx_rings);
+ tx_rings = READ_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
* and validate that the result isn't NULL - in case we are
* racing against queue removal.
*/
- int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ int numvtaps = READ_ONCE(tap->numvtaps);
__u32 rxq;
if (!numvtaps)
u32 numqueues = 0;
rcu_read_lock();
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
if (txq) {
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
if (txq >= numqueues)
tx_status = &desc->ud.ds_tx5212.tx_stat;
- txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+ txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been send or error */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
- txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+ txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
- rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+ rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
- rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+ rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
bus->dpc_running = true;
wmb();
- while (ACCESS_ONCE(bus->dpc_triggered)) {
+ while (READ_ONCE(bus->dpc_triggered)) {
bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
bus->idlecount = 0;
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- bool calibrating = ACCESS_ONCE(mvm->calibrating);
+ bool calibrating = READ_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
return -1;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
+ wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
spin_lock(&rxq->lock);
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(data->wmediumd);
+ _portid = READ_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = ACCESS_ONCE(data->wmediumd);
+ u32 _pid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
- while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
if (rv < 0)
return -ERESTARTSYS;
- cntr = ACCESS_ONCE(desc->length);
+ cntr = READ_ONCE(desc->length);
if (cntr == 0) {
desc->read = 0;
retry:
{
u64 lim;
- lim = ACCESS_ONCE(usbfs_memory_mb);
+ lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
atomic64_add(amount, &usbfs_memory_usage);
char *string;
intf = to_usb_interface(dev);
- string = ACCESS_ONCE(intf->cur_altsetting->string);
+ string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = ACCESS_ONCE(intf->cur_altsetting);
+ alt = READ_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
if (!req->last_desc)
return 0;
- if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
return 0; /* Not put in hardware buffers yet */
if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
if (!req->curr_desc)
return 0;
- ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
if (ctrl & GR_DESC_OUT_CTRL_EN)
return 0; /* Not received yet */
}
/* find the last TD processed by the controller. */
- head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+ head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-#define qh_element(qh) ACCESS_ONCE((qh)->element)
+#define qh_element(qh) READ_ONCE((qh)->element)
#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
cpu_to_hc32((uhci), (qh)->dma_handle))
* subject to asynchronous updates by the controller.
*/
#define td_status(uhci, td) hc32_to_cpu((uhci), \
- ACCESS_ONCE((td)->status))
+ READ_ONCE((td)->status))
#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
{
struct vfio_group *group = data;
struct vfio_device *device;
- struct device_driver *drv = ACCESS_ONCE(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct vfio_unbound_dev *unbound;
int ret = -EINVAL;
continue;
}
- tpg = ACCESS_ONCE(vs_tpg[*target]);
+ tpg = READ_ONCE(vs_tpg[*target]);
if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out);
* actually has a cancel function, hence the cmpxchg()
*/
- cancel = ACCESS_ONCE(kiocb->ki_cancel);
+ cancel = READ_ONCE(kiocb->ki_cancel);
do {
if (!cancel || cancel == KIOCB_CANCELLED)
return -EINVAL;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+ create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
+ b_state);
return page_buffers(page);
}
get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
- unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
return;
do {
- old = ACCESS_ONCE(mm->flags);
+ old = READ_ONCE(mm->flags);
new = (old & ~MMF_DUMPABLE_MASK) | value;
} while (cmpxchg(&mm->flags, old, new) != old);
}
* F_SETSIG can change ->signum lockless in parallel, make
* sure we read it once and use the same value throughout.
*/
- int signum = ACCESS_ONCE(fown->signum);
+ int signum = READ_ONCE(fown->signum);
if (!sigio_perm(p, fown, signum))
return;
while (1) {
struct hlist_node *p;
rcu_read_lock();
- p = ACCESS_ONCE(m->mnt_pins.first);
+ p = READ_ONCE(m->mnt_pins.first);
if (!p) {
rcu_read_unlock();
break;
while (1) {
struct hlist_node *q;
rcu_read_lock();
- q = ACCESS_ONCE(p->first);
+ q = READ_ONCE(p->first);
if (!q) {
rcu_read_unlock();
break;
* Lockless access is OK, because file->private data is set
* once during mount and is valid until the file is released.
*/
- return ACCESS_ONCE(file->private_data);
+ return READ_ONCE(file->private_data);
}
static void fuse_request_init(struct fuse_req *req, struct page **pages,
WARN_ON_ONCE(flags & ~mask);
do {
- old_flags = ACCESS_ONCE(inode->i_flags);
+ old_flags = READ_ONCE(inode->i_flags);
new_flags = (old_flags & ~mask) | flags;
} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
new_flags) != old_flags));
/* Given that we're not holding a lock here, we retain the value in a
* local variable for each dentry as we look at it so that we don't see
* the components of that value change under us */
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
managed &= DCACHE_MANAGED_DENTRY,
unlikely(managed != 0)) {
/* Allow the filesystem to manage the transit without i_mutex
unsigned managed;
int ret;
- while (managed = ACCESS_ONCE(path->dentry->d_flags),
+ while (managed = READ_ONCE(path->dentry->d_flags),
unlikely(managed & DCACHE_MANAGED_DENTRY)) {
/* Allow the filesystem to manage the transit without i_mutex
* being held.
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
int error;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
if (flags & LOOKUP_RCU) {
- if (parent != ACCESS_ONCE(dentry->d_parent))
+ if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else
dput(parent);
struct inode *dir;
if (flags & LOOKUP_RCU) {
- parent = ACCESS_ONCE(dentry->d_parent);
+ parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
if (!dir)
return -ECHILD;
ret = -ECHILD;
if (!(flags & LOOKUP_RCU))
dput(parent);
- else if (parent != ACCESS_ONCE(dentry->d_parent))
+ else if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
goto out;
}
cutime = sig->cutime;
cstime = sig->cstime;
cgtime = sig->cgtime;
- rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+ rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
/* add up live thread stats at the group level */
if (whole) {
poll_wait(file, &p->ns->poll, wait);
- event = ACCESS_ONCE(ns->event);
+ event = READ_ONCE(ns->event);
if (m->poll_event != event) {
m->poll_event = event;
res |= POLLERR | POLLPRI;
*/
int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+ unsigned int buffers = READ_ONCE(pipe->buffers);
spd->nr_pages_max = buffers;
if (buffers <= PIPE_DEF_BUFFERS)
* in __get_user_pages if userfaultfd_release waits on the
* caller of handle_userfault to release the mmap_sem.
*/
- if (unlikely(ACCESS_ONCE(ctx->released))) {
+ if (unlikely(READ_ONCE(ctx->released))) {
/*
* Don't return VM_FAULT_SIGBUS in this case, so a non
* cooperative manager can close the uffd after the
vmf->flags, reason);
up_read(&mm->mmap_sem);
- if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
+ if (likely(must_wait && !READ_ONCE(ctx->released) &&
(return_to_userland ? !signal_pending(current) :
!fatal_signal_pending(current)))) {
wake_up_poll(&ctx->fd_wqh, POLLIN);
set_current_state(TASK_KILLABLE);
if (ewq->msg.event == 0)
break;
- if (ACCESS_ONCE(ctx->released) ||
+ if (READ_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
/*
* &ewq->wq may be queued in fork_event, but
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
- ACCESS_ONCE(ctx->released) = true;
+ WRITE_ONCE(ctx->released, true);
if (!mmget_not_zero(mm))
goto wakeup;
* a transiently forward state. Instead, we can see the LSN in a
* transiently behind state if we happen to race with a cycle wrap.
*/
- cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
+ cur_cycle = READ_ONCE(log->l_curr_cycle);
smp_rmb();
- cur_block = READ_ONCE(log->l_curr_block);
+ cur_block = ACCESS_ONCE(log->l_curr_block);
if ((CYCLE_LSN(lsn) > cur_cycle) ||
(CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
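The comment above describes an asymmetric guarantee: the reader may observe a transiently behind cycle/block pair, but never one that is ahead. One write-side ordering that produces exactly that property is sketched below with C11 relaxed atomics and fences standing in for the kernel's ACCESS_ONCE()/smp_wmb()/smp_rmb(); this is an illustration of the ordering argument, not the XFS update path itself:

#include <stdatomic.h>

static _Atomic unsigned int cur_cycle;  /* illustrative stand-ins for   */
static _Atomic unsigned int cur_block;  /* l_curr_cycle / l_curr_block  */

/* Writer: publish the block first, then the cycle. */
static void writer_advance(unsigned int cycle, unsigned int block)
{
        atomic_store_explicit(&cur_block, block, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
        atomic_store_explicit(&cur_cycle, cycle, memory_order_relaxed);
}

/*
 * Reader: cycle first, fence, then block.  A racing reader can pair an
 * old cycle with a freshly reset block ("behind"), but never a new
 * cycle with the pre-wrap block ("ahead").
 */
static void reader_sample(unsigned int *cycle, unsigned int *block)
{
        *cycle = atomic_load_explicit(&cur_cycle, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
        *block = atomic_load_explicit(&cur_block, memory_order_relaxed);
}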
typeof(*ptr) old, new; \
\
do { \
- old = READ_ONCE(*ptr); \
+ old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
typeof(*ptr) old, new; \
\
do { \
- old = READ_ONCE(*ptr); \
+ old = ACCESS_ONCE(*ptr); \
new = old & ~clear; \
} while (!(old & test) && \
cmpxchg(ptr, old, new) != old); \
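Both macro bodies above are instances of the standard lockless read-modify-write loop: snapshot the word once, compute the new value from that snapshot, and let cmpxchg() detect whether anyone raced in between, retrying if so. A self-contained sketch of the first variant, with GCC's __sync_val_compare_and_swap() standing in for cmpxchg() and an illustrative function name:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* Set @bits under @mask in *@word without taking a lock. */
static unsigned long set_mask_bits_sketch(unsigned long *word,
                                          unsigned long mask,
                                          unsigned long bits)
{
        unsigned long old, new;

        do {
                old = ACCESS_ONCE(*word);       /* one snapshot ...     */
                new = (old & ~mask) | bits;     /* ... derive from it   */
                /* retry if another CPU modified *word in the meantime */
        } while (__sync_val_compare_and_swap(word, old, new) != old);

        return old;
}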
/* Returns how many objects can be queued, < 0 indicates over limit. */
static inline int dql_avail(const struct dql *dql)
{
- return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
+ return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
}
/* Record number of completed objects and recalculate the limit. */
static inline bool is_huge_zero_page(struct page *page)
{
- return READ_ONCE(huge_zero_page) == page;
+ return ACCESS_ONCE(huge_zero_page) == page;
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
static inline int team_num_to_port_index(struct team *team, unsigned int num)
{
- int en_port_count = READ_ONCE(team->en_port_count);
+ int en_port_count = ACCESS_ONCE(team->en_port_count);
if (unlikely(!en_port_count))
return 0;
*/
static inline bool llist_empty(const struct llist_head *head)
{
- return READ_ONCE(head->first) == NULL;
+ return ACCESS_ONCE(head->first) == NULL;
}
static inline struct llist_node *llist_next(struct llist_node *node)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- WRITE_ONCE(dev->power.last_busy, jiffies);
+ ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
- return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
+ return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
}
static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
- return READ_ONCE(ipvs->sysctl_sync_refresh_period);
+ return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
}
static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
- return READ_ONCE(ipvs->sysctl_sync_ports);
+ return ACCESS_ONCE(ipvs->sysctl_sync_ports);
}
static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
again:
smp_rmb();
rcu_read_lock();
- res = to_acct(READ_ONCE(ns->bacct));
+ res = to_acct(ACCESS_ONCE(ns->bacct));
if (!res) {
rcu_read_unlock();
return NULL;
}
rcu_read_unlock();
mutex_lock(&res->lock);
- if (res != to_acct(READ_ONCE(ns->bacct))) {
+ if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
mutex_unlock(&res->lock);
acct_put(res);
goto again;
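The acct hunks above combine the forced load with a re-validation step: look the pointer up locklessly, acquire the object, then re-read the pointer and start over if it has been republished in the meantime. A rough sketch of that retry shape, with the locking and refcounting reduced to comments and all names invented for illustration:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct resource {
        int dummy;      /* a lock and refcount would live here */
};

static struct resource *current_res;    /* republished by writers */

static struct resource *grab_current_sketch(void)
{
        struct resource *res;

again:
        res = ACCESS_ONCE(current_res); /* optimistic, lockless read */
        if (!res)
                return NULL;

        /* ... take the object's lock / reference here ... */

        /*
         * Re-check: if the pointer moved on while we were acquiring,
         * release what we grabbed and start over.
         */
        if (res != ACCESS_ONCE(current_res))
                goto again;

        return res;
}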
again:
rcu_read_lock();
- ctx = READ_ONCE(event->ctx);
+ ctx = ACCESS_ONCE(event->ctx);
if (!atomic_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
if (!rb)
goto aux_unlock;
- aux_offset = READ_ONCE(rb->user_page->aux_offset);
- aux_size = READ_ONCE(rb->user_page->aux_size);
+ aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
+ aux_size = ACCESS_ONCE(rb->user_page->aux_size);
if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
goto aux_unlock;
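aux_offset and aux_size live in a page that userspace can write, so they are fetched exactly once into locals and every later check and use operates on those locals; re-reading the shared page would let the values change between validation and use. A hedged sketch of that single-fetch-then-validate pattern, with made-up structure, field, and limit names:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct shared_page {                    /* mapped writable by userspace */
        unsigned long aux_offset;
        unsigned long aux_size;
};

static int setup_aux_sketch(struct shared_page *upg,
                            unsigned long data_end, unsigned long max_size)
{
        /* Fetch each user-controlled field exactly once ... */
        unsigned long offset = ACCESS_ONCE(upg->aux_offset);
        unsigned long size   = ACCESS_ONCE(upg->aux_size);

        /* ... and validate and use only the local copies afterwards. */
        if (offset < data_end)
                return -1;
        if (size > max_size || offset + size < offset)
                return -1;

        /* 'offset' and 'size' can no longer change under us. */
        return 0;
}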
* (B) <-> (C) ordering is still observed by the pmu driver.
*/
if (!rb->aux_overwrite) {
- aux_tail = READ_ONCE(rb->user_page->aux_tail);
+ aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
if (aux_head - aux_tail < perf_aux_size(rb))
handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
* Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
* can't confuse the checks below.
*/
- int exit_state = READ_ONCE(p->exit_state);
+ int exit_state = ACCESS_ONCE(p->exit_state);
int ret;
if (unlikely(exit_state == EXIT_DEAD))
* if it happened, we have to fail the write.
*/
barrier();
- if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
+ if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
local_dec(&cpu_buffer->committing);
local_dec(&cpu_buffer->commits);
return NULL;
static inline void *event_file_data(struct file *filp)
{
- return READ_ONCE(file_inode(filp)->i_private);
+ return ACCESS_ONCE(file_inode(filp)->i_private);
}
extern struct mutex event_mutex;
{
unsigned long this_size, flags; unsigned long *p, *top, *start;
static int tracer_frame;
- int frame_size = READ_ONCE(tracer_frame);
+ int frame_size = ACCESS_ONCE(tracer_frame);
int i, x;
this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
int proc_setgroups_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
- unsigned long userns_flags = READ_ONCE(ns->flags);
+ unsigned long userns_flags = ACCESS_ONCE(ns->flags);
seq_printf(seq, "%s\n",
(userns_flags & USERNS_SETGROUPS_ALLOWED) ?
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
smp_read_barrier_depends();
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = ACCESS_ONCE(shortcut->next_node);
}
node = assoc_array_ptr_to_node(cursor);
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = ACCESS_ONCE(node->slots[slot]);
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
smp_read_barrier_depends();
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = ACCESS_ONCE(node->slots[slot]);
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = READ_ONCE(node->back_pointer);
+ parent = ACCESS_ONCE(node->back_pointer);
slot = node->parent_slot;
if (parent == stop)
return 0;
shortcut = assoc_array_ptr_to_shortcut(parent);
smp_read_barrier_depends();
cursor = parent;
- parent = READ_ONCE(shortcut->back_pointer);
+ parent = ACCESS_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = READ_ONCE(array->root);
+ struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
if (!root)
return 0;
pr_devel("-->%s()\n", __func__);
- cursor = READ_ONCE(array->root);
+ cursor = ACCESS_ONCE(array->root);
if (!cursor)
return assoc_array_walk_tree_empty;
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = ACCESS_ONCE(node->slots[slot]);
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = ACCESS_ONCE(shortcut->next_node);
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = ACCESS_ONCE(node->slots[slot]);
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
unsigned int ovlimit, completed, num_queued;
bool all_prev_completed;
- num_queued = READ_ONCE(dql->num_queued);
+ num_queued = ACCESS_ONCE(dql->num_queued);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
struct llist_node *first;
do {
- new_last->next = first = READ_ONCE(head->first);
+ new_last->next = first = ACCESS_ONCE(head->first);
} while (cmpxchg(&head->first, first, new_first) != first);
return !first;
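The llist hunk above is a lock-free push: snapshot the current head once, link the new entry in front of it, and publish the new head with cmpxchg(), retrying if another producer won the race. A single-node userspace sketch of the same shape (illustrative type names, GCC builtin in place of cmpxchg()):

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

struct lnode {
        struct lnode *next;
};

struct lhead {
        struct lnode *first;
};

/* Push one node; returns nonzero if the list was empty beforehand. */
static int llist_push_sketch(struct lnode *node, struct lhead *head)
{
        struct lnode *first;

        do {
                first = ACCESS_ONCE(head->first);
                node->next = first;             /* link before publishing */
        } while (__sync_val_compare_and_swap(&head->first, first, node)
                 != first);

        return first == NULL;
}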
rcu_read_lock();
for (i = 0; i < depth; i++, d = p) {
- p = READ_ONCE(d->d_parent);
- array[i] = READ_ONCE(d->d_name.name);
+ p = ACCESS_ONCE(d->d_parent);
+ array[i] = ACCESS_ONCE(d->d_name.name);
if (p == d) {
if (i)
array[i] = "";
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
- return READ_ONCE(pgdata->split_queue_len);
+ return ACCESS_ONCE(pgdata->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
- cpu = READ_ONCE(rflow->cpu);
+ cpu = ACCESS_ONCE(rflow->cpu);
if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
- unsigned int burst = READ_ONCE(pkt_dev->burst);
+ unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
struct net_device *odev = pkt_dev->odev;
struct netdev_queue *txq;
struct sk_buff *skb;
local_bh_disable();
- for (i = READ_ONCE(f->next_bucket); budget; --budget) {
+ for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
evicted += inet_evict_bucket(f, &f->hash[i]);
i = (i + 1) & (INETFRAGS_HASHSZ - 1);
if (evicted > INETFRAGS_EVICT_MAX)
{
u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
- u32 old = READ_ONCE(*p_tstamp);
+ u32 old = ACCESS_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 new, delta = 0;
if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
goto send_now;
- win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor);
+ win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
if (win_divisor) {
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = READ_ONCE(up->encap_rcv);
+ encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = READ_ONCE(up->encap_destroy);
+ encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
if (!t)
goto out;
- tproto = READ_ONCE(t->parms.proto);
+ tproto = ACCESS_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto out;
t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
if (t) {
- u8 tproto = READ_ONCE(t->parms.proto);
+ u8 tproto = ACCESS_ONCE(t->parms.proto);
if (tproto != ipproto && tproto != 0)
goto drop;
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- tproto = READ_ONCE(t->parms.proto);
+ tproto = ACCESS_ONCE(t->parms.proto);
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
u8 tproto;
int err;
- tproto = READ_ONCE(t->parms.proto);
+ tproto = ACCESS_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
*/
/* if we're overly short, let UDP handle it */
- encap_rcv = READ_ONCE(up->encap_rcv);
+ encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
- encap_destroy = READ_ONCE(up->encap_destroy);
+ encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
*/
rcv = rcu_dereference(sap->rcv_func);
dest = llc_pdu_type(skb);
- sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL;
+ sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
if (unlikely(!sap_handler)) {
if (rcv)
rcv(skb, dev, pt, orig_dev);
kfree_skb(skb);
goto out;
handle_station:
- sta_handler = READ_ONCE(llc_station_handler);
+ sta_handler = ACCESS_ONCE(llc_station_handler);
if (!sta_handler)
goto drop;
sta_handler(skb);
static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
{
- u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
if (rate == STA_STATS_RATE_INVALID)
return -EINVAL;
static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
{
- return READ_ONCE(calipso_ops);
+ return ACCESS_ONCE(calipso_ops);
}
/**
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct sk_buff *msg;
void *hdr;
- u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid);
+ u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
if (!nlportid)
return false;
struct amdtp_am824 *p = s->protocol;
if (port < p->midi_ports)
- WRITE_ONCE(p->midi[port], midi);
+ ACCESS_ONCE(p->midi[port]) = midi;
}
EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
unsigned int data_blocks, unsigned int *syt)
{
struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
- WRITE_ONCE(s->pcm_buffer_pointer, ptr);
+ ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
static void pcm_period_tasklet(unsigned long data)
{
struct amdtp_stream *s = (void *)data;
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
if (pcm)
snd_pcm_period_elapsed(pcm);
s->data_block_counter =
(s->data_block_counter + data_blocks) & 0xff;
- buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
+ buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
(s->data_block_quadlets << CIP_DBS_SHIFT) |
((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
s->data_block_counter);
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
if (queue_out_packet(s, payload_length) < 0)
return -EIO;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
if (queue_in_packet(s) < 0)
return -EIO;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm && pcm_frames > 0)
update_pcm_pointers(s, pcm, pcm_frames);
if (!in_interrupt() && amdtp_stream_running(s))
fw_iso_context_flush_completions(s->context);
- return READ_ONCE(s->pcm_buffer_pointer);
+ return ACCESS_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
void amdtp_stream_update(struct amdtp_stream *s)
{
/* Precomputing. */
- WRITE_ONCE(s->source_node_id_field,
- (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
+ ACCESS_ONCE(s->source_node_id_field) =
+ (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
+ CIP_SID_MASK;
}
EXPORT_SYMBOL(amdtp_stream_update);
{
struct snd_pcm_substream *pcm;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm)
snd_pcm_stop_xrun(pcm);
}
static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
struct snd_pcm_substream *pcm)
{
- WRITE_ONCE(s->pcm, pcm);
+ ACCESS_ONCE(s->pcm) = pcm;
}
static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
struct amdtp_dot *p = s->protocol;
if (port < MAX_MIDI_PORTS)
- WRITE_ONCE(p->midi[port], midi);
+ ACCESS_ONCE(p->midi[port]) = midi;
}
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm) {
read_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm) {
write_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
unsigned int data_blocks,
unsigned int *syt)
{
- struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+ struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
unsigned int pcm_frames;
if (pcm) {
ff->running_status[substream->number] = 0;
ff->rx_midi_error[substream->number] = false;
- WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream);
+ ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream;
return 0;
}
struct snd_ff *ff = substream->rmidi->private_data;
cancel_work_sync(&ff->rx_midi_work[substream->number]);
- WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL);
+ ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL;
return 0;
}
spin_lock_irqsave(&ff->lock, flags);
if (up)
- WRITE_ONCE(ff->tx_midi_substreams[substream->number],
- substream);
+ ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) =
+ substream;
else
- WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL);
+ ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL;
spin_unlock_irqrestore(&ff->lock, flags);
}
int rcode)
{
struct snd_rawmidi_substream *substream =
- READ_ONCE(ff->rx_midi_substreams[port]);
+ ACCESS_ONCE(ff->rx_midi_substreams[port]);
if (rcode_is_permanent_error(rcode)) {
ff->rx_midi_error[port] = true;
static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
{
struct snd_rawmidi_substream *substream =
- READ_ONCE(ff->rx_midi_substreams[port]);
+ ACCESS_ONCE(ff->rx_midi_substreams[port]);
u8 *buf = (u8 *)ff->msg_buf[port];
int i, len;
*/
index = (quad >> 8) & 0xff;
if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[0]);
+ substream = ACCESS_ONCE(ff->tx_midi_substreams[0]);
if (substream != NULL) {
byte = quad & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
/* Message in second port. */
index = (quad >> 24) & 0xff;
if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[1]);
+ substream = ACCESS_ONCE(ff->tx_midi_substreams[1]);
if (substream != NULL) {
byte = (quad >> 16) & 0xff;
snd_rawmidi_receive(substream, &byte, 1);
ptr += count;
if (ptr >= runtime->buffer_size)
ptr -= runtime->buffer_size;
- WRITE_ONCE(isight->buffer_pointer, ptr);
+ ACCESS_ONCE(isight->buffer_pointer) = ptr;
isight->period_counter += count;
if (isight->period_counter >= runtime->period_size) {
struct snd_pcm_runtime *runtime;
unsigned int count1;
- if (!READ_ONCE(isight->pcm_running))
+ if (!ACCESS_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
static void isight_pcm_abort(struct isight *isight)
{
- if (READ_ONCE(isight->pcm_active))
+ if (ACCESS_ONCE(isight->pcm_active))
snd_pcm_stop_xrun(isight->pcm);
}
u32 dropped;
unsigned int count1;
- if (!READ_ONCE(isight->pcm_running))
+ if (!ACCESS_ONCE(isight->pcm_running))
return;
runtime = isight->pcm->runtime;
if (err < 0)
return err;
- WRITE_ONCE(isight->pcm_active, true);
+ ACCESS_ONCE(isight->pcm_active) = true;
return 0;
}
{
struct isight *isight = substream->private_data;
- WRITE_ONCE(isight->pcm_active, false);
+ ACCESS_ONCE(isight->pcm_active) = false;
mutex_lock(&isight->mutex);
isight_stop_streaming(isight);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- WRITE_ONCE(isight->pcm_running, true);
+ ACCESS_ONCE(isight->pcm_running) = true;
break;
case SNDRV_PCM_TRIGGER_STOP:
- WRITE_ONCE(isight->pcm_running, false);
+ ACCESS_ONCE(isight->pcm_running) = false;
break;
default:
return -EINVAL;
{
struct isight *isight = substream->private_data;
- return READ_ONCE(isight->buffer_pointer);
+ return ACCESS_ONCE(isight->buffer_pointer);
}
static int isight_create_pcm(struct isight *isight)
if (p->midi_ports)
read_midi_messages(s, buffer, data_blocks);
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm->runtime, buffer, data_blocks);
if (p->midi_ports)
write_midi_messages(s, buffer, data_blocks);
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm->runtime, buffer, data_blocks);
else
}
if (length >= 1) {
- stream = READ_ONCE(scs->input);
+ stream = ACCESS_ONCE(scs->input);
if (stream)
midi_input_packet(scs, stream, data, length);
}
if (scs->transaction_running)
return;
- stream = READ_ONCE(scs->output);
+ stream = ACCESS_ONCE(scs->output);
if (!stream || scs->error) {
scs->output_idle = true;
wake_up(&scs->idle_wait);
if (up) {
scs->input_escape_count = 0;
- WRITE_ONCE(scs->input, stream);
+ ACCESS_ONCE(scs->input) = stream;
} else {
- WRITE_ONCE(scs->input, NULL);
+ ACCESS_ONCE(scs->input) = NULL;
}
}
scs->transaction_bytes = 0;
scs->error = false;
- WRITE_ONCE(scs->output, stream);
+ ACCESS_ONCE(scs->output) = stream;
schedule_work(&scs->work);
} else {
- WRITE_ONCE(scs->output, NULL);
+ ACCESS_ONCE(scs->output) = NULL;
}
}
static void midi_playback_drain(struct snd_rawmidi_substream *stream)
{
struct snd_pcm_substream *pcm;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm, buffer, data_blocks);
/* This field is not used. */
*syt = 0x0000;
- pcm = READ_ONCE(s->pcm);
+ pcm = ACCESS_ONCE(s->pcm);
if (pcm)
write_pcm_s32(s, pcm, buffer, data_blocks);
else
void *callback_data)
{
struct snd_fw_async_midi_port *port = callback_data;
- struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
/* This port is closed. */
if (substream == NULL)
{
struct snd_fw_async_midi_port *port =
container_of(work, struct snd_fw_async_midi_port, work);
- struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
+ struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
int generation;
/* Under transacting or error state. */
bytes = 3;
}
- substream = READ_ONCE(tscm->tx_midi_substreams[port]);
+ substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]);
if (substream != NULL)
snd_rawmidi_receive(substream, b + 1, bytes);
}
tx_substream = rcu_dereference(i2s->tx_substream);
tx_active = tx_substream && snd_pcm_running(tx_substream);
if (tx_active) {
- unsigned tx_ptr = READ_ONCE(i2s->tx_ptr);
+ unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr);
unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime,
tx_ptr);
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- WRITE_ONCE(i2s->tx_ptr, 0);
+ ACCESS_ONCE(i2s->tx_ptr) = 0;
rcu_assign_pointer(i2s->tx_substream, substream);
xtfpga_pcm_refill_fifo(i2s);
break;
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct xtfpga_i2s *i2s = runtime->private_data;
- snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr);
+ snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr);
return pos < runtime->buffer_size ? pos : 0;
}
unsigned int payload_length, tocopy;
struct snd_rawmidi_substream *midi_receive_substream;
- midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream);
+ midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream);
if (!midi_receive_substream)
return;
BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE);
- midi_out_substream = READ_ONCE(bcd2k->midi_out_substream);
+ midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream);
if (!midi_out_substream)
return;
*/
static inline int atomic_read(const atomic_t *v)
{
- return READ_ONCE((v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
/**
*/
static inline int atomic_read(const atomic_t *v)
{
- return READ_ONCE((v)->counter);
+ return ACCESS_ONCE((v)->counter);
}
/**
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
struct perf_event_mmap_page *pc = mm->userpg;
- u64 head = READ_ONCE(pc->aux_head);
+ u64 head = ACCESS_ONCE(pc->aux_head);
/* Ensure all reads are done after we read the head */
rmb();
{
struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
- u64 head = READ_ONCE(pc->aux_head);
+ u64 head = ACCESS_ONCE(pc->aux_head);
#else
u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif
extern volatile int session_done;
-#define session_done() READ_ONCE(session_done)
+#define session_done() ACCESS_ONCE(session_done)
int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
continue;
} else if (pass && i > last_boosted_vcpu)
break;
- if (!READ_ONCE(vcpu->preempted))
+ if (!ACCESS_ONCE(vcpu->preempted))
continue;
if (vcpu == me)
continue;