Revert "locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns...
author     Jaegeuk Kim <jaegeuk@kernel.org>
           Wed, 10 Jan 2018 01:00:28 +0000 (17:00 -0800)
committer  Jaegeuk Kim <jaegeuk@kernel.org>
           Wed, 10 Jan 2018 01:01:36 +0000 (17:01 -0800)
Change fscrypt only: keep the READ_ONCE()/WRITE_ONCE() conversion in fscrypt and revert the treewide change everywhere else in this tree.

This reverts commit 4bb665c7e388a1ea9770d8b40fc7f48a6e9c4438.

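For readers skimming the diff: the reverted commit mechanically rewrote the two trivial ACCESS_ONCE() patterns into READ_ONCE()/WRITE_ONCE(), and each hunk below simply restores the older forms in this tree. A minimal C sketch of that mapping follows; the macro definitions here are simplified stand-ins (the real ones in include/linux/compiler.h also handle non-scalar types and add instrumentation), and shared/demo() are hypothetical names used only for illustration:

/* Simplified stand-ins, not the kernel's real definitions. */
#define ACCESS_ONCE(x)   (*(volatile __typeof__(x) *)&(x))
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int shared;

int demo(void)
{
        /* Read pattern:  v = ACCESS_ONCE(shared);      became  v = READ_ONCE(shared);      */
        int v = READ_ONCE(shared);

        /* Write pattern: ACCESS_ONCE(shared) = v + 1;  became  WRITE_ONCE(shared, v + 1);  */
        WRITE_ONCE(shared, v + 1);

        return v;
}

Upstream preferred READ_ONCE()/WRITE_ONCE() because the bare volatile cast in ACCESS_ONCE() is only reliable for scalar types; this revert is a tree-local choice, not a reversal of that rationale.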
179 files changed:
arch/arc/kernel/smp.c
arch/arm/include/asm/spinlock.h
arch/arm/mach-tegra/cpuidle-tegra20.c
arch/arm/vdso/vgettimeofday.c
arch/ia64/include/asm/spinlock.h
arch/mips/include/asm/vdso.h
arch/mips/kernel/pm-cps.c
arch/mn10300/kernel/mn10300-serial.c
arch/parisc/include/asm/atomic.h
arch/powerpc/platforms/powernv/opal-msglog.c
arch/s390/include/asm/spinlock.h
arch/s390/lib/spinlock.c
arch/sparc/include/asm/atomic_32.h
arch/tile/gxio/dma_queue.c
arch/tile/include/gxio/dma_queue.h
arch/tile/kernel/ptrace.c
arch/x86/entry/common.c
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/core.c
arch/x86/include/asm/vgtod.h
arch/x86/kernel/espfix_64.c
arch/x86/kernel/nmi.c
arch/x86/kvm/mmu.c
arch/x86/kvm/page_track.c
arch/x86/xen/p2m.c
arch/xtensa/platforms/xtfpga/lcd.c
block/blk-wbt.c
drivers/base/core.c
drivers/base/power/runtime.c
drivers/char/random.c
drivers/clocksource/bcm2835_timer.c
drivers/crypto/caam/jr.c
drivers/crypto/nx/nx-842-powernv.c
drivers/firewire/ohci.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/ruc.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/sdma.h
drivers/infiniband/hw/hfi1/uc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_uc.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/input/misc/regulator-haptic.c
drivers/md/dm-bufio.c
drivers/md/dm-kcopyd.c
drivers/md/dm-stats.c
drivers/md/dm-switch.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid5.c
drivers/misc/mic/scif/scif_rb.c
drivers/misc/mic/scif/scif_rma_list.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sfc/falcon/falcon.c
drivers/net/ethernet/sfc/falcon/farch.c
drivers/net/ethernet/sfc/falcon/nic.h
drivers/net/ethernet/sfc/falcon/tx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sun/niu.c
drivers/net/tap.c
drivers/net/tun.c
drivers/net/wireless/ath/ath5k/desc.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/scsi/qla2xxx/qla_target.c
drivers/target/target_core_user.c
drivers/usb/class/cdc-wdm.c
drivers/usb/core/devio.c
drivers/usb/core/sysfs.c
drivers/usb/gadget/udc/gr_udc.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/uhci-hcd.h
drivers/vfio/vfio.c
drivers/vhost/scsi.c
fs/aio.c
fs/buffer.c
fs/direct-io.c
fs/exec.c
fs/fcntl.c
fs/fs_pin.c
fs/fuse/dev.c
fs/inode.c
fs/namei.c
fs/namespace.c
fs/nfs/dir.c
fs/proc/array.c
fs/proc_namespace.c
fs/splice.c
fs/userfaultfd.c
fs/xfs/xfs_log_priv.h
include/linux/bitops.h
include/linux/dynamic_queue_limits.h
include/linux/huge_mm.h
include/linux/if_team.h
include/linux/llist.h
include/linux/pm_runtime.h
include/net/ip_vs.h
kernel/acct.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/exit.c
kernel/trace/ring_buffer.c
kernel/trace/trace.h
kernel/trace/trace_stack.c
kernel/user_namespace.c
lib/assoc_array.c
lib/dynamic_queue_limits.c
lib/llist.c
lib/vsprintf.c
mm/huge_memory.c
net/core/dev.c
net/core/pktgen.c
net/ipv4/inet_fragment.c
net/ipv4/route.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/ip6_tunnel.c
net/ipv6/udp.c
net/llc/llc_input.c
net/mac80211/sta_info.c
net/netlabel/netlabel_calipso.c
net/wireless/nl80211.c
sound/firewire/amdtp-am824.c
sound/firewire/amdtp-stream.c
sound/firewire/amdtp-stream.h
sound/firewire/digi00x/amdtp-dot.c
sound/firewire/fireface/amdtp-ff.c
sound/firewire/fireface/ff-midi.c
sound/firewire/fireface/ff-transaction.c
sound/firewire/isight.c
sound/firewire/motu/amdtp-motu.c
sound/firewire/oxfw/oxfw-scs1x.c
sound/firewire/tascam/amdtp-tascam.c
sound/firewire/tascam/tascam-transaction.c
sound/soc/xtensa/xtfpga-i2s.c
sound/usb/bcd2000/bcd2000.c
tools/arch/x86/include/asm/atomic.h
tools/include/asm-generic/atomic-gcc.h
tools/perf/util/auxtrace.h
tools/perf/util/session.h
virt/kvm/kvm_main.c

diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index efe8b4200a676529a9f3f0af52d50faca176a1e3..6df9d94a953763eca43b20f02f1897308ab1ee7a 100644
@@ -250,7 +250,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
         * and read back old value
         */
        do {
-               new = old = READ_ONCE(*ipi_data_ptr);
+               new = old = ACCESS_ONCE(*ipi_data_ptr);
                new |= 1U << msg;
        } while (cmpxchg(ipi_data_ptr, old, new) != old);
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 333351f7a10be244157b53be8d46b14b1828a5a0..25cb465c8538b22cc68de7cdd39e2852a8382f16 100644
@@ -74,7 +74,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
        while (lockval.tickets.next != lockval.tickets.owner) {
                wfe();
-               lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
+               lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
        }
 
        smp_mb();
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 3f24addd7972b9bb3ede7b79cf9983e781389956..76e4c83cd5c8dd54e1f644618beef62087e0b0b2 100644
@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
        bool entered_lp2 = false;
 
        if (tegra_pending_sgi())
-               WRITE_ONCE(abort_flag, true);
+               ACCESS_ONCE(abort_flag) = true;
 
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index a9dd619c6c290042d052f03c351d1dae4760f7e5..79214d5ff097044dc59e0394c87749e68d8f4d92 100644
@@ -35,7 +35,7 @@ static notrace u32 __vdso_read_begin(const struct vdso_data *vdata)
 {
        u32 seq;
 repeat:
-       seq = READ_ONCE(vdata->seq_count);
+       seq = ACCESS_ONCE(vdata->seq_count);
        if (seq & 1) {
                cpu_relax();
                goto repeat;
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 047873500b1eec427d5cf66bd46ac0dfa406b226..aa057abd948ec8eb01447e978e50f29616b0cd8a 100644
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-       int tmp = READ_ONCE(lock->lock);
+       int tmp = ACCESS_ONCE(lock->lock);
 
        if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
                return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
 
        asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-       WRITE_ONCE(*p, (tmp + 2) & ~1);
+       ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-       long tmp = READ_ONCE(lock->lock);
+       long tmp = ACCESS_ONCE(lock->lock);
 
        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-       long tmp = READ_ONCE(lock->lock);
+       long tmp = ACCESS_ONCE(lock->lock);
 
        return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index 91bf0c2c265cbbe3e2e32ea596dabe22f8aa8662..b7cd6cf77b83e9946f66bad049b02f5bb7f0407b 100644
@@ -99,7 +99,7 @@ static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
        u32 seq;
 
        while (true) {
-               seq = READ_ONCE(data->seq_count);
+               seq = ACCESS_ONCE(data->seq_count);
                if (likely(!(seq & 1))) {
                        /* Paired with smp_wmb() in vdso_data_write_*(). */
                        smp_rmb();
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 421e06dfee728a973452c11e8b51a128efaae032..9dd624c2fe567e9f4ad41f414704c98e265df1a1 100644
@@ -166,7 +166,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
        nc_core_ready_count = nc_addr;
 
        /* Ensure ready_count is zero-initialised before the assembly runs */
-       WRITE_ONCE(*nc_core_ready_count, 0);
+       ACCESS_ONCE(*nc_core_ready_count) = 0;
        coupled_barrier(&per_cpu(pm_barrier, core), online);
 
        /* Run the generated entry code */
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index d7ef1232a82a56da071ad58d0c9dc0b434430944..7ecf69879e2d6ff077296a79605d8f367b70fdd4 100644
@@ -543,7 +543,7 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port)
 
 try_again:
        /* pull chars out of the hat */
-       ix = READ_ONCE(port->rx_outp);
+       ix = ACCESS_ONCE(port->rx_outp);
        if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) {
                if (push && !tport->low_latency)
                        tty_flip_buffer_push(tport);
@@ -1724,7 +1724,7 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
        if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) {
                do {
                        /* pull chars out of the hat */
-                       ix = READ_ONCE(port->rx_outp);
+                       ix = ACCESS_ONCE(port->rx_outp);
                        if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
                                return NO_POLL_CHAR;
 
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 88bae6676c9b6ef3823f6a8590882d43b0d83b22..bc54addd589f69daf4fb7aae0c23d47ea5c631a1 100644
@@ -261,7 +261,7 @@ atomic64_set(atomic64_t *v, s64 i)
 static __inline__ s64
 atomic64_read(const atomic64_t *v)
 {
-       return READ_ONCE((v)->counter);
+       return ACCESS_ONCE((v)->counter);
 }
 
 #define atomic64_inc(v)                (atomic64_add(   1,(v)))
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index acd3206dfae3477452f11c4a96dfc1638cafae00..7a9cde0cfbd110c97d9229baa1f14633e295a3fe 100644
@@ -43,7 +43,7 @@ ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
        if (!opal_memcons)
                return -ENODEV;
 
-       out_pos = be32_to_cpu(READ_ONCE(opal_memcons->out_pos));
+       out_pos = be32_to_cpu(ACCESS_ONCE(opal_memcons->out_pos));
 
        /* Now we've read out_pos, put a barrier in before reading the new
         * data it points to in conbuf. */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 943239c159fc4acea4edd43bdc0617bc209dfe87..f3f5e0155b10721d175f4c4fb0ac80427e665e37 100644
@@ -131,14 +131,14 @@ extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
 static inline int arch_read_trylock_once(arch_rwlock_t *rw)
 {
-       int old = READ_ONCE(rw->lock);
+       int old = ACCESS_ONCE(rw->lock);
        return likely(old >= 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
 }
 
 static inline int arch_write_trylock_once(arch_rwlock_t *rw)
 {
-       int old = READ_ONCE(rw->lock);
+       int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
 }
@@ -225,7 +225,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        int old;
 
        do {
-               old = READ_ONCE(rw->lock);
+               old = ACCESS_ONCE(rw->lock);
        } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
 }
 
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 2d0af866769566125a0e08cb70a495bb7f003961..1dc85f552f4817fb7187b800155db64bfe958789 100644
@@ -163,8 +163,8 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
-               old = READ_ONCE(rw->lock);
-               owner = READ_ONCE(rw->owner);
+               old = ACCESS_ONCE(rw->lock);
+               owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -179,7 +179,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
        int old;
 
        while (count-- > 0) {
-               old = READ_ONCE(rw->lock);
+               old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@ -203,8 +203,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
-               old = READ_ONCE(rw->lock);
-               owner = READ_ONCE(rw->owner);
+               old = ACCESS_ONCE(rw->lock);
+               owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@ -231,8 +231,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
-               old = READ_ONCE(rw->lock);
-               owner = READ_ONCE(rw->owner);
+               old = ACCESS_ONCE(rw->lock);
+               owner = ACCESS_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
@@ -252,7 +252,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
        int old;
 
        while (count-- > 0) {
-               old = READ_ONCE(rw->lock);
+               old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index d13ce517f4b9946382579a7a4d5e9bbccdb6332e..0c3b3b4a99633e7d3500c19c07393cc1b4633cdc 100644
@@ -32,7 +32,7 @@ void atomic_set(atomic_t *, int);
 
 #define atomic_set_release(v, i)       atomic_set((v), (i))
 
-#define atomic_read(v)          READ_ONCE((v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
 
 #define atomic_add(i, v)       ((void)atomic_add_return( (int)(i), (v)))
 #define atomic_sub(i, v)       ((void)atomic_add_return(-(int)(i), (v)))
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
index b7ba577d82ca36e82a44dd09e1c0fab47d6a0da7..baa60357f8ba20b2e757f0aeebf01b37607d4be8 100644
@@ -163,14 +163,14 @@ int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
                                 int64_t completion_slot, int update)
 {
        if (update) {
-               if (READ_ONCE(dma_queue->hw_complete_count) >
+               if (ACCESS_ONCE(dma_queue->hw_complete_count) >
                    completion_slot)
                        return 1;
 
                __gxio_dma_queue_update_credits(dma_queue);
        }
 
-       return READ_ONCE(dma_queue->hw_complete_count) > completion_slot;
+       return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
 }
 
 EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
index c8fd47edba30f49910f35f0e5080ef27f353bf65..b9e45e37649e324efe95df53695398cfe1d72958 100644
@@ -121,7 +121,7 @@ static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
                 * if the result is LESS than "hw_complete_count".
                 */
                uint64_t complete;
-               complete = READ_ONCE(dma_queue->hw_complete_count);
+               complete = ACCESS_ONCE(dma_queue->hw_complete_count);
                slot |= (complete & 0xffffffffff000000);
                if (slot < complete)
                        slot += 0x1000000;
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d516d61751c2250dbaa60bdb9c0b24dfe09e28e9..e1a078e6828e5915968de2adac4b306d870e421f 100644
@@ -255,7 +255,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 int do_syscall_trace_enter(struct pt_regs *regs)
 {
-       u32 work = READ_ONCE(current_thread_info()->flags);
+       u32 work = ACCESS_ONCE(current_thread_info()->flags);
 
        if ((work & _TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs)) {
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index eaa0ba66cf961a2deca0e5956fb85bc7a8c152cd..03505ffbe1b68d49982db71dfad0c83b04e45246 100644
@@ -75,7 +75,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));
 
-       work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
+       work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
 
        if (unlikely(work & _TIF_SYSCALL_EMU))
                emulated = true;
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 11b13c4b43d55f8d6c8b239f478ecb302d4cfd07..fa8dbfcf7ed37f5677d9185d16e9addb114cf83f 100644
@@ -318,7 +318,7 @@ int gettimeofday(struct timeval *, struct timezone *)
 notrace time_t __vdso_time(time_t *t)
 {
        /* This is atomic on x86 so we don't need any locks. */
-       time_t result = READ_ONCE(gtod->wall_time_sec);
+       time_t result = ACCESS_ONCE(gtod->wall_time_sec);
 
        if (t)
                *t = result;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index fd2d37ebb840a16bfe00c1b484f8e49836093dc2..80534d3c2480013caa8b170c09c0803b7fef55b9 100644
@@ -2118,7 +2118,7 @@ static int x86_pmu_event_init(struct perf_event *event)
                        event->destroy(event);
        }
 
-       if (READ_ONCE(x86_pmu.attr_rdpmc))
+       if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
                event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
 
        return err;
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index fb856c9f04494b6633d9ea8fd3ebb9204cc2c620..52250681f68c7410b30f42d9e424553aaed4f313 100644
@@ -49,7 +49,7 @@ static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
        unsigned ret;
 
 repeat:
-       ret = READ_ONCE(s->seq);
+       ret = ACCESS_ONCE(s->seq);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 7d7715dde901539c3358e90bd0f449b04fe20700..9c4e7ba6870c142921cfbbd07b8bbf45285e5c07 100644
@@ -155,14 +155,14 @@ void init_espfix_ap(int cpu)
        page = cpu/ESPFIX_STACKS_PER_PAGE;
 
        /* Did another CPU already set this up? */
-       stack_page = READ_ONCE(espfix_pages[page]);
+       stack_page = ACCESS_ONCE(espfix_pages[page]);
        if (likely(stack_page))
                goto done;
 
        mutex_lock(&espfix_init_mutex);
 
        /* Did we race on the lock? */
-       stack_page = READ_ONCE(espfix_pages[page]);
+       stack_page = ACCESS_ONCE(espfix_pages[page]);
        if (stack_page)
                goto unlock_done;
 
@@ -200,7 +200,7 @@ void init_espfix_ap(int cpu)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
 
        /* Job is done for this CPU and any CPU which shares this page */
-       WRITE_ONCE(espfix_pages[page], stack_page);
+       ACCESS_ONCE(espfix_pages[page]) = stack_page;
 
 unlock_done:
        mutex_unlock(&espfix_init_mutex);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 18bc9b51ac9b99ffaf51e85daf490b0ba108bcc9..35aafc95e4b8a505491d7079a06e8e1b8e16c968 100644
@@ -105,7 +105,7 @@ static void nmi_max_handler(struct irq_work *w)
 {
        struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
        int remainder_ns, decimal_msecs;
-       u64 whole_msecs = READ_ONCE(a->max_duration);
+       u64 whole_msecs = ACCESS_ONCE(a->max_duration);
 
        remainder_ns = do_div(whole_msecs, (1000 * 1000));
        decimal_msecs = remainder_ns / 1000;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a119b361b8b7a9c916e4df7ecd9e69622e64c1b3..7a69cf053711197df9a0f2ec284ef5a436c42514 100644
@@ -443,7 +443,7 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
 
 static u64 __get_spte_lockless(u64 *sptep)
 {
-       return READ_ONCE(*sptep);
+       return ACCESS_ONCE(*sptep);
 }
 #else
 union split_spte {
@@ -4819,7 +4819,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         * If we don't have indirect shadow pages, it means no page is
         * write-protected, so we can exit simply.
         */
-       if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+       if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
                return;
 
        remote_flush = local_flush = false;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 01c1371f39f8cd91912d9f4027587bfe6479dc29..ea67dc876316487f4fff2bf2e6a5193510086aee 100644
@@ -157,7 +157,7 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
                return false;
 
        index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
-       return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
+       return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
 }
 
 void kvm_page_track_cleanup(struct kvm *kvm)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 13b4f19b9131353f8617706ccb42b60082649fbc..6083ba462f350d99c5f7b7caa0c47283823ac2e7 100644
@@ -547,7 +547,7 @@ int xen_alloc_p2m_entry(unsigned long pfn)
        if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
                topidx = p2m_top_index(pfn);
                top_mfn_p = &p2m_top_mfn[topidx];
-               mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);
+               mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
 
                BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
 
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 2f7eb66c23ec9b340d1a1ff03c6e5255842eb8f0..4dc0c1b43f4bfd917a65fc04b225a790d0fdfeaa 100644
 static void lcd_put_byte(u8 *addr, u8 data)
 {
 #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-       WRITE_ONCE(*addr, data);
+       ACCESS_ONCE(*addr) = data;
 #else
-       WRITE_ONCE(*addr, data & 0xf0);
-       WRITE_ONCE(*addr, (data << 4) & 0xf0);
+       ACCESS_ONCE(*addr) = data & 0xf0;
+       ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
 #endif
 }
 
 static int __init lcd_init(void)
 {
-       WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+       ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
        mdelay(5);
-       WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+       ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
        udelay(200);
-       WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT);
+       ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
        udelay(50);
 #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
-       WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
+       ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
        udelay(50);
        lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
        udelay(50);
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index d822530e6aeade81a7c9b2b3d9c6a6cccc0bb351..6a9a0f03a67bd9ac9839ba667fb407a03694a304 100644
@@ -261,7 +261,7 @@ static inline bool stat_sample_valid(struct blk_rq_stat *stat)
 
 static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
 {
-       u64 now, issue = READ_ONCE(rwb->sync_issue);
+       u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
 
        if (!issue || !rwb->sync_cookie)
                return 0;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4b8ba2a75a4d1d026903616f508df41db10d3869..12ebd055724cd6cdc01edff013583f38979fe92a 100644
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
         * so be careful about accessing it.  dev->bus and dev->class should
         * never change once they are set, so they don't need special care.
         */
-       drv = READ_ONCE(dev->driver);
+       drv = ACCESS_ONCE(dev->driver);
        return drv ? drv->name :
                        (dev->bus ? dev->bus->name :
                        (dev->class ? dev->class->name : ""));
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 41d7c2b99f69242236e969a19bcf7c710cef0cd7..7bcf80fa9adad4d45b42d3c0eec10d9425dabbe0 100644
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
        if (!dev->power.use_autosuspend)
                goto out;
 
-       autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
+       autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
        if (autosuspend_delay < 0)
                goto out;
 
-       last_busy = READ_ONCE(dev->power.last_busy);
+       last_busy = ACCESS_ONCE(dev->power.last_busy);
        elapsed = jiffies - last_busy;
        if (elapsed < 0)
                goto out;       /* jiffies has wrapped around. */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6c7ccac2679e7c4b0543ea9dbbdcca20196bdb64..8ad92707e45f23b890203d5c5468d47473acf636 100644
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
                return;
 
 retry:
-       entropy_count = orig = READ_ONCE(r->entropy_count);
+       entropy_count = orig = ACCESS_ONCE(r->entropy_count);
        if (nfrac < 0) {
                /* Debit */
                entropy_count += nfrac;
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 
        /* Can we pull enough? */
 retry:
-       entropy_count = orig = READ_ONCE(r->entropy_count);
+       entropy_count = orig = ACCESS_ONCE(r->entropy_count);
        ibytes = nbytes;
        /* never pull more than available */
        have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 60da2537bef9303308498187c4fa542bca68aac6..39e489a96ad74f0f8e341aa2b3f9c8e6acfc4bfe 100644
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
        if (readl_relaxed(timer->control) & timer->match_mask) {
                writel_relaxed(timer->match_mask, timer->control);
 
-               event_handler = READ_ONCE(timer->evt.event_handler);
+               event_handler = ACCESS_ONCE(timer->evt.event_handler);
                if (event_handler)
                        event_handler(&timer->evt);
                return IRQ_HANDLED;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f4f258075b895a8c55fbd836d35b1b6b399beed8..d258953ff488331486334f8fc24a99c1bc5d0ca1 100644
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 
        while (rd_reg32(&jrp->rregs->outring_used)) {
 
-               head = READ_ONCE(jrp->head);
+               head = ACCESS_ONCE(jrp->head);
 
                spin_lock(&jrp->outlock);
 
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
        spin_lock_bh(&jrp->inplock);
 
        head = jrp->head;
-       tail = READ_ONCE(jrp->tail);
+       tail = ACCESS_ONCE(jrp->tail);
 
        if (!rd_reg32(&jrp->rregs->inpring_avail) ||
            CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 0f20f5ec96179a3505ac6f042242329ad18125df..874ddf5e9087e5a0fc62e09bad3889f964c57d5f 100644
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
        ktime_t start = wmem->start, now = ktime_get();
        ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
 
-       while (!(READ_ONCE(csb->flags) & CSB_V)) {
+       while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
                cpu_relax();
                now = ktime_get();
                if (ktime_after(now, timeout))
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ccf52368a073ecf0518b946853a8f7980087d8f1..8bf89267dc252f260a3fc6e640c0c1809f04ef2d 100644
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
        __le16 res_count, next_res_count;
 
        i = ar_first_buffer_index(ctx);
-       res_count = READ_ONCE(ctx->descriptors[i].res_count);
+       res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
 
        /* A buffer that is not yet completely filled must be the last one. */
        while (i != last && res_count == 0) {
@@ -742,7 +742,8 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
                /* Peek at the next descriptor. */
                next_i = ar_next_buffer_index(i);
                rmb(); /* read descriptors in order */
-               next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
+               next_res_count = ACCESS_ONCE(
+                               ctx->descriptors[next_i].res_count);
                /*
                 * If the next descriptor is still empty, we must stop at this
                 * descriptor.
@@ -758,7 +759,8 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
                        if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
                                next_i = ar_next_buffer_index(next_i);
                                rmb();
-                               next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
+                               next_res_count = ACCESS_ONCE(
+                                       ctx->descriptors[next_i].res_count);
                                if (next_res_count != cpu_to_le16(PAGE_SIZE))
                                        goto next_buffer_is_active;
                        }
@@ -2810,7 +2812,7 @@ static int handle_ir_buffer_fill(struct context *context,
        u32 buffer_dma;
 
        req_count = le16_to_cpu(last->req_count);
-       res_count = le16_to_cpu(READ_ONCE(last->res_count));
+       res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
        completed = req_count - res_count;
        buffer_dma = le32_to_cpu(last->data_address);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 303b5e099a98e6c3f6fe355f520ca61b133a6534..333bad74906784f7ecc7a794c3d1d4e1dd915978 100644
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-       uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
+       uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;
 
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
        amdgpu_fence_process(ring);
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
-       emitted += READ_ONCE(ring->fence_drv.sync_seq);
+       emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 6149a47fe63d5edbd4f2561bd5e500f3a4078746..7171968f261e1a13094f3c94b2d649a382b2f6a0 100644
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
        seq_printf(m, "\t0x%08x: %12ld byte %s",
                   id, amdgpu_bo_size(bo), placement);
 
-       offset = READ_ONCE(bo->tbo.mem.start);
+       offset = ACCESS_ONCE(bo->tbo.mem.start);
        if (offset != AMDGPU_BO_INVALID_OFFSET)
                seq_printf(m, " @ 0x%010Lx", offset);
 
-       pin_count = READ_ONCE(bo->pin_count);
+       pin_count = ACCESS_ONCE(bo->pin_count);
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
        seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a25f6c72f219358c9b436a714cf9ec658b8bcaf9..38cea6fb25a8b9221d64b43da04c4268a2c986b8 100644
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
        if (kfifo_is_empty(&entity->job_queue))
                return false;
 
-       if (READ_ONCE(entity->dependency))
+       if (ACCESS_ONCE(entity->dependency))
                return false;
 
        return true;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index cf3deb283da561914ee26904b57915fcf39ac24b..3386452bd2f057239c4ae63bea1eeb6e040c3acc 100644
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
        else
                r = 0;
 
-       cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+       cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_put_unlocked(gobj);
        return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                r = ret;
 
        /* Flush HDP cache via MMIO if necessary */
-       cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+       cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 6ac094ee898356593d73c1b615d8c1b9115fc892..a552e4ea54407bf18e00974ac704990cf6179702 100644
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
                if (unlikely(drm_is_render_client(file_priv)))
                        require_exist = true;
 
-               if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+               if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
                        return -EACCES;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 97bea2e1aa6a7739d6c73a343258d551bbbc61d2..d9a1e989313641b06f32ffdd4677ce8aa7e32802 100644
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                if (sc->flags & SCF_FROZEN) {
                        wait_event_interruptible_timeout(
                                dd->event_queue,
-                               !(READ_ONCE(dd->flags) & HFI1_FROZEN),
+                               !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
                                msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
                        if (dd->flags & HFI1_FROZEN)
                                return -ENOLCK;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 75e740780285d050c41f2aa20a6ad9e888e2acd3..7108a4b5e94cdef45f27722847065388a36a6f85 100644
@@ -1423,14 +1423,14 @@ retry:
                        goto done;
                }
                /* copy from receiver cache line and recalculate */
-               sc->alloc_free = READ_ONCE(sc->free);
+               sc->alloc_free = ACCESS_ONCE(sc->free);
                avail =
                        (unsigned long)sc->credits -
                        (sc->fill - sc->alloc_free);
                if (blocks > avail) {
                        /* still no room, actively update */
                        sc_release_update(sc);
-                       sc->alloc_free = READ_ONCE(sc->free);
+                       sc->alloc_free = ACCESS_ONCE(sc->free);
                        trycount++;
                        goto retry;
                }
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc)
 
        /* call sent buffer callbacks */
        code = -1;                              /* code not yet set */
-       head = READ_ONCE(sc->sr_head);  /* snapshot the head */
+       head = ACCESS_ONCE(sc->sr_head);        /* snapshot the head */
        tail = sc->sr_tail;
        while (head != tail) {
                pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a7fc664f0d4e1c3195b216014eb1337bdac23cc4..b3291f0fde9a41ccf1119822a857ff5fd717a141 100644
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
 
 again:
        smp_read_barrier_depends(); /* see post_one_send() */
-       if (sqp->s_last == READ_ONCE(sqp->s_head))
+       if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
 
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 08346d25441cb48c6deda6c27dc52c1f19d14853..6781bcdb10b3153246d1f3e7b0f8bc056657dde3 100644
@@ -1725,7 +1725,7 @@ retry:
 
                swhead = sde->descq_head & sde->sdma_mask;
                /* this code is really bad for cache line trading */
-               swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+               swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
                cnt = sde->descq_cnt;
 
                if (swhead < swtail)
@@ -1872,7 +1872,7 @@ retry:
        if ((status & sde->idle_mask) && !idle_check_done) {
                u16 swtail;
 
-               swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+               swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
                if (swtail != hwhead) {
                        hwhead = (u16)read_sde_csr(sde, SD(HEAD));
                        idle_check_done = 1;
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
        u16 len;
 
        head = sde->descq_head & sde->sdma_mask;
-       tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
+       tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
        seq_printf(s, SDE_FMT, sde->this_idx,
                   sde->cpu,
                   sdma_state_name(sde->state.current_state),
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
                return -EINVAL;
        }
        while (1) {
-               nr = ffz(READ_ONCE(sde->ahg_bits));
+               nr = ffz(ACCESS_ONCE(sde->ahg_bits));
                if (nr > 31) {
                        trace_hfi1_ahg_allocate(sde, -ENOSPC);
                        return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 374c59784950650e7845be591496b5be6b1403a4..107011d8613b9127fe63162b2c7080bee138e60d 100644
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
 {
        return sde->descq_cnt -
                (sde->descq_tail -
-                READ_ONCE(sde->descq_head)) - 1;
+                ACCESS_ONCE(sde->descq_head)) - 1;
 }
 
 static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 9a31c585427f91790d392d444ca8570a7917e33f..0b646173ca22272fc5a5cca17b231f4675462a1d 100644
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_last == READ_ONCE(qp->s_head))
+               if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                        goto bail;
                /* Check if send work queue is empty. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_cur == READ_ONCE(qp->s_head)) {
+               if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
                        clear_ahg(qp);
                        goto bail;
                }
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 7fec6b984e3e5a8ca621447bc631aa92352dc7a6..2ba74fdd6f153dc33c0c153d09e2be1f73f8ed73 100644
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send */
-               if (qp->s_last == READ_ONCE(qp->s_head))
+               if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
        /* see post_one_send() */
        smp_read_barrier_depends();
-       if (qp->s_cur == READ_ONCE(qp->s_head))
+       if (qp->s_cur == ACCESS_ONCE(qp->s_head))
                goto bail;
 
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 8ec6e8a8d6f76f15dc4759c228f52d1ebb69bd2a..c0c0e0445cbfbd72938fe07520f574515992298b 100644
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                /* Wait until all requests have been freed. */
                wait_event_interruptible(
                        pq->wait,
-                       (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+                       (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
                kfree(pq->reqs);
                kfree(pq->req_in_use);
                kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                        if (ret != -EBUSY) {
                                req->status = ret;
                                WRITE_ONCE(req->has_error, 1);
-                               if (READ_ONCE(req->seqcomp) ==
+                               if (ACCESS_ONCE(req->seqcomp) ==
                                    req->seqsubmitted - 1)
                                        goto free_req;
                                return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
                 */
                if (req->data_len) {
                        iovec = &req->iovs[req->iov_idx];
-                       if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
+                       if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
                                if (++req->iov_idx == req->data_iovs) {
                                        ret = -EFAULT;
                                        goto free_txreq;
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
        } else {
                if (status != SDMA_TXREQ_S_OK)
                        req->status = status;
-               if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
+               if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
                    (READ_ONCE(req->done) ||
                     READ_ONCE(req->has_error))) {
                        user_sdma_free_request(req, false);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8739087f82fa728ecc42cb262ff80..53efbb0b40c4a1137e0de152ccad5036ad423f74 100644
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
 
 again:
        smp_read_barrier_depends(); /* see post_one_send() */
-       if (sqp->s_last == READ_ONCE(sqp->s_head))
+       if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
 
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace4420937c5fde681edf29169a3e36c3..498e2202e72c9d96622d8c21c3e27a473057cbe6 100644
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_last == READ_ONCE(qp->s_head))
+               if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
                        goto bail;
                /* Check if send work queue is empty. */
                smp_read_barrier_depends(); /* see post_one_send() */
-               if (qp->s_cur == READ_ONCE(qp->s_head))
+               if (qp->s_cur == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /*
                 * Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193cea0515bcda715ed536f3983d66ed8..be4907453ac4d031f2b1e7d8ed5ddd09235b0837 100644
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send */
-               if (qp->s_last == READ_ONCE(qp->s_head))
+               if (qp->s_last == ACCESS_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
 
        /* see post_one_send() */
        smp_read_barrier_depends();
-       if (qp->s_cur == READ_ONCE(qp->s_head))
+       if (qp->s_cur == ACCESS_ONCE(qp->s_head))
                goto bail;
 
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index b670cb9d200630215ac38cc591aed2891ac1d25f..22df09ae809e42553e865a2152539b88bd5d79ae 100644
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
        rdi->driver_f.notify_error_qp(qp);
 
        /* Schedule the sending tasklet to drain the send work queue. */
-       if (READ_ONCE(qp->s_last) != qp->s_head)
+       if (ACCESS_ONCE(qp->s_last) != qp->s_head)
                rdi->driver_f.schedule_send(qp);
 
        rvt_clear_mr_refs(qp, 0);
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail(
        if (likely(qp->s_avail))
                return 0;
        smp_read_barrier_depends(); /* see rc.c */
-       slast = READ_ONCE(qp->s_last);
+       slast = ACCESS_ONCE(qp->s_last);
        if (qp->s_head >= slast)
                avail = qp->s_size - (qp->s_head - slast);
        else
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         * ahead and kick the send engine into gear. Otherwise we will always
         * just schedule the send to happen later.
         */
-       call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
+       call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
 
        for (; wr; wr = wr->next) {
                err = rvt_post_one_wr(qp, wr, &call_send);
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index a1db1e5040dcf13937e54199f7d8d696885f90a4..2e8f801932be723c91e3ff4beb643580b66fa4c6 100644
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
 
        haptic->suspended = false;
 
-       magnitude = READ_ONCE(haptic->magnitude);
+       magnitude = ACCESS_ONCE(haptic->magnitude);
        if (magnitude)
                regulator_haptic_set_voltage(haptic, magnitude);
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591aaaa7070bfbd6d32c20993fb9130961f8..8e3adcb46851e8942054f386f405f7596899cf16 100644
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);
 
-       dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
+       dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
 
        /*
         * Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 {
        unsigned long buffers;
 
-       if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+       if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
                if (mutex_trylock(&dm_bufio_clients_lock)) {
                        __cache_size_refresh();
                        mutex_unlock(&dm_bufio_clients_lock);
@@ -1601,7 +1601,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
 
 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-        unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
+        unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
         return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
@@ -1648,7 +1648,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
 
-       return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
+       return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
 }
 
 /*
@@ -1819,7 +1819,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
 
 static unsigned get_max_age_hz(void)
 {
-       unsigned max_age = READ_ONCE(dm_bufio_max_age);
+       unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
 
        if (max_age > UINT_MAX / HZ)
                max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index eb45cc3df31da109bcb9846967cbf8926d7524b8..cf2c67e35eafe75353e016d48efefd9aadd36291 100644
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
 try_again:
        spin_lock_irq(&throttle_spinlock);
 
-       throttle = READ_ONCE(t->throttle);
+       throttle = ACCESS_ONCE(t->throttle);
 
        if (likely(throttle >= 100))
                goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
 
        t->num_io_jobs--;
 
-       if (likely(READ_ONCE(t->throttle) >= 100))
+       if (likely(ACCESS_ONCE(t->throttle) >= 100))
                goto skip_limit;
 
        if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 29bc51084c82be8527e65c9cd157c54b86a708cd..a7868503d1352dfee4927ceeac246d39160685a5 100644
@@ -432,7 +432,7 @@ do_sync_free:
                synchronize_rcu_expedited();
                dm_stat_free(&s->rcu_head);
        } else {
-               WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
+               ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
                call_rcu(&s->rcu_head, dm_stat_free);
        }
        return 0;
@@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
                 */
                last = raw_cpu_ptr(stats->last);
                stats_aux->merged =
-                       (bi_sector == (READ_ONCE(last->last_sector) &&
+                       (bi_sector == (ACCESS_ONCE(last->last_sector) &&
                                       ((bi_rw == WRITE) ==
-                                       (READ_ONCE(last->last_rw) == WRITE))
+                                       (ACCESS_ONCE(last->last_rw) == WRITE))
                                       ));
-               WRITE_ONCE(last->last_sector, end_sector);
-               WRITE_ONCE(last->last_rw, bi_rw);
+               ACCESS_ONCE(last->last_sector) = end_sector;
+               ACCESS_ONCE(last->last_rw) = bi_rw;
        }
 
        rcu_read_lock();
@@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
 
        for_each_possible_cpu(cpu) {
                p = &s->stat_percpu[cpu][x];
-               shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
-               shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
-               shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
-               shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
-               shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
-               shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
-               shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
-               shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
-               shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
-               shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
-               shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
-               shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
+               shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
+               shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
+               shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
+               shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
+               shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
+               shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
+               shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
+               shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
+               shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
+               shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
+               shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
+               shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
                if (s->n_histogram_entries) {
                        unsigned i;
                        for (i = 0; i < s->n_histogram_entries + 1; i++)
-                               shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
+                               shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
                }
        }
 }
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 8d0ba879777e486cbc4f9fc84600fdc7ba82c5ba..4c8de1ff78cac8ad8a575fcbd42c81a93d6bd79e 100644
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
 
        switch_get_position(sctx, region_nr, &region_index, &bit);
 
-       return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
+       return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
                ((1 << sctx->region_table_entry_bits) - 1);
 }
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cfc1b87049529238c5c01978345b81..1e25705209c27fbb8e62f5d096c9dbe157d82c77 100644
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
        struct pool_c *pt = pool->ti->private;
        bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
        enum pool_mode old_mode = get_pool_mode(pool);
-       unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
+       unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
        /*
         * Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index fba93237a78044cafc163779ce2a7085dd8543d6..bda3caca23ca69af2fe97592aa817a29b87851d6 100644
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work)
                verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
                verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
                if (!i) {
-                       unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
+                       unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
 
                        cluster >>= v->data_dev_block_bits;
                        if (unlikely(!cluster))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index df61d94be99f98343a1e678cae1972a53a811f8f..804419635cc7d3c76d82fb790bbc816c3ea0416f 100644
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
 
 static int __dm_get_module_param_int(int *module_param, int min, int max)
 {
-       int param = READ_ONCE(*module_param);
+       int param = ACCESS_ONCE(*module_param);
        int modified_param = 0;
        bool modified = true;
 
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
 unsigned __dm_get_module_param(unsigned *module_param,
                               unsigned def, unsigned max)
 {
-       unsigned param = READ_ONCE(*module_param);
+       unsigned param = ACCESS_ONCE(*module_param);
        unsigned modified_param = 0;
 
        if (!param)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7a1277c7b60befed72b7f74e7bffecbf04fd07ce..98ea86309ceb44e3681277f8796f8ff5c8fda141 100644
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page)
 {
        char *sep = ",";
        size_t len = 0;
-       unsigned long flags = READ_ONCE(rdev->flags);
+       unsigned long flags = ACCESS_ONCE(rdev->flags);
 
        if (test_bit(Faulty, &flags) ||
            (!test_bit(ExternalBbl, &flags) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index de5e5135215e4305915d956e0f5757385263ff50..7aed69a4f6552944f021828c345af704fa1846cb 100644
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
         */
        rcu_read_lock();
        for (i = 0; i < conf->raid_disks; i++) {
-               struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
+               struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
 
                if (rdev == NULL || test_bit(Faulty, &rdev->flags))
                        still_degraded = 1;
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index b665757ca89a853243d929e6051fa993471544d3..637cc468674278bb3a804f896ee588ca0c4e8fe4 100644
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
         * the read barrier in scif_rb_count(..)
         */
        wmb();
-       WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
+       ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
 #ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
-       WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
+       ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
 #endif
 }
 
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
         * scif_rb_space(..)
         */
        mb();
-       WRITE_ONCE(*rb->read_ptr, new_offset);
+       ACCESS_ONCE(*rb->read_ptr) = new_offset;
 #ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
-       WRITE_ONCE(*rb->read_ptr, new_offset);
+       ACCESS_ONCE(*rb->read_ptr) = new_offset;
 #endif
 }
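
For context on the pattern being restored here: ACCESS_ONCE() is the volatile cast that READ_ONCE()/WRITE_ONCE() later replaced, so for the scalar stores above the generated code is the same single access. Below is a minimal userspace sketch of the scif_rb publish sequence, assuming an illustrative ring layout and a compiler-barrier stand-in for wmb() (the real barrier may also emit a fence instruction):

#include <stdint.h>

/* The kernel's historical ACCESS_ONCE(): a volatile cast, one access. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
/* Compiler-barrier stand-in for wmb(); an assumption for this demo. */
#define wmb() __asm__ __volatile__("" ::: "memory")

struct demo_rb {
	uint32_t *write_ptr;           /* offset slot the consumer polls */
	uint32_t current_write_offset; /* producer-local progress */
};

static void demo_rb_commit(struct demo_rb *rb)
{
	/* Make the payload writes visible before publishing the offset. */
	wmb();
	ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
}

The doubled store in the real code under CONFIG_INTEL_MIC_CARD is an X100 silicon workaround and is left out of the sketch.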
 
index a036dbb4101e7011a00374d206ded520ae9959a6..e1ef8daedd5ac273d2f3caa711f2a347f1d2069b 100644 (file)
@@ -277,7 +277,7 @@ retry:
                 * Need to restart list traversal if there has been
                 * an asynchronous list entry deletion.
                 */
-               if (READ_ONCE(ep->rma_info.async_list_del))
+               if (ACCESS_ONCE(ep->rma_info.async_list_del))
                        goto retry;
        }
        mutex_unlock(&ep->rma_info.rma_lock);
index 1ed9529e7bd1de923697731dc0db0ceaa926ef1f..c02cc817a490995498b9764594a632b7581a700c 100644 (file)
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                                unsigned int count;
 
                                slaves = rcu_dereference(bond->slave_arr);
-                               count = slaves ? READ_ONCE(slaves->count) : 0;
+                               count = slaves ? ACCESS_ONCE(slaves->count) : 0;
                                if (likely(count))
                                        tx_slave = slaves->arr[hash_index %
                                                               count];
index 08a4f57cf40966d1ca45cb51c1b8131aa80d65f2..b2db581131b2d49a843c1f548e7341a12e395127 100644 (file)
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        slave = bond_slave_get_rcu(skb->dev);
        bond = slave->bond;
 
-       recv_probe = READ_ONCE(bond->recv_probe);
+       recv_probe = ACCESS_ONCE(bond->recv_probe);
        if (recv_probe) {
                ret = recv_probe(skb, bond, slave);
                if (ret == RX_HANDLER_CONSUMED) {
@@ -3811,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
                else
                        bond_xmit_slave_id(bond, skb, 0);
        } else {
-               int slave_cnt = READ_ONCE(bond->slave_cnt);
+               int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
 
                if (likely(slave_cnt)) {
                        slave_id = bond_rr_gen_slave_id(bond);
@@ -3973,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int count;
 
        slaves = rcu_dereference(bond->slave_arr);
-       count = slaves ? READ_ONCE(slaves->count) : 0;
+       count = slaves ? ACCESS_ONCE(slaves->count) : 0;
        if (likely(count)) {
                slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
                bond_dev_queue_xmit(bond, skb, slave->dev);
index 43f52a8fe708becb0510419fb01d583f3d29e1e2..4ef68f69b58c45322d65f414e06d068ade4ab22d 100644 (file)
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
  */
 static inline int reclaimable(const struct sge_txq *q)
 {
-       int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+       int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
        hw_cidx -= q->cidx;
        return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
 }
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb);
  */
 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
 {
-       int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+       int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
        int reclaim = hw_cidx - q->cidx;
 
        if (reclaim < 0)
index c6e859a27ee634bd5d0e00ce9d64c85531427284..0e3d9f39a80756b42542d60ff1a8df7846cb837a 100644 (file)
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 
        if (wrapped)
                newacc += 65536;
-       WRITE_ONCE(*acc, newacc);
+       ACCESS_ONCE(*acc) = newacc;
 }
 
 static void populate_erx_stats(struct be_adapter *adapter,
index 340e28211135a266b5a955ef432f6e8786d4f23b..0cec06bec63ee1c0085019f0f1ee456b675c2ece 100644 (file)
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
        unsigned int count;
 
        smp_rmb();
-       count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
+       count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
        if (count == 0)
                goto out;
 
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        dma_addr_t phys;
 
        smp_rmb();
-       count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
+       count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
        if (count == (TX_DESC_NUM - 1)) {
                netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
index 2cb9539c931e51f7a18696db9a307e3709c6d2f4..8f326f87a815bf8fd4ea1fe2c464fa9df19b4660 100644 (file)
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 vsi->rx_buf_failed, vsi->rx_page_failed);
        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
+               struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
 
                if (!rx_ring)
                        continue;
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
        }
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
 
                if (!tx_ring)
                        continue;
index e9e04a485e0a765e392afef2f943f8bd84bb3400..05e89864f781c361289f0a3170078a661988389b 100644 (file)
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        }
        rcu_read_lock();
        for (j = 0; j < vsi->num_queue_pairs; j++) {
-               tx_ring = READ_ONCE(vsi->tx_rings[j]);
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
 
                if (!tx_ring)
                        continue;
index 0b93f7c1a05afe031ca06da6e77ff95a858f1b66..ea20aacd5e1d241c6df163af987e9cb7e7893f25 100644 (file)
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                u64 bytes, packets;
                unsigned int start;
 
-               tx_ring = READ_ONCE(vsi->tx_rings[i]);
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
                if (!tx_ring)
                        continue;
                i40e_get_netdev_stats_struct_tx(tx_ring, stats);
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
-               p = READ_ONCE(vsi->tx_rings[q]);
+               p = ACCESS_ONCE(vsi->tx_rings[q]);
 
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
index 97381238eb7c168f0f6887d039ae4d00f1e3a044..d8456c381c99d47b5de90e4680c9af7aaba5bfbd 100644 (file)
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        }
 
        smp_mb(); /* Force any pending update before accessing. */
-       adj = READ_ONCE(pf->ptp_base_adj);
+       adj = ACCESS_ONCE(pf->ptp_base_adj);
 
        freq = adj;
        freq *= ppb;
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
        wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
 
        /* Update the base adjustment value. */
-       WRITE_ONCE(pf->ptp_base_adj, incval);
+       ACCESS_ONCE(pf->ptp_base_adj) = incval;
        smp_mb(); /* Force the above update. */
 }
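
The i40e_ptp hunks restore a classic barrier pairing: the writer stores ptp_base_adj and then issues smp_mb(), while the adjfreq reader issues smp_mb() before loading it. A compressed sketch of that pairing, with __sync_synchronize() standing in for smp_mb() (the variable name follows the driver; everything else is illustrative):

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define smp_mb() __sync_synchronize() /* full-barrier stand-in */

static unsigned long ptp_base_adj;

static void demo_set_increment(unsigned long incval)
{
	ACCESS_ONCE(ptp_base_adj) = incval;
	smp_mb(); /* force the update out before readers sample it */
}

static unsigned long demo_adjfreq_read(void)
{
	smp_mb(); /* order this load after any pending update */
	return ACCESS_ONCE(ptp_base_adj);
}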
 
index 31a3f09df9f75fee5ab62472c64fb07446408f61..58adbf234e07058b0705d847849243fca695609f 100644 (file)
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
 /* write operations, indexed using DWORDS */
 #define wr32(reg, val) \
 do { \
-       u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+       u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
        if (!E1000_REMOVED(hw_addr)) \
                writel((val), &hw_addr[(reg)]); \
 } while (0)
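
The wr32()/igb_rd32() hunks are about surprise removal: hw_addr can be cleared asynchronously, so it is loaded exactly once into a local and both the E1000_REMOVED() check and the MMIO access use that same snapshot. A sketch of the snapshot-then-check idiom (the NULL test and raw pointer write are simplified stand-ins for the driver's helpers):

#include <stdint.h>
#include <stddef.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct demo_hw {
	uint8_t *hw_addr; /* set to NULL by the removal path */
};

static void demo_wr32(struct demo_hw *hw, uint32_t reg, uint32_t val)
{
	/* One load: the check and the use see the same pointer. */
	uint8_t *hw_addr = ACCESS_ONCE(hw->hw_addr);

	if (hw_addr != NULL)
		*(volatile uint32_t *)(hw_addr + reg) = val;
}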
index e83835d9e8ab55e6fb2a00118cb93ade8d6e9fad..b0031c5ff767a4d430d3ac59eebdab89955eda88 100644 (file)
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
 u32 igb_rd32(struct e1000_hw *hw, u32 reg)
 {
        struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
-       u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value = 0;
 
        if (E1000_REMOVED(hw_addr))
index a01409e2e06c810d84ce4486024d9256e5181095..e083732adf649106ee8ae56d665e77649dba6456 100644 (file)
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)
 
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
 
        if (ixgbe_removed(reg_addr))
                return;
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)
 
 static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
 {
-       u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
 
        if (ixgbe_removed(reg_addr))
                return;
index ffcbf28622fa65e6b5a280d5530391328bb85737..879a9c4cef59830e643810c11171efb66ece27ff 100644 (file)
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
  */
 u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
 {
-       u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;
 
        if (ixgbe_removed(reg_addr))
@@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
 
        rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
 
@@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
 
                ixgbe_get_ring_stats64(stats, ring);
        }
        for (i = 0; i < adapter->num_xdp_queues; i++) {
-               struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
 
                ixgbe_get_ring_stats64(stats, ring);
        }
index ae312c45696afd8b85f25d675810029c5ea126a2..86d6924a2b714ad7535e34149078f10f31ca53dc 100644 (file)
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
        }
 
        smp_mb();
-       incval = READ_ONCE(adapter->base_incval);
+       incval = ACCESS_ONCE(adapter->base_incval);
 
        freq = incval;
        freq *= ppb;
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
        }
 
        /* update the base incval used to calculate frequency adjustment */
-       WRITE_ONCE(adapter->base_incval, incval);
+       ACCESS_ONCE(adapter->base_incval) = incval;
        smp_mb();
 
        /* need lock to prevent incorrect read while modifying cyclecounter */
index 9ce07759478f6d41ebf7c38ddb62949da536e1fd..90ecc4b0646210bc9daba9ac15eb47e1b4574b32 100644 (file)
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
 
 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
 {
-       u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;
 
        if (IXGBE_REMOVED(reg_addr))
index c651fefcc3d22b78e3ec1123394cf5604c94e43e..04d8d4ee4f045e9fb929882c06f3bb5803981921 100644 (file)
@@ -182,7 +182,7 @@ struct ixgbevf_info {
 
 static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
 
        if (IXGBE_REMOVED(reg_addr))
                return;
index 3541a7f9d12e5a06f924f80d0fee20c355d1204a..8a32a8f7f9c0c7316665294b5342051b46f8b623 100644 (file)
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
 
        index = cons_index & size_mask;
        cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
-       last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
-       ring_cons = READ_ONCE(ring->cons);
+       last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
+       ring_cons = ACCESS_ONCE(ring->cons);
        ring_index = ring_cons & size_mask;
        stamp_index = ring_index;
 
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
        wmb();
 
        /* we want to dirty this cache line once */
-       WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
-       WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
+       ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
+       ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
 
        if (cq->type == TX_XDP)
                return done < budget;
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_drop;
 
        /* fetch ring->cons far ahead before needing it to avoid stall */
-       ring_cons = READ_ONCE(ring->cons);
+       ring_cons = ACCESS_ONCE(ring->cons);
 
        real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
                                  &inline_ok, &fragptr);
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                smp_rmb();
 
-               ring_cons = READ_ONCE(ring->cons);
+               ring_cons = ACCESS_ONCE(ring->cons);
                if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
                        netif_tx_wake_queue(ring->tx_queue);
                        ring->wake_queue++;
index 5dd5f61e1114bd1f7870fd1b34ed32d002a4d427..50ea69d88480c6108e82192d9b9ed57c985ccaf7 100644 (file)
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
                ring = &vdev->vpaths[i].ring;
 
                /* Truncated to machine word size number of frames */
-               rx_frms = READ_ONCE(ring->stats.rx_frms);
+               rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
 
                /* Did this vpath receive any packets */
                if (ring->stats.prev_rx_frms == rx_frms) {
index a95a46bcd339d824f442170b47f335c630c50b6b..13f72f5b18d20d215b889e2caf3f5c075110832f 100644 (file)
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
        netif_vdbg(efx, intr, efx->net_dev,
                   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
 
-       if (likely(READ_ONCE(efx->irq_soft_enabled))) {
+       if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
                /* Note test interrupts */
                if (context->index == efx->irq_level)
                        efx->last_irq_cpu = raw_smp_processor_id();
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
 {
        struct efx_nic *efx = dev_id;
-       bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
+       bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
        struct efx_channel *channel;
        efx_dword_t reg;
        u32 queues;
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
        bool rx_cont;
        u16 flags = 0;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return 0;
 
        /* Basic packet information */
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
        unsigned int tx_ev_q_label;
        int tx_descs = 0;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return 0;
 
        if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
        int i;
 
        for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
-               if (READ_ONCE(table->entry[i].spec) &
+               if (ACCESS_ONCE(table->entry[i].spec) &
                    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
                        rc = efx_ef10_filter_remove_internal(efx,
                                        1U << EFX_FILTER_PRI_AUTO, i, true);
index 016616a6388057c7107196ae7521543785a5c678..b9cb697b281847a83aa511c577a6c790516f8012 100644 (file)
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data)
        unsigned long pending;
        enum reset_type method;
 
-       pending = READ_ONCE(efx->reset_pending);
+       pending = ACCESS_ONCE(efx->reset_pending);
        method = fls(pending) - 1;
 
        if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
        /* If we're not READY then just leave the flags set as the cue
         * to abort probing or reschedule the reset later.
         */
-       if (READ_ONCE(efx->state) != STATE_READY)
+       if (ACCESS_ONCE(efx->state) != STATE_READY)
                return;
 
        /* efx_process_channel() will no longer read events once a
index 7263275fde4a1d6eb27d7bd3d358904b2a66313a..29614da91cbf919f91841d8e644ab4b246741ec7 100644 (file)
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
        unsigned long pending;
        enum reset_type method;
 
-       pending = READ_ONCE(efx->reset_pending);
+       pending = ACCESS_ONCE(efx->reset_pending);
        method = fls(pending) - 1;
 
        if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
        /* If we're not READY then just leave the flags set as the cue
         * to abort probing or reschedule the reset later.
         */
-       if (READ_ONCE(efx->state) != STATE_READY)
+       if (ACCESS_ONCE(efx->state) != STATE_READY)
                return;
 
        queue_work(reset_workqueue, &efx->reset_work);
index cd8bb472d75813773e645b6bf0e1c196523a38d0..93c713c1f627a77965fb4fafb590995ba27b9a92 100644 (file)
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
                   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
                   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-       if (!likely(READ_ONCE(efx->irq_soft_enabled)))
+       if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
                return IRQ_HANDLED;
 
        /* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
        ef4_oword_t reg;
        int link_speed, isolate;
 
-       isolate = !!READ_ONCE(efx->reset_pending);
+       isolate = !!ACCESS_ONCE(efx->reset_pending);
 
        switch (link_state->speed) {
        case 10000: link_speed = 3; break;
index 494884f6af4afd1510a17111d6c311820f139b0c..05916c710d8c87071a5aed864bb03d6d01bb6c91 100644 (file)
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
        struct ef4_nic *efx = channel->efx;
        int tx_packets = 0;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return 0;
 
        if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
        struct ef4_rx_queue *rx_queue;
        struct ef4_nic *efx = channel->efx;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return;
 
        rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
 irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
 {
        struct ef4_nic *efx = dev_id;
-       bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
+       bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
        ef4_oword_t *int_ker = efx->irq_status.addr;
        irqreturn_t result = IRQ_NONE;
        struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
                   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
                   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-       if (!likely(READ_ONCE(efx->irq_soft_enabled)))
+       if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
                return IRQ_HANDLED;
 
        /* Handle non-event-queue sources */
index 54ca457cdb15dc79f0d5175c83d530ed3c3440a9..a4c4592f60232a886fd8a21d0c0b0ccca21df811 100644 (file)
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
                                         unsigned int write_count)
 {
-       unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
+       unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
 
        if (empty_read_count == 0)
                return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
 
 static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
 {
-       return READ_ONCE(channel->event_test_cpu);
+       return ACCESS_ONCE(channel->event_test_cpu);
 }
 static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
 {
-       return READ_ONCE(efx->last_irq_cpu);
+       return ACCESS_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
index 6486814e97dccee08431dea134739d2840c6c991..6a75f4140a4be3c96782d79a72f7efc6162e2a3f 100644 (file)
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
-       txq1->old_read_count = READ_ONCE(txq1->read_count);
-       txq2->old_read_count = READ_ONCE(txq2->read_count);
+       txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+       txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
 
        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
 
        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
index 86454d25a405ecbcdd3dd604b963659e0d128541..ba45150f53c7a478c447d82be07f6d3174f0222d 100644 (file)
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
        struct efx_nic *efx = channel->efx;
        int tx_packets = 0;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return 0;
 
        if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
        struct efx_rx_queue *rx_queue;
        struct efx_nic *efx = channel->efx;
 
-       if (unlikely(READ_ONCE(efx->reset_pending)))
+       if (unlikely(ACCESS_ONCE(efx->reset_pending)))
                return;
 
        rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
 {
        struct efx_nic *efx = dev_id;
-       bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
+       bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
        efx_oword_t *int_ker = efx->irq_status.addr;
        irqreturn_t result = IRQ_NONE;
        struct efx_channel *channel;
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
                   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
                   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
-       if (!likely(READ_ONCE(efx->irq_soft_enabled)))
+       if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
                return IRQ_HANDLED;
 
        /* Handle non-event-queue sources */
index 7b51b637172465678b106e554055a2ccf970cecb..4d7fb8af880d0f36189f8475a9859b229a7bd078 100644 (file)
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
 static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
                                         unsigned int write_count)
 {
-       unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
+       unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
 
        if (empty_read_count == 0)
                return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
-       return READ_ONCE(channel->event_test_cpu);
+       return ACCESS_ONCE(channel->event_test_cpu);
 }
 static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 {
-       return READ_ONCE(efx->last_irq_cpu);
+       return ACCESS_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
index 56c2db398deff5f250f8cb729618daa61aa58cc7..60cdb97f58e2e315cf690ee58a16004926528583 100644 (file)
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
 
        /* Write host time for specified period or until MC is done */
        while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
-              READ_ONCE(*mc_running)) {
+              ACCESS_ONCE(*mc_running)) {
                struct timespec64 update_time;
                unsigned int host_time;
 
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
                do {
                        pps_get_ts(&now);
                } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
-                        READ_ONCE(*mc_running));
+                        ACCESS_ONCE(*mc_running));
 
                /* Synchronise NIC with single word of time only */
                host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
                       ptp->start.dma_addr);
 
        /* Clear flag that signals MC ready */
-       WRITE_ONCE(*start, 0);
+       ACCESS_ONCE(*start) = 0;
        rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
                                MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
        EFX_WARN_ON_ONCE_PARANOID(rc);
 
        /* Wait for start from MCDI (or timeout) */
        timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
-       while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
+       while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
                udelay(20);     /* Usually start MCDI execution quickly */
                loops++;
        }
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
        if (!time_before(jiffies, timeout))
                ++ptp->sync_timeouts;
 
-       if (READ_ONCE(*start))
+       if (ACCESS_ONCE(*start))
                efx_ptp_send_times(efx, &last_time);
 
        /* Collect results */
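
The efx_ptp_synchronize() loop above polls a DMA word written by the MC firmware; the volatile access is what stops the compiler from hoisting the load out of the loop and spinning on a stale value. A standalone sketch of the bounded poll, with a spin count as an assumption in place of the jiffies timeout:

#include <stdint.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Returns 0 once *start becomes nonzero, -1 on timeout. */
static int demo_wait_for_start(const uint32_t *start, unsigned long max_spins)
{
	unsigned long spins;

	for (spins = 0; spins < max_spins; spins++) {
		if (ACCESS_ONCE(*start))
			return 0;
		/* the real loop udelay()s between samples */
	}
	return -1;
}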
index efb66ea21f27d3d8fd458bba54bdd046a1a936a2..32bf1fecf86406d7a51cc61bd03388f5ad090312 100644 (file)
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
         */
        netif_tx_stop_queue(txq1->core_txq);
        smp_mb();
-       txq1->old_read_count = READ_ONCE(txq1->read_count);
-       txq2->old_read_count = READ_ONCE(txq2->read_count);
+       txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
+       txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
 
        fill_level = max(txq1->insert_count - txq1->old_read_count,
                         txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 
        /* Check whether the hardware queue is now empty */
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        smp_mb();
                        tx_queue->empty_read_count =
index 8ab0fb6892d5d3562e891d574331d816acb69a51..6a4e8e1bbd90e5028ac147f3134a8ec2348d0bb5 100644 (file)
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np,
 
        pkts = dropped = errors = bytes = 0;
 
-       rx_rings = READ_ONCE(np->rx_rings);
+       rx_rings = ACCESS_ONCE(np->rx_rings);
        if (!rx_rings)
                goto no_rings;
 
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np,
 
        pkts = errors = bytes = 0;
 
-       tx_rings = READ_ONCE(np->tx_rings);
+       tx_rings = ACCESS_ONCE(np->tx_rings);
        if (!tx_rings)
                goto no_rings;
 
index 0a886fda01291efb5a6beb0a2b5eb2123c1f05ab..bfd4ded0a53fb015226d0b03ceb9e5dda9f904e5 100644 (file)
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
-       int numvtaps = READ_ONCE(tap->numvtaps);
+       int numvtaps = ACCESS_ONCE(tap->numvtaps);
        __u32 rxq;
 
        if (!numvtaps)
index 112218eb2a4513e4f6491346d0300b7a77d97c65..c91b110f21699b97a81963567f66be4847a222c0 100644 (file)
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
        u32 numqueues = 0;
 
        rcu_read_lock();
-       numqueues = READ_ONCE(tun->numqueues);
+       numqueues = ACCESS_ONCE(tun->numqueues);
 
        txq = __skb_get_hash_symmetric(skb);
        if (txq) {
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
        rcu_read_lock();
        tfile = rcu_dereference(tun->tfiles[txq]);
-       numqueues = READ_ONCE(tun->numqueues);
+       numqueues = ACCESS_ONCE(tun->numqueues);
 
        /* Drop packet if interface is not attached */
        if (txq >= numqueues)
index 80f75139495fc25d3a2e7f085e9deedc3abb20f6..bd8d4392d68b3042594f5d9693816910cea3acbc 100644 (file)
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
 
        tx_status = &desc->ud.ds_tx5212.tx_stat;
 
-       txstat1 = READ_ONCE(tx_status->tx_status_1);
+       txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
 
        /* No frame has been sent, or an error occurred */
        if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
                return -EINPROGRESS;
 
-       txstat0 = READ_ONCE(tx_status->tx_status_0);
+       txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
 
        /*
         * Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
        u32 rxstat0, rxstat1;
 
        rx_status = &desc->ud.ds_rx.rx_stat;
-       rxstat1 = READ_ONCE(rx_status->rx_status_1);
+       rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
 
        /* No frame received / not ready */
        if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
                return -EINPROGRESS;
 
        memset(rs, 0, sizeof(struct ath5k_rx_status));
-       rxstat0 = READ_ONCE(rx_status->rx_status_0);
+       rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
 
        /*
         * Frame receive status
index 2be57be1beae8907464b6a0e74d00cbfeaf6b45f..b3fa8ae804650f7ea838ff59ca84dfb6ca24da90 100644 (file)
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
 
        bus->dpc_running = true;
        wmb();
-       while (READ_ONCE(bus->dpc_triggered)) {
+       while (ACCESS_ONCE(bus->dpc_triggered)) {
                bus->dpc_triggered = false;
                brcmf_sdio_dpc(bus);
                bus->idlecount = 0;
index 53193ee48f201f2142a3dfe6219410374e51b285..9fb40955d5f4f2d16f16d4e994ec5b9f987900df 100644 (file)
@@ -1119,7 +1119,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-       bool calibrating = READ_ONCE(mvm->calibrating);
+       bool calibrating = ACCESS_ONCE(mvm->calibrating);
 
        if (state)
                set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
index 153c29f02c97cf4e6b04864e896f6749bcc565ba..887a504ce64a5e98f714917953461ebe57778291 100644 (file)
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                                return -1;
                } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
                           is_multicast_ether_addr(hdr->addr1)) {
-                       u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+                       u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
                        if (ap_sta_id != IWL_MVM_INVALID_STA)
                                sta_id = ap_sta_id;
@@ -701,7 +701,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
        snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
                tcp_hdrlen(skb);
 
-       dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
+       dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
 
        if (!sta->max_amsdu_len ||
            !ieee80211_is_data_qos(hdr->frame_control) ||
index f25ce3a1ea50347678c5662e5868a3e37b9d139e..a06b6612b6583d6b5efa1d2396bc2060a2ffa37f 100644 (file)
@@ -1247,7 +1247,7 @@ restart:
        spin_lock(&rxq->lock);
        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
-       r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+       r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
        i = rxq->read;
 
        /* W/A 9000 device step A0 wrap-around bug */
index 9ad3f4fe589417ed752e397554575fdc4553fb0e..2e3e013ec95acf94eecb843edef305864cf6939b 100644 (file)
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 
        IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
        txq = trans_pcie->txq[txq_idx];
-       wr_ptr = READ_ONCE(txq->write_ptr);
+       wr_ptr = ACCESS_ONCE(txq->write_ptr);
 
-       while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
+       while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
               !time_after(jiffies,
                           now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
-               u8 write_ptr = READ_ONCE(txq->write_ptr);
+               u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
 
                if (WARN_ONCE(wr_ptr != write_ptr,
                              "WR pointer moved while flushing %d -> %d\n",
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
        spin_lock(&rxq->lock);
 
-       r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+       r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
 
        for (i = rxq->read, j = 0;
             i != r && j < allocated_rb_nums;
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data
                /* Dump RBs is supported only for pre-9000 devices (1 queue) */
                struct iwl_rxq *rxq = &trans_pcie->rxq[0];
                /* RBs */
-               num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
+               num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
                                      & 0x0FFF;
                num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
                len += num_rbs * (sizeof(*data) +
index 357f00d47a5760abb0e12cb63db51c9bfab6d133..a59b54328c07b859fcaf7af1b6be09edc6e41cc5 100644 (file)
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        mac80211_hwsim_monitor_rx(hw, skb, channel);
 
        /* wmediumd mode check */
-       _portid = READ_ONCE(data->wmediumd);
+       _portid = ACCESS_ONCE(data->wmediumd);
 
        if (_portid)
                return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
                                    struct ieee80211_channel *chan)
 {
        struct mac80211_hwsim_data *data = hw->priv;
-       u32 _pid = READ_ONCE(data->wmediumd);
+       u32 _pid = ACCESS_ONCE(data->wmediumd);
 
        if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
                struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
index f946bf88901540d2e5fd815427be3b57ff6afc9f..f05cfc83c9c8d0110c253d0e2331a7e970f72d26 100644 (file)
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
        if (logout_started) {
                bool traced = false;
 
-               while (!READ_ONCE(sess->logout_completed)) {
+               while (!ACCESS_ONCE(sess->logout_completed)) {
                        if (!traced) {
                                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
                                        "%s: waiting for sess %p logout\n",
index 9469695f5871aea064bea2e4b4f65e32742c3cb4..942d094269fba5db66ff7e791dcfaab1c6acec15 100644 (file)
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
        mb = udev->mb_addr;
        tcmu_flush_dcache_range(mb, sizeof(*mb));
 
-       while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
+       while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
 
                struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
                struct tcmu_cmd *cmd;
index fbaa2a90d25dc2ee4847baa582879940925f188e..3e865dbf878c74b2e904925798cd603e5cd0e979 100644 (file)
@@ -483,7 +483,7 @@ static ssize_t wdm_read
        if (rv < 0)
                return -ERESTARTSYS;
 
-       cntr = READ_ONCE(desc->length);
+       cntr = ACCESS_ONCE(desc->length);
        if (cntr == 0) {
                desc->read = 0;
 retry:
index 0d0d2338ef746caeb51305b722734920769b9732..ab245352f102a6bd6821a75be6840474f82e095e 100644 (file)
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount)
 {
        u64 lim;
 
-       lim = READ_ONCE(usbfs_memory_mb);
+       lim = ACCESS_ONCE(usbfs_memory_mb);
        lim <<= 20;
 
        atomic64_add(amount, &usbfs_memory_usage);
index 58d59c5f859258ba73349d6695e0c0d069b0d66c..d930bfda40101457dad980ec432c0b73d7193981 100644 (file)
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
        char *string;
 
        intf = to_usb_interface(dev);
-       string = READ_ONCE(intf->cur_altsetting->string);
+       string = ACCESS_ONCE(intf->cur_altsetting->string);
        if (!string)
                return 0;
        return sprintf(buf, "%s\n", string);
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 
        intf = to_usb_interface(dev);
        udev = interface_to_usbdev(intf);
-       alt = READ_ONCE(intf->cur_altsetting);
+       alt = ACCESS_ONCE(intf->cur_altsetting);
 
        return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
                        "ic%02Xisc%02Xip%02Xin%02X\n",
index 0b59fa50aa301f84cd8ab9b56eb57f52832ad84d..1f9941145746ee4d8b8130d4721e2fb2720fae54 100644 (file)
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
        if (!req->last_desc)
                return 0;
 
-       if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+       if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */
 
        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
        if (!req->curr_desc)
                return 0;
 
-       ctrl = READ_ONCE(req->curr_desc->ctrl);
+       ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */
 
index c86f89babd579391197b4518131c198b028e4682..44924824fa414a28dfbf10a715b658e6b493a462 100644 (file)
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci)
                }
 
                /* find the last TD processed by the controller. */
-               head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
+               head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
                td_start = td;
                td_next = list_prepare_entry(td, &ed->td_list, td_list);
                list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
index f1cc47292a59e9a3887ec7338dbdabf327773d1b..d97f0d9b3ce6ce0ec29ad1d79531082c0e392d9f 100644 (file)
@@ -187,7 +187,7 @@ struct uhci_qh {
  * We need a special accessor for the element pointer because it is
  * subject to asynchronous updates by the controller.
  */
-#define qh_element(qh)         READ_ONCE((qh)->element)
+#define qh_element(qh)         ACCESS_ONCE((qh)->element)
 
 #define LINK_TO_QH(uhci, qh)   (UHCI_PTR_QH((uhci)) | \
                                cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -275,7 +275,7 @@ struct uhci_td {
  * subject to asynchronous updates by the controller.
  */
 #define td_status(uhci, td)            hc32_to_cpu((uhci), \
-                                               READ_ONCE((td)->status))
+                                               ACCESS_ONCE((td)->status))
 
 #define LINK_TO_TD(uhci, td)           (cpu_to_hc32((uhci), (td)->dma_handle))
 
index 2bc3705a99bd2f1a670e96c9f1175795b961c922..f5a86f651f38e12874069281735169a1be26bede 100644 (file)
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
 {
        struct vfio_group *group = data;
        struct vfio_device *device;
-       struct device_driver *drv = READ_ONCE(dev->driver);
+       struct device_driver *drv = ACCESS_ONCE(dev->driver);
        struct vfio_unbound_dev *unbound;
        int ret = -EINVAL;
 
index efc79ff979a766f9304ed0773b262b7b3450820c..e47c5bc3ddcadfa9fd74ff48656c828c76b45d8b 100644 (file)
@@ -930,7 +930,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        continue;
                }
 
-               tpg = READ_ONCE(vs_tpg[*target]);
+               tpg = ACCESS_ONCE(vs_tpg[*target]);
                if (unlikely(!tpg)) {
                        /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
index e6de7715228c92995906c4afa9712d9137161a81..5a2487217072d18b1b5a25b6961e6cdd938375fc 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -576,7 +576,7 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
         * actually has a cancel function, hence the cmpxchg()
         */
 
-       cancel = READ_ONCE(kiocb->ki_cancel);
+       cancel = ACCESS_ONCE(kiocb->ki_cancel);
        do {
                if (!cancel || cancel == KIOCB_CANCELLED)
                        return -EINVAL;
index c0b54e5929d27a9efd331ad53bbb0db5df1cb69f..b96f3b98a6ef9f8fc9a3ac22d1e60996eabf4d50 100644 (file)
@@ -1692,8 +1692,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
        BUG_ON(!PageLocked(page));
 
        if (!page_has_buffers(page))
-               create_empty_buffers(page, 1 << READ_ONCE(inode->i_blkbits),
-                                    b_state);
+               create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
        return page_buffers(page);
 }
 
index 98fe1325da9d07e52135eb728c3d0dd381d35649..b53e66d9abd7030f6b05a6dac4847928c24bf1a0 100644 (file)
@@ -1152,7 +1152,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                      get_block_t get_block, dio_iodone_t end_io,
                      dio_submit_t submit_io, int flags)
 {
-       unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+       unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
        unsigned blkbits = i_blkbits;
        unsigned blocksize_mask = (1 << blkbits) - 1;
        ssize_t retval = -EINVAL;
index 6be2aa0ab26fe26cb37032b99bba656f8d7c6b51..4726c777dd387b3b6a0283a4a382e3ec8f13f157 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1916,7 +1916,7 @@ void set_dumpable(struct mm_struct *mm, int value)
                return;
 
        do {
-               old = READ_ONCE(mm->flags);
+               old = ACCESS_ONCE(mm->flags);
                new = (old & ~MMF_DUMPABLE_MASK) | value;
        } while (cmpxchg(&mm->flags, old, new) != old);
 }
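
set_dumpable() above is the canonical read/modify/cmpxchg retry loop: sample the word once per iteration, build the new value, and retry if another writer slipped in. A userspace analogue with the GCC builtin standing in for cmpxchg() (the two-bit mask is illustrative, not the real MMF_DUMPABLE_MASK):

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define DEMO_MASK 0x3UL /* illustrative two-bit field */

static void demo_set_field(unsigned long *flags, unsigned long value)
{
	unsigned long old, new;

	do {
		old = ACCESS_ONCE(*flags); /* fresh sample each retry */
		new = (old & ~DEMO_MASK) | (value & DEMO_MASK);
	} while (__sync_val_compare_and_swap(flags, old, new) != old);
}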
index 0522e283a4f48c1b5bfc2433de9ad12d82cfc891..0345a46b885654268d8ee37a659e100ebe6fdcf7 100644 (file)
@@ -723,7 +723,7 @@ static void send_sigio_to_task(struct task_struct *p,
         * F_SETSIG can change ->signum lockless in parallel, make
         * sure we read it once and use the same value throughout.
         */
-       int signum = READ_ONCE(fown->signum);
+       int signum = ACCESS_ONCE(fown->signum);
 
        if (!sigio_perm(p, fown, signum))
                return;
index a6497cf8ae53aa2bc7dcc2996887fcb92f1e9820..0d285fd5b44ab6d16d2377f3a19ef7fb6cc06384 100644 (file)
@@ -79,7 +79,7 @@ void mnt_pin_kill(struct mount *m)
        while (1) {
                struct hlist_node *p;
                rcu_read_lock();
-               p = READ_ONCE(m->mnt_pins.first);
+               p = ACCESS_ONCE(m->mnt_pins.first);
                if (!p) {
                        rcu_read_unlock();
                        break;
@@ -93,7 +93,7 @@ void group_pin_kill(struct hlist_head *p)
        while (1) {
                struct hlist_node *q;
                rcu_read_lock();
-               q = READ_ONCE(p->first);
+               q = ACCESS_ONCE(p->first);
                if (!q) {
                        rcu_read_unlock();
                        break;
index a42d89371748e51140c26a57cc4fa7dd28e4196b..13c65dd2d37d1ab1af358f82b42c43ba8c2cc2de 100644 (file)
@@ -33,7 +33,7 @@ static struct fuse_dev *fuse_get_dev(struct file *file)
         * Lockless access is OK, because file->private data is set
         * once during mount and is valid until the file is released.
         */
-       return READ_ONCE(file->private_data);
+       return ACCESS_ONCE(file->private_data);
 }
 
 static void fuse_request_init(struct fuse_req *req, struct page **pages,
index fd401028a309e2d260ace4aaba996b8838b13fb1..d1e35b53bb23b80db7077500f63eeec9bce6bb28 100644 (file)
@@ -2090,7 +2090,7 @@ void inode_set_flags(struct inode *inode, unsigned int flags,
 
        WARN_ON_ONCE(flags & ~mask);
        do {
-               old_flags = READ_ONCE(inode->i_flags);
+               old_flags = ACCESS_ONCE(inode->i_flags);
                new_flags = (old_flags & ~mask) | flags;
        } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
                                  new_flags) != old_flags));
index 31a71499102ad9968c2f1d708977c7e1ee87516b..62a0db6e6725e73ed93bb1a014a18fed42072444 100644 (file)
@@ -1201,7 +1201,7 @@ static int follow_managed(struct path *path, struct nameidata *nd)
        /* Given that we're not holding a lock here, we retain the value in a
         * local variable for each dentry as we look at it so that we don't see
         * the components of that value change under us */
-       while (managed = READ_ONCE(path->dentry->d_flags),
+       while (managed = ACCESS_ONCE(path->dentry->d_flags),
               managed &= DCACHE_MANAGED_DENTRY,
               unlikely(managed != 0)) {
                /* Allow the filesystem to manage the transit without i_mutex
@@ -1386,7 +1386,7 @@ int follow_down(struct path *path)
        unsigned managed;
        int ret;
 
-       while (managed = READ_ONCE(path->dentry->d_flags),
+       while (managed = ACCESS_ONCE(path->dentry->d_flags),
               unlikely(managed & DCACHE_MANAGED_DENTRY)) {
                /* Allow the filesystem to manage the transit without i_mutex
                 * being held.
index e158ec6b527b2d72341e096f76e628b5e61ea4cf..d18deb4c410b24ed276c9b60c869f4c06b6ec20f 100644 (file)
@@ -353,7 +353,7 @@ int __mnt_want_write(struct vfsmount *m)
         * incremented count after it has set MNT_WRITE_HOLD.
         */
        smp_mb();
-       while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+       while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
                cpu_relax();
        /*
         * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
index 3fbfbbfb8ed3d6134f642a48fcc9ededfc30ca9a..bf2c43635062b3cfea6df661a37ff62d87c67cf7 100644 (file)
@@ -1081,7 +1081,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
        int error;
 
        if (flags & LOOKUP_RCU) {
-               parent = READ_ONCE(dentry->d_parent);
+               parent = ACCESS_ONCE(dentry->d_parent);
                dir = d_inode_rcu(parent);
                if (!dir)
                        return -ECHILD;
@@ -1168,7 +1168,7 @@ out_set_verifier:
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
  out_valid:
        if (flags & LOOKUP_RCU) {
-               if (parent != READ_ONCE(dentry->d_parent))
+               if (parent != ACCESS_ONCE(dentry->d_parent))
                        return -ECHILD;
        } else
                dput(parent);
@@ -1582,7 +1582,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
                struct inode *dir;
 
                if (flags & LOOKUP_RCU) {
-                       parent = READ_ONCE(dentry->d_parent);
+                       parent = ACCESS_ONCE(dentry->d_parent);
                        dir = d_inode_rcu(parent);
                        if (!dir)
                                return -ECHILD;
@@ -1596,7 +1596,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
                        ret = -ECHILD;
                if (!(flags & LOOKUP_RCU))
                        dput(parent);
-               else if (parent != READ_ONCE(dentry->d_parent))
+               else if (parent != ACCESS_ONCE(dentry->d_parent))
                        return -ECHILD;
                goto out;
        }
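
The nfs_lookup_revalidate() hunks show the RCU-walk discipline for d_parent: sample the pointer once, do the work, then re-sample and bail out with -ECHILD if it moved. A generic sketch of that sample/verify shape (the slot and callback are illustrative, not the dentry API):

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Sample a shared pointer once, use it, then verify it did not move. */
static int demo_rcu_revalidate(void **slot, int (*check)(void *))
{
	void *snap = ACCESS_ONCE(*slot);
	int ret = check(snap);

	if (snap != ACCESS_ONCE(*slot))
		return -1; /* caller retries outside the lockless walk */
	return ret;
}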
index d82549e804025bc59621b16e3d65e95462a1a9c9..9390032a11e13d559b0fbc9accbee25bdff38b30 100644 (file)
@@ -454,7 +454,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                cutime = sig->cutime;
                cstime = sig->cstime;
                cgtime = sig->cgtime;
-               rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+               rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
                /* add up live thread stats at the group level */
                if (whole) {
index 7b635d17321377e4868554a6ad338a1bd413b3cc..7626ee11b06c67edac5d9c021516ff6ea3390b98 100644 (file)
@@ -28,7 +28,7 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &p->ns->poll, wait);
 
-       event = READ_ONCE(ns->event);
+       event = ACCESS_ONCE(ns->event);
        if (m->poll_event != event) {
                m->poll_event = event;
                res |= POLLERR | POLLPRI;
index 39e2dc01ac12c31a2d4629ddabdda2984319dd49..f3084cce0ea6be94ad047a1bf07cdcf1cb3cf0f4 100644 (file)
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(add_to_pipe);
  */
 int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
 {
-       unsigned int buffers = READ_ONCE(pipe->buffers);
+       unsigned int buffers = ACCESS_ONCE(pipe->buffers);
 
        spd->nr_pages_max = buffers;
        if (buffers <= PIPE_DEF_BUFFERS)
index f46d133c094998c48eada7a4e2bd8f5a69239ffa..1c713fd5b3e67966c3d998979d2c30eb8e14ba07 100644 (file)
@@ -381,7 +381,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * in __get_user_pages if userfaultfd_release waits on the
         * caller of handle_userfault to release the mmap_sem.
         */
-       if (unlikely(READ_ONCE(ctx->released))) {
+       if (unlikely(ACCESS_ONCE(ctx->released))) {
                /*
                 * Don't return VM_FAULT_SIGBUS in this case, so a non
                 * cooperative manager can close the uffd after the
@@ -477,7 +477,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                                                       vmf->flags, reason);
        up_read(&mm->mmap_sem);
 
-       if (likely(must_wait && !READ_ONCE(ctx->released) &&
+       if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
                   (return_to_userland ? !signal_pending(current) :
                    !fatal_signal_pending(current)))) {
                wake_up_poll(&ctx->fd_wqh, POLLIN);
@@ -586,7 +586,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                set_current_state(TASK_KILLABLE);
                if (ewq->msg.event == 0)
                        break;
-               if (READ_ONCE(ctx->released) ||
+               if (ACCESS_ONCE(ctx->released) ||
                    fatal_signal_pending(current)) {
                        /*
                         * &ewq->wq may be queued in fork_event, but
@@ -833,7 +833,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
        struct userfaultfd_wake_range range = { .len = 0, };
        unsigned long new_flags;
 
-       WRITE_ONCE(ctx->released, true);
+       ACCESS_ONCE(ctx->released) = true;
 
        if (!mmget_not_zero(mm))
                goto wakeup;
index 129975970d993dd81e28abf049eb05e4b1cc6649..51bf7b827387198d3bbc9f6db146abfcd3056a0c 100644 (file)
@@ -592,9 +592,9 @@ xlog_valid_lsn(
         * a transiently forward state. Instead, we can see the LSN in a
         * transiently behind state if we happen to race with a cycle wrap.
         */
-       cur_cycle = READ_ONCE(log->l_curr_cycle);
+       cur_cycle = ACCESS_ONCE(log->l_curr_cycle);
        smp_rmb();
-       cur_block = READ_ONCE(log->l_curr_block);
+       cur_block = ACCESS_ONCE(log->l_curr_block);
 
        if ((CYCLE_LSN(lsn) > cur_cycle) ||
            (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
index c537ac7435ad147e44165b7cceedde925677c50b..d03c5dd6185daafd871e8ad8e685fa1c0af5cd8e 100644 (file)
@@ -237,7 +237,7 @@ static inline unsigned long __ffs64(u64 word)
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
-               old = READ_ONCE(*ptr);                  \
+               old = ACCESS_ONCE(*ptr);                        \
                new = (old & ~mask) | bits;                     \
        } while (cmpxchg(ptr, old, new) != old);                \
                                                                \
@@ -252,7 +252,7 @@ static inline unsigned long __ffs64(u64 word)
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
-               old = READ_ONCE(*ptr);                  \
+               old = ACCESS_ONCE(*ptr);                        \
                new = old & ~clear;                             \
        } while (!(old & test) &&                               \
                 cmpxchg(ptr, old, new) != old);                \
index 023eae69398c4c393ea18f698b4957c7ee7fbaf1..34c0a5464c743c57f651c4563e465659a56a4534 100644 (file)
@@ -89,7 +89,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
 /* Returns how many objects can be queued, < 0 indicates over limit. */
 static inline int dql_avail(const struct dql *dql)
 {
-       return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
+       return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
 }
 
 /* Record number of completed objects and recalculate the limit. */
index a8a126259bc4c8e8590e5ef29793787de92f8d8c..87067d23a48b898a2d9e60c51ee4a5d2196f6c62 100644
@@ -222,7 +222,7 @@ extern struct page *huge_zero_page;
 
 static inline bool is_huge_zero_page(struct page *page)
 {
-       return READ_ONCE(huge_zero_page) == page;
+       return ACCESS_ONCE(huge_zero_page) == page;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
index d95cae09dea0873a0cb119e63f5c3d6d7c73d823..30294603526f91060c6ab776c09c03ee156b9b55 100644
@@ -247,7 +247,7 @@ static inline struct team_port *team_get_port_by_index(struct team *team,
 
 static inline int team_num_to_port_index(struct team *team, unsigned int num)
 {
-       int en_port_count = READ_ONCE(team->en_port_count);
+       int en_port_count = ACCESS_ONCE(team->en_port_count);
 
        if (unlikely(!en_port_count))
                return 0;
index 85abc2915e8d5d545e7d499ea57099201c57b21d..1957635e6d5f7b677896c50921627aff79445cfb 100644
@@ -198,7 +198,7 @@ static inline void init_llist_head(struct llist_head *list)
  */
 static inline bool llist_empty(const struct llist_head *head)
 {
-       return READ_ONCE(head->first) == NULL;
+       return ACCESS_ONCE(head->first) == NULL;
 }
 
 static inline struct llist_node *llist_next(struct llist_node *node)
index f0fc4700b6ff53adfb8584162ae0bb3c522237a2..2efb08a60e638f3b0c043ce6485c54ebc031d5e9 100644
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
 
 static inline void pm_runtime_mark_last_busy(struct device *dev)
 {
-       WRITE_ONCE(dev->power.last_busy, jiffies);
+       ACCESS_ONCE(dev->power.last_busy) = jiffies;
 }
 
 static inline bool pm_runtime_is_irq_safe(struct device *dev)
index ff68cf288f9bf3a3ac18d696e215980e4fee54f5..5d08c1950e7d76891ab244958b70c9d8808d92cd 100644
@@ -984,12 +984,12 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
 
 static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
 {
-       return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
+       return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
 }
 
 static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
 {
-       return READ_ONCE(ipvs->sysctl_sync_refresh_period);
+       return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
 }
 
 static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
@@ -1014,7 +1014,7 @@ static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
 
 static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
 {
-       return READ_ONCE(ipvs->sysctl_sync_ports);
+       return ACCESS_ONCE(ipvs->sysctl_sync_ports);
 }
 
 static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
index d15c0ee4d95504a88337498045a84f53a6ae0d15..6670fbd3e466d3003ae3c20d5f321b6e0abb2069 100644
@@ -147,7 +147,7 @@ static struct bsd_acct_struct *acct_get(struct pid_namespace *ns)
 again:
        smp_rmb();
        rcu_read_lock();
-       res = to_acct(READ_ONCE(ns->bacct));
+       res = to_acct(ACCESS_ONCE(ns->bacct));
        if (!res) {
                rcu_read_unlock();
                return NULL;
@@ -159,7 +159,7 @@ again:
        }
        rcu_read_unlock();
        mutex_lock(&res->lock);
-       if (res != to_acct(READ_ONCE(ns->bacct))) {
+       if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
                mutex_unlock(&res->lock);
                acct_put(res);
                goto again;
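
acct_get() combines three patterns in one loop: a lockless pointer read under RCU, atomic_inc_not_zero() to skip an object that is already dying, and a re-read under the mutex to catch a swap that happened while sleeping. A condensed sketch under those assumptions, with illustrative my_* types:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        struct mutex lock;
        atomic_t count;
        struct rcu_head rcu;
};

struct my_ns {
        struct my_obj *obj;     /* replaced elsewhere; read locklessly */
};

static void my_put(struct my_obj *obj)
{
        if (atomic_dec_and_test(&obj->count))
                kfree_rcu(obj, rcu);    /* readers may still be in RCU */
}

static struct my_obj *my_get(struct my_ns *ns)
{
        struct my_obj *res;
again:
        rcu_read_lock();
        res = ACCESS_ONCE(ns->obj);
        if (!res) {
                rcu_read_unlock();
                return NULL;
        }
        if (!atomic_inc_not_zero(&res->count)) {
                rcu_read_unlock();
                goto again;             /* object was dying; retry */
        }
        rcu_read_unlock();
        mutex_lock(&res->lock);
        if (res != ACCESS_ONCE(ns->obj)) {
                /* pointer was swapped while we slept; start over */
                mutex_unlock(&res->lock);
                my_put(res);
                goto again;
        }
        return res;
}
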
index a121015155f5e3f36937b223c96b61f61f6820da..4f1d4bfc607a32abea623af2736310103a9c305d 100644
@@ -1202,7 +1202,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
 
 again:
        rcu_read_lock();
-       ctx = READ_ONCE(event->ctx);
+       ctx = ACCESS_ONCE(event->ctx);
        if (!atomic_inc_not_zero(&ctx->refcount)) {
                rcu_read_unlock();
                goto again;
@@ -5304,8 +5304,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                if (!rb)
                        goto aux_unlock;
 
-               aux_offset = READ_ONCE(rb->user_page->aux_offset);
-               aux_size = READ_ONCE(rb->user_page->aux_size);
+               aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
+               aux_size = ACCESS_ONCE(rb->user_page->aux_size);
 
                if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
                        goto aux_unlock;
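
aux_offset and aux_size live in a page writable by userspace, so each is read exactly once and every later check runs on the local copy; re-reading would let userspace change the value between validation and use. The same discipline in a self-contained sketch (my_* names are illustrative):

#include <linux/compiler.h>
#include <linux/errno.h>

static int my_validate_shared(unsigned long *shared, unsigned long limit,
                              unsigned long *out)
{
        unsigned long v = ACCESS_ONCE(*shared); /* single read */

        if (v > limit)
                return -EINVAL;         /* the value checked ... */
        *out = v;                       /* ... is the value handed on */
        return 0;
}
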
index f3e37971c842eb48878a6bfbd3dbc8f025697271..f684d8e5fa2be2fd10e4d5e2f1e65a5c2afeb629 100644
@@ -381,7 +381,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
-               aux_tail = READ_ONCE(rb->user_page->aux_tail);
+               aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
                handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
index 6b4298a41167c7f3f7ea7be1d85af9a08d9d44cb..f6cad39f35dfbe441abc5fc740458a6574e7d529 100644
@@ -1339,7 +1339,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
         * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
         * can't confuse the checks below.
         */
-       int exit_state = READ_ONCE(p->exit_state);
+       int exit_state = ACCESS_ONCE(p->exit_state);
        int ret;
 
        if (unlikely(exit_state == EXIT_DEAD))
index 845f3805c73d72cccaf844afcd5a5bc0f3337ebb..81279c6602ff1753d8c38c8def566c3576578962 100644
@@ -2724,7 +2724,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
         * if it happened, we have to fail the write.
         */
        barrier();
-       if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
+       if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
                local_dec(&cpu_buffer->committing);
                local_dec(&cpu_buffer->commits);
                return NULL;
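
The recheck above only works because barrier() stops the compiler from hoisting the cpu_buffer->buffer load above the committing/commits updates, and the marked access then forces a real load at that point. Reduced to its skeleton (my_* names are illustrative):

#include <linux/compiler.h>
#include <asm/local.h>

struct my_buffer;

struct my_cpu_buffer {
        local_t committing;
        local_t commits;
        struct my_buffer *buffer;
};

static struct my_buffer *my_reserve(struct my_cpu_buffer *cb,
                                    struct my_buffer *buffer)
{
        local_inc(&cb->committing);
        local_inc(&cb->commits);

        barrier();      /* don't let the load below move up */
        if (unlikely(ACCESS_ONCE(cb->buffer) != buffer)) {
                local_dec(&cb->committing);
                local_dec(&cb->commits);
                return NULL;    /* buffer swapped: fail the write */
        }
        return buffer;
}
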
index 6b0b343a36a278be32a89e91e95066e84020c620..401b0639116f216c78b3dad0ad663c9e74858d52 100644
@@ -1460,7 +1460,7 @@ extern struct trace_event_file *find_event_file(struct trace_array *tr,
 
 static inline void *event_file_data(struct file *filp)
 {
-       return READ_ONCE(file_inode(filp)->i_private);
+       return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
 extern struct mutex event_mutex;
index 734accc02418930280a5248013fa6fbd4a5868e2..719a52a4064a0f6472ab7e662d00c47b6dd5993d 100644
@@ -78,7 +78,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 {
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
-       int frame_size = READ_ONCE(tracer_frame);
+       int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;
 
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
index d32b45662fb66828cc907ea42ea4c9ede8a1ef0c..c490f1e4313b998a60686b98fbdf6b0167753e11 100644
@@ -894,7 +894,7 @@ static bool new_idmap_permitted(const struct file *file,
 int proc_setgroups_show(struct seq_file *seq, void *v)
 {
        struct user_namespace *ns = seq->private;
-       unsigned long userns_flags = READ_ONCE(ns->flags);
+       unsigned long userns_flags = ACCESS_ONCE(ns->flags);
 
        seq_printf(seq, "%s\n",
                   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
index b77d51da8c73def55d1aea5ca6c34161652981a0..4e53be8bc590dc2030a930aec5a2cac8c4fa6a30 100644
@@ -39,7 +39,7 @@ begin_node:
                /* Descend through a shortcut */
                shortcut = assoc_array_ptr_to_shortcut(cursor);
                smp_read_barrier_depends();
-               cursor = READ_ONCE(shortcut->next_node);
+               cursor = ACCESS_ONCE(shortcut->next_node);
        }
 
        node = assoc_array_ptr_to_node(cursor);
@@ -55,7 +55,7 @@ begin_node:
         */
        has_meta = 0;
        for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-               ptr = READ_ONCE(node->slots[slot]);
+               ptr = ACCESS_ONCE(node->slots[slot]);
                has_meta |= (unsigned long)ptr;
                if (ptr && assoc_array_ptr_is_leaf(ptr)) {
                        /* We need a barrier between the read of the pointer
@@ -89,7 +89,7 @@ continue_node:
        smp_read_barrier_depends();
 
        for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-               ptr = READ_ONCE(node->slots[slot]);
+               ptr = ACCESS_ONCE(node->slots[slot]);
                if (assoc_array_ptr_is_meta(ptr)) {
                        cursor = ptr;
                        goto begin_node;
@@ -98,7 +98,7 @@ continue_node:
 
 finished_node:
        /* Move up to the parent (may need to skip back over a shortcut) */
-       parent = READ_ONCE(node->back_pointer);
+       parent = ACCESS_ONCE(node->back_pointer);
        slot = node->parent_slot;
        if (parent == stop)
                return 0;
@@ -107,7 +107,7 @@ finished_node:
                shortcut = assoc_array_ptr_to_shortcut(parent);
                smp_read_barrier_depends();
                cursor = parent;
-               parent = READ_ONCE(shortcut->back_pointer);
+               parent = ACCESS_ONCE(shortcut->back_pointer);
                slot = shortcut->parent_slot;
                if (parent == stop)
                        return 0;
@@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array,
                                        void *iterator_data),
                        void *iterator_data)
 {
-       struct assoc_array_ptr *root = READ_ONCE(array->root);
+       struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
 
        if (!root)
                return 0;
@@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array,
 
        pr_devel("-->%s()\n", __func__);
 
-       cursor = READ_ONCE(array->root);
+       cursor = ACCESS_ONCE(array->root);
        if (!cursor)
                return assoc_array_walk_tree_empty;
 
@@ -220,7 +220,7 @@ consider_node:
 
        slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
        slot &= ASSOC_ARRAY_FAN_MASK;
-       ptr = READ_ONCE(node->slots[slot]);
+       ptr = ACCESS_ONCE(node->slots[slot]);
 
        pr_devel("consider slot %x [ix=%d type=%lu]\n",
                 slot, level, (unsigned long)ptr & 3);
@@ -294,7 +294,7 @@ follow_shortcut:
        } while (sc_level < shortcut->skip_to_level);
 
        /* The shortcut matches the leaf's index to this point. */
-       cursor = READ_ONCE(shortcut->next_node);
+       cursor = ACCESS_ONCE(shortcut->next_node);
        if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
                level = sc_level;
                goto jumped;
@@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array,
         * the terminal node.
         */
        for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-               ptr = READ_ONCE(node->slots[slot]);
+               ptr = ACCESS_ONCE(node->slots[slot]);
                if (ptr && assoc_array_ptr_is_leaf(ptr)) {
                        /* We need a barrier between the read of the pointer
                         * and dereferencing the pointer - but only if we are
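
Every marked read in this file that yields a pointer is followed by smp_read_barrier_depends() before the pointer is chased, since on Alpha even a data-dependent load can otherwise observe stale contents. The pairing in isolation (my_* names are illustrative):

#include <linux/compiler.h>
#include <asm/barrier.h>

struct my_node {
        int value;
};

static int my_slot_value(struct my_node **slot)
{
        struct my_node *p = ACCESS_ONCE(*slot);

        if (!p)
                return 0;
        smp_read_barrier_depends();     /* order the load vs. the deref */
        return p->value;
}
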
index da4672a50a54a2046bb86479c57dc11552a1981c..6a406fafb5d611771fd6db769efd584719a0ebc6 100644
@@ -21,7 +21,7 @@ void dql_completed(struct dql *dql, unsigned int count)
        unsigned int ovlimit, completed, num_queued;
        bool all_prev_completed;
 
-       num_queued = READ_ONCE(dql->num_queued);
+       num_queued = ACCESS_ONCE(dql->num_queued);
 
        /* Can't complete more than what's in queue */
        BUG_ON(count > num_queued - dql->num_completed);
index 7062e931a7bb4e8f2fb2937e065e6e062f049b2a..ae5872b1df0c669fc8365ce1754d2a128651a890 100644
@@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
        struct llist_node *first;
 
        do {
-               new_last->next = first = READ_ONCE(head->first);
+               new_last->next = first = ACCESS_ONCE(head->first);
        } while (cmpxchg(&head->first, first, new_first) != first);
 
        return !first;
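
This is the canonical lock-free stack push: snapshot the head, link the new batch behind it, and let cmpxchg() retry if another CPU pushed first. The single-node case spelled out (my_push is an illustrative name; the library already provides llist_add() for this):

#include <linux/compiler.h>
#include <linux/llist.h>

static bool my_push(struct llist_node *node, struct llist_head *head)
{
        struct llist_node *first;

        do {
                node->next = first = ACCESS_ONCE(head->first);
        } while (cmpxchg(&head->first, first, node) != first);

        return !first;  /* list was empty before this push */
}
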
index 1746bae94d416f6ce3311c569e5d99080173f998..86c3385b9eb393a54b09cddcfcccef3ec257f8d2 100644
@@ -620,8 +620,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
 
        rcu_read_lock();
        for (i = 0; i < depth; i++, d = p) {
-               p = READ_ONCE(d->d_parent);
-               array[i] = READ_ONCE(d->d_name.name);
+               p = ACCESS_ONCE(d->d_parent);
+               array[i] = ACCESS_ONCE(d->d_name.name);
                if (p == d) {
                        if (i)
                                array[i] = "";
index ad1fce9c82877135406d53fbc4ea1320278a08d1..eba34cdfc3e5b5a6adee7f613e49bbb18902c1e7 100644
@@ -2708,7 +2708,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
                struct shrink_control *sc)
 {
        struct pglist_data *pgdata = NODE_DATA(sc->nid);
-       return READ_ONCE(pgdata->split_queue_len);
+       return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
index 503e3672ecc9c1280f204dac53e6966a31290537..27357fc1730b93ebf302dbaf0d7a365dd56e4dea 100644
@@ -3726,7 +3726,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
        if (flow_table && flow_id <= flow_table->mask) {
                rflow = &flow_table->flows[flow_id];
-               cpu = READ_ONCE(rflow->cpu);
+               cpu = ACCESS_ONCE(rflow->cpu);
                if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
                           rflow->last_qtail) <
index 3b2034f6d49d20a0df890d02ea30ebd05cdb87b4..6e1e10ff433a5f4097d1d4b33848ab13d4e005c6 100644
@@ -3377,7 +3377,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
-       unsigned int burst = READ_ONCE(pkt_dev->burst);
+       unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
        struct net_device *odev = pkt_dev->odev;
        struct netdev_queue *txq;
        struct sk_buff *skb;
index f9597ba2659986408b3d43c4821e0b7793fa6670..af74d0433453d9751e1b6e26bcbe251d173bee4b 100644
@@ -164,7 +164,7 @@ static void inet_frag_worker(struct work_struct *work)
 
        local_bh_disable();
 
-       for (i = READ_ONCE(f->next_bucket); budget; --budget) {
+       for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
                evicted += inet_evict_bucket(f, &f->hash[i]);
                i = (i + 1) & (INETFRAGS_HASHSZ - 1);
                if (evicted > INETFRAGS_EVICT_MAX)
index ebdcdaec9c06c305890a5e7b95804362301a46d8..647cfc972bde0f0455b8581734a6591484659409 100644
@@ -495,7 +495,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
 {
        u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
-       u32 old = READ_ONCE(*p_tstamp);
+       u32 old = ACCESS_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
        u32 new, delta = 0;
 
index a7f39d1304238a895546940f9d6b7b83349442c4..cd3d60bb7cc8ace78f0c85a94a8582c9144612d0 100644
@@ -1910,7 +1910,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
                goto send_now;
 
-       win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor);
+       win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
        if (win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
index 02ec9a3493033cf044b31724c340ce0cfa9add20..ebfbccae62fde187ec5863670c03cd5b5c96258b 100644
@@ -1853,7 +1853,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 */
 
                /* if we're overly short, let UDP handle it */
-               encap_rcv = READ_ONCE(up->encap_rcv);
+               encap_rcv = ACCESS_ONCE(up->encap_rcv);
                if (encap_rcv) {
                        int ret;
 
@@ -2298,7 +2298,7 @@ void udp_destroy_sock(struct sock *sk)
        unlock_sock_fast(sk, slow);
        if (static_key_false(&udp_encap_needed) && up->encap_type) {
                void (*encap_destroy)(struct sock *sk);
-               encap_destroy = READ_ONCE(up->encap_destroy);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
                if (encap_destroy)
                        encap_destroy(sk);
        }
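
Both hunks read a function pointer once into a local, test the local, and call through it; testing up->encap_rcv and then calling through the field again could observe the hook being cleared in between. Minimal form of the idiom (my_* names are illustrative):

#include <linux/compiler.h>
#include <linux/errno.h>

struct my_pkt;

struct my_sock {
        int (*deliver)(struct my_sock *sk, struct my_pkt *pkt);
};

static int my_rx(struct my_sock *sk, struct my_pkt *pkt)
{
        int (*deliver)(struct my_sock *, struct my_pkt *);

        deliver = ACCESS_ONCE(sk->deliver);     /* read the hook once */
        if (deliver)
                return deliver(sk, pkt);        /* call the local copy */
        return -ENOENT;                         /* no hook installed */
}
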
index dab94655415741873e869176d2f06520085d8729..a1c24443cd9e01de9c6e2d5d68c0f8426e25ceec 100644
@@ -490,7 +490,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
        if (!t)
                goto out;
 
-       tproto = READ_ONCE(t->parms.proto);
+       tproto = ACCESS_ONCE(t->parms.proto);
        if (tproto != ipproto && tproto != 0)
                goto out;
 
@@ -899,7 +899,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
        t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
 
        if (t) {
-               u8 tproto = READ_ONCE(t->parms.proto);
+               u8 tproto = ACCESS_ONCE(t->parms.proto);
 
                if (tproto != ipproto && tproto != 0)
                        goto drop;
@@ -1233,7 +1233,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
-       tproto = READ_ONCE(t->parms.proto);
+       tproto = ACCESS_ONCE(t->parms.proto);
        if (tproto != IPPROTO_IPIP && tproto != 0)
                return -1;
 
@@ -1303,7 +1303,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
-       tproto = READ_ONCE(t->parms.proto);
+       tproto = ACCESS_ONCE(t->parms.proto);
        if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
            ip6_tnl_addr_conflict(t, ipv6h))
                return -1;
index 3f30fa313bf282d129668f6c0929a631edf0ca9d..40d7234c27b991e54f5cfbacb5d7081e8277ae56 100644
@@ -606,7 +606,7 @@ static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                 */
 
                /* if we're overly short, let UDP handle it */
-               encap_rcv = READ_ONCE(up->encap_rcv);
+               encap_rcv = ACCESS_ONCE(up->encap_rcv);
                if (encap_rcv) {
                        int ret;
 
@@ -1432,7 +1432,7 @@ void udpv6_destroy_sock(struct sock *sk)
 
        if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
                void (*encap_destroy)(struct sock *sk);
-               encap_destroy = READ_ONCE(up->encap_destroy);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
                if (encap_destroy)
                        encap_destroy(sk);
        }
index 82cb93f66b9bdd103b6900a5d1177751c5a01bc8..dd3e83328ad544d3183233da61fa40289975de51 100644
@@ -193,7 +193,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
         */
        rcv = rcu_dereference(sap->rcv_func);
        dest = llc_pdu_type(skb);
-       sap_handler = dest ? READ_ONCE(llc_type_handlers[dest - 1]) : NULL;
+       sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
        if (unlikely(!sap_handler)) {
                if (rcv)
                        rcv(skb, dev, pt, orig_dev);
@@ -214,7 +214,7 @@ drop:
        kfree_skb(skb);
        goto out;
 handle_station:
-       sta_handler = READ_ONCE(llc_station_handler);
+       sta_handler = ACCESS_ONCE(llc_station_handler);
        if (!sta_handler)
                goto drop;
        sta_handler(skb);
index 214d2ba02877d2fcb45980786528a4e650c9644d..69615016d5bf60cb89f46056f3faf7c6f2fd3ce4 100644
@@ -2008,7 +2008,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u16 rate,
 
 static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
 {
-       u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+       u16 rate = ACCESS_ONCE(sta_get_last_rx_stats(sta)->last_rate);
 
        if (rate == STA_STATS_RATE_INVALID)
                return -EINVAL;
index 4d748975117dba1b3889e40f48a4229bcefe2459..d177dd0665043652c199605479959ea408145a4b 100644
@@ -393,7 +393,7 @@ EXPORT_SYMBOL(netlbl_calipso_ops_register);
 
 static const struct netlbl_calipso_ops *netlbl_calipso_ops_get(void)
 {
-       return READ_ONCE(calipso_ops);
+       return ACCESS_ONCE(calipso_ops);
 }
 
 /**
index eb866647a27ac3847f2a6eca3709945808dd70e6..d396cb61a280d24b6c4bd4733885ff9693f1c673 100644
@@ -14201,7 +14201,7 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct sk_buff *msg;
        void *hdr;
-       u32 nlportid = READ_ONCE(wdev->ap_unexpected_nlportid);
+       u32 nlportid = ACCESS_ONCE(wdev->ap_unexpected_nlportid);
 
        if (!nlportid)
                return false;
index 4210e5c6262eb9f19e4d08f6d195bca793f4c680..23ccddb20de15f6498f3de67e1f683d8e22f963e 100644
@@ -247,7 +247,7 @@ void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
        struct amdtp_am824 *p = s->protocol;
 
        if (port < p->midi_ports)
-               WRITE_ONCE(p->midi[port], midi);
+               ACCESS_ONCE(p->midi[port]) = midi;
 }
 EXPORT_SYMBOL_GPL(amdtp_am824_midi_trigger);
 
@@ -336,7 +336,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe
                                           unsigned int data_blocks, unsigned int *syt)
 {
        struct amdtp_am824 *p = s->protocol;
-       struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+       struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
        unsigned int pcm_frames;
 
        if (pcm) {
@@ -357,7 +357,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe
                                           unsigned int data_blocks, unsigned int *syt)
 {
        struct amdtp_am824 *p = s->protocol;
-       struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+       struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
        unsigned int pcm_frames;
 
        if (pcm) {
index 4a1dc145327b19f8f5dac1123ae0a20264d93506..3fc581a5ad6254b61b23f1b21a82ce6b0b3c622b 100644
@@ -376,7 +376,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
        ptr = s->pcm_buffer_pointer + frames;
        if (ptr >= pcm->runtime->buffer_size)
                ptr -= pcm->runtime->buffer_size;
-       WRITE_ONCE(s->pcm_buffer_pointer, ptr);
+       ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
 
        s->pcm_period_pointer += frames;
        if (s->pcm_period_pointer >= pcm->runtime->period_size) {
@@ -388,7 +388,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
 static void pcm_period_tasklet(unsigned long data)
 {
        struct amdtp_stream *s = (void *)data;
-       struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+       struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
 
        if (pcm)
                snd_pcm_period_elapsed(pcm);
@@ -453,7 +453,7 @@ static int handle_out_packet(struct amdtp_stream *s,
                s->data_block_counter =
                                (s->data_block_counter + data_blocks) & 0xff;
 
-       buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
+       buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
                                (s->data_block_quadlets << CIP_DBS_SHIFT) |
                                ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
                                s->data_block_counter);
@@ -472,7 +472,7 @@ static int handle_out_packet(struct amdtp_stream *s,
        if (queue_out_packet(s, payload_length) < 0)
                return -EIO;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm && pcm_frames > 0)
                update_pcm_pointers(s, pcm, pcm_frames);
 
@@ -504,7 +504,7 @@ static int handle_out_packet_without_header(struct amdtp_stream *s,
        if (queue_out_packet(s, payload_length) < 0)
                return -EIO;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm && pcm_frames > 0)
                update_pcm_pointers(s, pcm, pcm_frames);
 
@@ -621,7 +621,7 @@ end:
        if (queue_in_packet(s) < 0)
                return -EIO;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm && pcm_frames > 0)
                update_pcm_pointers(s, pcm, pcm_frames);
 
@@ -649,7 +649,7 @@ static int handle_in_packet_without_header(struct amdtp_stream *s,
        if (queue_in_packet(s) < 0)
                return -EIO;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm && pcm_frames > 0)
                update_pcm_pointers(s, pcm, pcm_frames);
 
@@ -947,7 +947,7 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
        if (!in_interrupt() && amdtp_stream_running(s))
                fw_iso_context_flush_completions(s->context);
 
-       return READ_ONCE(s->pcm_buffer_pointer);
+       return ACCESS_ONCE(s->pcm_buffer_pointer);
 }
 EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
 
@@ -977,8 +977,9 @@ EXPORT_SYMBOL(amdtp_stream_pcm_ack);
 void amdtp_stream_update(struct amdtp_stream *s)
 {
        /* Precomputing. */
-       WRITE_ONCE(s->source_node_id_field,
-                   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
+       ACCESS_ONCE(s->source_node_id_field) =
+               (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) &
+                                                               CIP_SID_MASK;
 }
 EXPORT_SYMBOL(amdtp_stream_update);
 
@@ -1021,7 +1022,7 @@ void amdtp_stream_pcm_abort(struct amdtp_stream *s)
 {
        struct snd_pcm_substream *pcm;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm)
                snd_pcm_stop_xrun(pcm);
 }
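
Throughout this file the IRQ path publishes the ring position with one marked store, and amdtp_stream_pcm_pointer() reads it back with one marked load; no lock is needed because there is a single writer and the value is one machine word. Condensed into a sketch (my_* names are illustrative):

#include <linux/compiler.h>

struct my_stream {
        unsigned long pos;      /* written only from the IRQ path */
        unsigned long size;
};

/* IRQ side: advance and publish with a single store */
static void my_advance(struct my_stream *s, unsigned long frames)
{
        unsigned long ptr = s->pos + frames;

        if (ptr >= s->size)
                ptr -= s->size;
        ACCESS_ONCE(s->pos) = ptr;
}

/* ALSA pointer callback: single unlocked load */
static unsigned long my_pointer(struct my_stream *s)
{
        return ACCESS_ONCE(s->pos);
}
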
index e45de3eecfe39cb9b3b2f928f9a50307b05a3210..a608dae8334870ac2d83aae5ef5808a278a87bc9 100644
@@ -221,7 +221,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
 static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
                                            struct snd_pcm_substream *pcm)
 {
-       WRITE_ONCE(s->pcm, pcm);
+       ACCESS_ONCE(s->pcm) = pcm;
 }
 
 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
index 4a884a33524809a23ba6a07e8319711f76771081..1453c34ce99f0f25aa1f4cef7195dfb454456e87 100644
@@ -327,7 +327,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
        struct amdtp_dot *p = s->protocol;
 
        if (port < MAX_MIDI_PORTS)
-               WRITE_ONCE(p->midi[port], midi);
+               ACCESS_ONCE(p->midi[port]) = midi;
 }
 
 static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
@@ -338,7 +338,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
        struct snd_pcm_substream *pcm;
        unsigned int pcm_frames;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm) {
                read_pcm_s32(s, pcm, buffer, data_blocks);
                pcm_frames = data_blocks;
@@ -359,7 +359,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
        struct snd_pcm_substream *pcm;
        unsigned int pcm_frames;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm) {
                write_pcm_s32(s, pcm, buffer, data_blocks);
                pcm_frames = data_blocks;
index 77c7598b61abeb9b9c0ea7629b15a56d39fd37b6..780da9deb2f01b75b6b6fe4c29d352a134f2103d 100644
@@ -108,7 +108,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
                                           unsigned int data_blocks,
                                           unsigned int *syt)
 {
-       struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+       struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
        unsigned int pcm_frames;
 
        if (pcm) {
@@ -127,7 +127,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
                                           unsigned int data_blocks,
                                           unsigned int *syt)
 {
-       struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+       struct snd_pcm_substream *pcm = ACCESS_ONCE(s->pcm);
        unsigned int pcm_frames;
 
        if (pcm) {
index 6a49611ee46250a0534c4cd1455dc9a15972fc18..949ee56b4e0e91d4bd8390fe30d638787532fde5 100644
@@ -22,7 +22,7 @@ static int midi_playback_open(struct snd_rawmidi_substream *substream)
        ff->running_status[substream->number] = 0;
        ff->rx_midi_error[substream->number] = false;
 
-       WRITE_ONCE(ff->rx_midi_substreams[substream->number], substream);
+       ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = substream;
 
        return 0;
 }
@@ -38,7 +38,7 @@ static int midi_playback_close(struct snd_rawmidi_substream *substream)
        struct snd_ff *ff = substream->rmidi->private_data;
 
        cancel_work_sync(&ff->rx_midi_work[substream->number]);
-       WRITE_ONCE(ff->rx_midi_substreams[substream->number], NULL);
+       ACCESS_ONCE(ff->rx_midi_substreams[substream->number]) = NULL;
 
        return 0;
 }
@@ -52,10 +52,10 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *substream,
        spin_lock_irqsave(&ff->lock, flags);
 
        if (up)
-               WRITE_ONCE(ff->tx_midi_substreams[substream->number],
-                          substream);
+               ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) =
+                                                               substream;
        else
-               WRITE_ONCE(ff->tx_midi_substreams[substream->number], NULL);
+               ACCESS_ONCE(ff->tx_midi_substreams[substream->number]) = NULL;
 
        spin_unlock_irqrestore(&ff->lock, flags);
 }
index 332b29f8ed754da9c3a9d4c38807fe30935b2a18..dd6c8e839647f64d8eddc203b199e30014bc2738 100644
@@ -12,7 +12,7 @@ static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
                                     int rcode)
 {
        struct snd_rawmidi_substream *substream =
-                               READ_ONCE(ff->rx_midi_substreams[port]);
+                               ACCESS_ONCE(ff->rx_midi_substreams[port]);
 
        if (rcode_is_permanent_error(rcode)) {
                ff->rx_midi_error[port] = true;
@@ -60,7 +60,7 @@ static inline void fill_midi_buf(struct snd_ff *ff, unsigned int port,
 static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
 {
        struct snd_rawmidi_substream *substream =
-                       READ_ONCE(ff->rx_midi_substreams[port]);
+                       ACCESS_ONCE(ff->rx_midi_substreams[port]);
        u8 *buf = (u8 *)ff->msg_buf[port];
        int i, len;
 
@@ -159,7 +159,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
                 */
                index = (quad >> 8) & 0xff;
                if (index > 0) {
-                       substream = READ_ONCE(ff->tx_midi_substreams[0]);
+                       substream = ACCESS_ONCE(ff->tx_midi_substreams[0]);
                        if (substream != NULL) {
                                byte = quad & 0xff;
                                snd_rawmidi_receive(substream, &byte, 1);
@@ -169,7 +169,7 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
                /* Message in second port. */
                index = (quad >> 24) & 0xff;
                if (index > 0) {
-                       substream = READ_ONCE(ff->tx_midi_substreams[1]);
+                       substream = ACCESS_ONCE(ff->tx_midi_substreams[1]);
                        if (substream != NULL) {
                                byte = (quad >> 16) & 0xff;
                                snd_rawmidi_receive(substream, &byte, 1);
index 46092fa3ff9bf3632d41a67ce232766136b7f00f..5826aa8362f10b319c5946054c73107fef5f963f 100644
@@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
        ptr += count;
        if (ptr >= runtime->buffer_size)
                ptr -= runtime->buffer_size;
-       WRITE_ONCE(isight->buffer_pointer, ptr);
+       ACCESS_ONCE(isight->buffer_pointer) = ptr;
 
        isight->period_counter += count;
        if (isight->period_counter >= runtime->period_size) {
@@ -111,7 +111,7 @@ static void isight_samples(struct isight *isight,
        struct snd_pcm_runtime *runtime;
        unsigned int count1;
 
-       if (!READ_ONCE(isight->pcm_running))
+       if (!ACCESS_ONCE(isight->pcm_running))
                return;
 
        runtime = isight->pcm->runtime;
@@ -131,7 +131,7 @@ static void isight_samples(struct isight *isight,
 
 static void isight_pcm_abort(struct isight *isight)
 {
-       if (READ_ONCE(isight->pcm_active))
+       if (ACCESS_ONCE(isight->pcm_active))
                snd_pcm_stop_xrun(isight->pcm);
 }
 
@@ -141,7 +141,7 @@ static void isight_dropped_samples(struct isight *isight, unsigned int total)
        u32 dropped;
        unsigned int count1;
 
-       if (!READ_ONCE(isight->pcm_running))
+       if (!ACCESS_ONCE(isight->pcm_running))
                return;
 
        runtime = isight->pcm->runtime;
@@ -293,7 +293,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
        if (err < 0)
                return err;
 
-       WRITE_ONCE(isight->pcm_active, true);
+       ACCESS_ONCE(isight->pcm_active) = true;
 
        return 0;
 }
@@ -331,7 +331,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
 {
        struct isight *isight = substream->private_data;
 
-       WRITE_ONCE(isight->pcm_active, false);
+       ACCESS_ONCE(isight->pcm_active) = false;
 
        mutex_lock(&isight->mutex);
        isight_stop_streaming(isight);
@@ -424,10 +424,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               WRITE_ONCE(isight->pcm_running, true);
+               ACCESS_ONCE(isight->pcm_running) = true;
                break;
        case SNDRV_PCM_TRIGGER_STOP:
-               WRITE_ONCE(isight->pcm_running, false);
+               ACCESS_ONCE(isight->pcm_running) = false;
                break;
        default:
                return -EINVAL;
@@ -439,7 +439,7 @@ static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream)
 {
        struct isight *isight = substream->private_data;
 
-       return READ_ONCE(isight->buffer_pointer);
+       return ACCESS_ONCE(isight->buffer_pointer);
 }
 
 static int isight_create_pcm(struct isight *isight)
index f0555a24d90ed1e6df27ea65b7eec57721c4751a..96f0091144bb2ad48cb7f1d12fc51db57e208e20 100644
@@ -310,7 +310,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
        if (p->midi_ports)
                read_midi_messages(s, buffer, data_blocks);
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (data_blocks > 0 && pcm)
                read_pcm_s32(s, pcm->runtime, buffer, data_blocks);
 
@@ -374,7 +374,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
        if (p->midi_ports)
                write_midi_messages(s, buffer, data_blocks);
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm)
                write_pcm_s32(s, pcm->runtime, buffer, data_blocks);
        else
index f33497cdc706e65584c2dfdecad43862b3906b27..02d5956658987424fdb9ca4ff8b79d68f0b47051 100644
@@ -112,7 +112,7 @@ static void handle_hss(struct fw_card *card, struct fw_request *request,
        }
 
        if (length >= 1) {
-               stream = READ_ONCE(scs->input);
+               stream = ACCESS_ONCE(scs->input);
                if (stream)
                        midi_input_packet(scs, stream, data, length);
        }
@@ -183,7 +183,7 @@ static void scs_output_work(struct work_struct *work)
        if (scs->transaction_running)
                return;
 
-       stream = READ_ONCE(scs->output);
+       stream = ACCESS_ONCE(scs->output);
        if (!stream || scs->error) {
                scs->output_idle = true;
                wake_up(&scs->idle_wait);
@@ -291,9 +291,9 @@ static void midi_capture_trigger(struct snd_rawmidi_substream *stream, int up)
 
        if (up) {
                scs->input_escape_count = 0;
-               WRITE_ONCE(scs->input, stream);
+               ACCESS_ONCE(scs->input) = stream;
        } else {
-               WRITE_ONCE(scs->input, NULL);
+               ACCESS_ONCE(scs->input) = NULL;
        }
 }
 
@@ -319,10 +319,10 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *stream, int up)
                scs->transaction_bytes = 0;
                scs->error = false;
 
-               WRITE_ONCE(scs->output, stream);
+               ACCESS_ONCE(scs->output) = stream;
                schedule_work(&scs->work);
        } else {
-               WRITE_ONCE(scs->output, NULL);
+               ACCESS_ONCE(scs->output) = NULL;
        }
 }
 static void midi_playback_drain(struct snd_rawmidi_substream *stream)
index ab482423c16545d3161e04d7d8e78f614b3b70ea..6aff1fc1c72d0c8e0a789d5c66d223627d5c3217 100644
@@ -124,7 +124,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
 {
        struct snd_pcm_substream *pcm;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (data_blocks > 0 && pcm)
                read_pcm_s32(s, pcm, buffer, data_blocks);
 
@@ -143,7 +143,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
        /* This field is not used. */
        *syt = 0x0000;
 
-       pcm = READ_ONCE(s->pcm);
+       pcm = ACCESS_ONCE(s->pcm);
        if (pcm)
                write_pcm_s32(s, pcm, buffer, data_blocks);
        else
index 2ad692dd4b137d0af15f8b74fcedb1aab2e80975..8967c52f503284a2d610352fcc7324a53a088b6d 100644
@@ -148,7 +148,7 @@ static void async_midi_port_callback(struct fw_card *card, int rcode,
                                     void *callback_data)
 {
        struct snd_fw_async_midi_port *port = callback_data;
-       struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
+       struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
 
        /* This port is closed. */
        if (substream == NULL)
@@ -173,7 +173,7 @@ static void midi_port_work(struct work_struct *work)
 {
        struct snd_fw_async_midi_port *port =
                        container_of(work, struct snd_fw_async_midi_port, work);
-       struct snd_rawmidi_substream *substream = READ_ONCE(port->substream);
+       struct snd_rawmidi_substream *substream = ACCESS_ONCE(port->substream);
        int generation;
 
        /* Under transacting or error state. */
@@ -282,7 +282,7 @@ static void handle_midi_tx(struct fw_card *card, struct fw_request *request,
                                bytes = 3;
                }
 
-               substream = READ_ONCE(tscm->tx_midi_substreams[port]);
+               substream = ACCESS_ONCE(tscm->tx_midi_substreams[port]);
                if (substream != NULL)
                        snd_rawmidi_receive(substream, b + 1, bytes);
        }
index 2472144b329efed09b919ae05b65226cc0a01960..8382ffa3bcaf8b446dd67197503f3c21cef307ba 100644
@@ -165,7 +165,7 @@ static bool xtfpga_pcm_push_tx(struct xtfpga_i2s *i2s)
        tx_substream = rcu_dereference(i2s->tx_substream);
        tx_active = tx_substream && snd_pcm_running(tx_substream);
        if (tx_active) {
-               unsigned tx_ptr = READ_ONCE(i2s->tx_ptr);
+               unsigned tx_ptr = ACCESS_ONCE(i2s->tx_ptr);
                unsigned new_tx_ptr = i2s->tx_fn(i2s, tx_substream->runtime,
                                                 tx_ptr);
 
@@ -437,7 +437,7 @@ static int xtfpga_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-               WRITE_ONCE(i2s->tx_ptr, 0);
+               ACCESS_ONCE(i2s->tx_ptr) = 0;
                rcu_assign_pointer(i2s->tx_substream, substream);
                xtfpga_pcm_refill_fifo(i2s);
                break;
@@ -459,7 +459,7 @@ static snd_pcm_uframes_t xtfpga_pcm_pointer(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct xtfpga_i2s *i2s = runtime->private_data;
-       snd_pcm_uframes_t pos = READ_ONCE(i2s->tx_ptr);
+       snd_pcm_uframes_t pos = ACCESS_ONCE(i2s->tx_ptr);
 
        return pos < runtime->buffer_size ? pos : 0;
 }
index fc579f33060145ec28be3e9b2526ab36af36cc1a..7371e5b0603564e37190d074b5ae9506c80c3e32 100644
@@ -108,7 +108,7 @@ static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k,
        unsigned int payload_length, tocopy;
        struct snd_rawmidi_substream *midi_receive_substream;
 
-       midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream);
+       midi_receive_substream = ACCESS_ONCE(bcd2k->midi_receive_substream);
        if (!midi_receive_substream)
                return;
 
@@ -139,7 +139,7 @@ static void bcd2000_midi_send(struct bcd2000 *bcd2k)
 
        BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE);
 
-       midi_out_substream = READ_ONCE(bcd2k->midi_out_substream);
+       midi_out_substream = ACCESS_ONCE(bcd2k->midi_out_substream);
        if (!midi_out_substream)
                return;
 
index 1f5e26aae9fc5fa898d496e3b0a1651627c61e0b..7d8c3261a50d0d0d9ddc9c9c8cb26a632e524c3a 100644
@@ -25,7 +25,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-       return READ_ONCE((v)->counter);
+       return ACCESS_ONCE((v)->counter);
 }
 
 /**
index 4c1966f7c77a746c898838bc2556675fca21be44..40b231fb95bd2848c11dba7f8d3a8e3632d9c284 100644
@@ -22,7 +22,7 @@
  */
 static inline int atomic_read(const atomic_t *v)
 {
-       return READ_ONCE((v)->counter);
+       return ACCESS_ONCE((v)->counter);
 }
 
 /**
index d19e11b68de728b95c2e427923475bdb3a877a26..33b5e6cdf38c8302727fe55446b8f24d8342084d 100644
@@ -378,7 +378,7 @@ struct addr_filters {
 static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
 {
        struct perf_event_mmap_page *pc = mm->userpg;
-       u64 head = READ_ONCE(pc->aux_head);
+       u64 head = ACCESS_ONCE(pc->aux_head);
 
        /* Ensure all reads are done after we read the head */
        rmb();
@@ -389,7 +389,7 @@ static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
 {
        struct perf_event_mmap_page *pc = mm->userpg;
 #if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
-       u64 head = READ_ONCE(pc->aux_head);
+       u64 head = ACCESS_ONCE(pc->aux_head);
 #else
        u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
 #endif
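
This is userspace code (tools/perf), where ACCESS_ONCE comes from the tools' copy of compiler.h: the head published by the kernel is loaded once, then rmb() keeps the subsequent data reads from being ordered before it. The reader half in isolation, reusing the types already in scope in this header (my_read_head is an illustrative name):

static u64 my_read_head(struct auxtrace_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->userpg;
        u64 head = ACCESS_ONCE(pc->aux_head);

        rmb();  /* buffer reads below must not pass the head read */
        return head;
}
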
index 3f63ee12471d0ce27e8b6d1605ac1c76fa313df3..41caa098ed15534f7b8100025e9eac533ad24c0e 100644
@@ -114,7 +114,7 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
 
 extern volatile int session_done;
 
-#define session_done() READ_ONCE(session_done)
+#define session_done() ACCESS_ONCE(session_done)
 
 int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
index 3301afff4b879932a405733d3262ccff43b0bdf8..484e8820c382b348fcd81fa47f0548616861146c 100644
@@ -2310,7 +2310,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
                                continue;
                        } else if (pass && i > last_boosted_vcpu)
                                break;
-                       if (!READ_ONCE(vcpu->preempted))
+                       if (!ACCESS_ONCE(vcpu->preempted))
                                continue;
                        if (vcpu == me)
                                continue;