/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/system.h>

/* See <asm/bitops.h> for API comments. */

static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}

static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
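
/*
 * Illustrative sketch (editorial addition; names are hypothetical):
 * typical set_bit()/clear_bit() usage on a word of flag bits shared
 * between CPUs.  Neither routine implies a memory barrier here, so
 * an explicit smp_mb() is used where ordering matters.
 */
static inline void example_mark_dirty(volatile unsigned long *flags)
{
	set_bit(0, flags);	/* atomic fetchor; no implied barrier */
}

static inline void example_clear_dirty(volatile unsigned long *flags)
{
	smp_mb();		/* order prior stores before the clear */
	clear_bit(0, flags);
}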

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

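/*
 * Editorial note: the fetch ops above cover atomic or/and, but the
 * ISA apparently provides no fetch-xor, so the xor-flavored routines
 * below are built as atomic64_cmpxchg() retry loops: re-read the
 * word, try to swap in (old ^ mask), and retry if another CPU got
 * there first.  The kernel requires cmpxchg to act as a full memory
 * barrier, so these routines carry no explicit smp_mb().
 */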
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
}


/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes. We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */

static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
	       & mask) != 0;
	barrier();
	return val;
}
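
/*
 * Illustrative sketch (editorial addition; names are hypothetical):
 * test_and_set_bit() used as a simple trylock.  For a given 0 -> 1
 * transition exactly one caller sees the bit previously clear.
 */
static inline int example_bit_trylock(volatile unsigned long *word)
{
	/* nonzero on success, i.e. we were the one to set the bit */
	return !test_and_set_bit(0, word);
}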


static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
	       & mask) != 0;
	barrier();
	return val;
}
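
/*
 * Illustrative sketch (editorial addition; names are hypothetical):
 * the common "consume a pending flag" pattern.  If several CPUs
 * race, exactly one observes the 1 -> 0 transition and should do
 * the associated work.
 */
static inline int example_take_pending(volatile unsigned long *flags)
{
	return test_and_clear_bit(0, flags);	/* nonzero for one winner */
}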


static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
	return (oldval & mask) != 0;
}
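
/*
 * Illustrative sketch (editorial addition; names are hypothetical):
 * test_and_change_bit() for double buffering; the return value is
 * the index of the buffer that was active before the atomic flip.
 */
static inline int example_flip_buffer(volatile unsigned long *state)
{
	return test_and_change_bit(0, state);
}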

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _ASM_TILE_BITOPS_64_H */