#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning in sleeping
 * lock implementations (mutex, rwsem, etc.).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
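
/*
 * Illustrative sketch (not part of the original file): the tail encoding
 * round-trips through encode_cpu()/decode_cpu(), and OSQ_UNLOCKED_VAL (0)
 * can never alias a real CPU because stored values are always cpu_nr + 1.
 * Callers run with preemption disabled, so smp_processor_id() is stable.
 */
static inline bool osq_encoding_roundtrip_ok(void)
{
	int curr = encode_cpu(smp_processor_id());

	/* decode_cpu() must hand back this CPU's own per-CPU node. */
	return curr != OSQ_UNLOCKED_VAL &&
	       decode_cpu(curr) == this_cpu_ptr(&osq_node);
}
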
/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in the queue, then the 'old' value will be
	 * the prev node's CPU #; else it's set to OSQ_UNLOCKED_VAL, since if
	 * we're currently last in the queue, the queue will become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}
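
/*
 * Caller-pattern sketch (illustrative, not part of the original file):
 * unlock() passes prev == NULL, so if we are last the tail resets to
 * OSQ_UNLOCKED_VAL and the queue empties; unqueue() passes its stabilized
 * @prev, so the tail is handed back to @prev instead. Either way, a
 * non-NULL return is a successor that must be dealt with:
 *
 *	next = osq_wait_next(lock, node, NULL);
 *	if (next)
 *		WRITE_ONCE(next->locked, 1);	// pass the lock on
 */
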
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from the stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 * Use vcpu_is_preempted() to avoid waiting for a preempted
		 * lock holder:
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}
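
/*
 * Unqueue illustration (sketch, not part of the original file): once Step-A
 * has pinned @prev and Step-B has produced a stable @next, Step-C splices
 * @node out of the doubly linked queue in both directions:
 *
 *	before:	prev <-> node <-> next
 *	after:	prev <-> next		(node's per-CPU slot is reusable)
 */
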
void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
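
/*
 * Usage sketch (illustrative, not part of the original file): roughly how a
 * sleeping lock's optimistic-spin slowpath drives the OSQ. owner_running()
 * is a hypothetical stand-in for the owner checks done by the real callers
 * in mutex.c and rwsem.c, and callers are assumed to hold preemption
 * disabled. A false return from osq_lock() means we unqueued because we
 * should reschedule, so the caller falls back to sleeping.
 */
static inline bool example_optimistic_spin(struct optimistic_spin_queue *osq,
					   bool (*owner_running)(void))
{
	bool owner_gone = false;

	if (!osq_lock(osq))
		return false;		/* couldn't spin; go block instead */

	/* Only the queue head spins on the owner, limiting cacheline traffic. */
	while (!need_resched()) {
		if (!owner_running()) {
			owner_gone = true;	/* owner released; try to grab the lock */
			break;
		}
		cpu_relax();
	}

	osq_unlock(osq);
	return owner_gone;
}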