/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

/*
 * Size of the trailing u64 of packed read/write indices that the writer
 * appends after every packet (see hv_ringbuffer_write()).
 */
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting any. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
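
/*
 * Worked example of the rule above: suppose read_index == write_index == 40
 * before a write, i.e. the ring is empty and the host may be about to sleep.
 * The writer copies its packet, advances write_index, and then finds
 * old_write (40) equal to read_index (still 40): the ring just went from
 * empty to non-empty, so the host is signaled. A writer that arrives while
 * the ring is already non-empty sees old_write != read_index and stays quiet.
 */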

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the read and write indices of the specified ring buffer, packed as
 * a single u64. Only the write index (the high 32 bits) is recorded; the
 * low 32 bits are left as zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assumes there is enough room. A destination that wraps around is
 * handled by the double mapping set up in hv_ringbuffer_init(): a copy
 * that runs past the end of the ring spills into the mirror mapping of
 * its start. The source must be contiguous.
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	const void			*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}
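
/*
 * Example of the offset arithmetic above, assuming ring_datasize == 4096:
 * a copy of srclen == 200 starting at start_write_offset == 4000 runs 104
 * bytes past the end of the ring, landing in the mirror mapping (offsets
 * 0..103 of the real ring), and the returned next write offset is
 * 4000 + 200 - 4096 == 104.
 */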

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
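
/*
 * Illustration of the wraparound mapping built above, for page_cnt == 4
 * (one header page H plus three data pages D1..D3). The array handed to
 * vmap() is
 *
 *	[ H, D1, D2, D3, D1, D2, D3 ]
 *
 * so the data region appears twice, back to back, in virtual address
 * space. An access that runs off the end of the first copy lands in the
 * second copy, which aliases the start of the ring; this is what lets
 * hv_copyto_ringbuffer() and hv_ringbuffer_read() use a single plain
 * memcpy() even when a packet wraps.
 */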

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the ring
	 * as full and refuse the write: if the write consumed all remaining
	 * space, read_index would equal write_index afterwards, and the
	 * ring would be indistinguishable from an empty one.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
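
/*
 * Illustrative sketch only (unused here, and the helper name is
 * hypothetical): how a caller might feed hv_ringbuffer_write(). The packet
 * descriptor and the payload travel as separate kvec entries;
 * hv_ringbuffer_write() itself appends the trailing u64 of packed indices,
 * which is why totalbytes_towrite above starts at sizeof(u64). Real callers
 * such as vmbus_sendpacket() also pad the payload out to an 8-byte
 * boundary, since desc->len8 counts in units of 8 bytes, and fill in the
 * descriptor fields before sending.
 */
static inline int example_ring_send(struct vmbus_channel *channel,
				    struct vmpacket_descriptor *desc,
				    void *payload, u32 payload_len)
{
	struct kvec kv[2] = {
		{ .iov_base = desc,    .iov_len = sizeof(*desc) },
		{ .iov_base = payload, .iov_len = payload_len },
	};

	return hv_ringbuffer_write(channel, kv, 2);
}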

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * An empty ring is not an error: not even a packet header
		 * was present. Drivers are expected to check
		 * *buffer_actual_len to distinguish this case.
		 */
		return 0;
	}

	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}
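
/*
 * Example of the offset arithmetic in hv_ringbuffer_read(): for an inband
 * packet with offset8 == 2 and len8 == 4 (both in units of 8 bytes), a raw
 * read returns all 32 bytes starting at the descriptor, while a non-raw
 * read skips the 16-byte header and returns only the 16 payload bytes.
 */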

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}
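
/*
 * Example: with ring_datasize == 16384, priv_read_index == 15000 and
 * write_index == 1000, the readable region wraps, so the bytes available
 * are (16384 - 15000) + 1000 == 2384.
 */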

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;

	/* Prefetch the area just past this packet, where the next descriptor starts. */
	prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. Returns NULL once all available packets have been consumed.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
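
/*
 * Example of the iterator stride: a packet with len8 == 4 occupies 32
 * bytes, followed by the VMBUS_PKT_TRAILER (the 8-byte packed-indices u64
 * appended by the writer), so priv_read_index advances by 40 bytes
 * (modulo ring_datasize) to reach the next descriptor.
 */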

/*
 * Update host ring buffer after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 orig_write_sz = hv_get_bytes_to_write(rbi);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If the load of pending_sz (below) were reordered before the store
	 * to read_index (above), we could miss an interrupt: the host could
	 * set pending_sz after we sampled it, see the stale read_index, and
	 * go to sleep waiting for a signal we will never send. The full
	 * barrier keeps the store and the load in order.
	 */
	virt_mb();

	/* If host has disabled notifications then skip */
	if (rbi->ring_buffer->interrupt_mask)
		return;

	if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) {
		u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);

		/*
		 * If there was space before we began iterating, the host was
		 * not blocked on this ring. This also covers pending_sz == 0,
		 * where the host has nothing pending and does not need to be
		 * signaled.
		 */
		if (orig_write_sz > pending_sz)
			return;

		/* If pending write will not fit, don't give false hope. */
		if (hv_get_bytes_to_write(rbi) < pending_sz)
			return;
	}

	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
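
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * the intended calling pattern for the iterator API above. Packets are
 * walked with hv_pkt_iter_first()/__hv_pkt_iter_next(), which only move
 * the private read index; hv_pkt_iter_close() then publishes the new
 * read_index to the host and signals it if it was blocked on space.
 */
static inline void example_drain_ring(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	for (desc = hv_pkt_iter_first(channel); desc != NULL;
	     desc = __hv_pkt_iter_next(channel, desc)) {
		/* Consume *desc here; its payload starts at offset8 << 3. */
	}

	/* Commit the read index and make the signaling decision. */
	hv_pkt_iter_close(channel);
}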