/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

#define VMBUS_PKT_TRAILER 8

/*
 * When we write to the ring buffer, check if the host needs to be
 * signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not expecting
 * an interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the guest
 * to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);
}

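/*
 * Why the barriers above matter (illustrative reasoning, not normative):
 * the guest publishes its data/write index and only then reads
 * interrupt_mask (virt_mb()), while the host clears interrupt_mask and
 * then re-checks the ring for new data. With both sides ordered this way,
 * at least one side must observe the other's update, so an
 * empty-to-non-empty transition can never go unsignaled.
 */
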
/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_buffer->read_index;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (e.g. the packet descriptor) when reading from the ring.
 */
static inline u32
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        if (next >= ring_info->ring_datasize)
                next -= ring_info->ring_datasize;

        return next;
}
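
/*
 * Worked example for the routine above (hypothetical numbers): with
 * ring_datasize = 4096, read_index = 4000 and offset = 200, next becomes
 * 4200 and wraps to 104. A single conditional subtraction is enough
 * because the offset is always smaller than the ring size.
 */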

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Return the write index in the upper 32 bits of a u64, for use as the
 * previous-packet-start trailer. (Despite the historical name, only the
 * write index is recorded here.)
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from the ring buffer (source) into a destination
 * buffer. Assumes there is enough room. The ring's data pages are mapped
 * with a wraparound alias (see hv_ringbuffer_init()), so the memcpy may
 * safely run past the end of the ring; only the returned read offset is
 * wrapped back into [0, ring_datasize).
 */
static u32 hv_copyfrom_ringbuffer(
        const struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        if (start_read_offset >= ring_buffer_size)
                start_read_offset -= ring_buffer_size;

        return start_read_offset;
}

/*
 * Helper routine to copy from a source buffer into the ring buffer
 * (destination). Assumes there is enough room. As above, the wraparound
 * mapping lets the memcpy cross the end of the ring; only the returned
 * write offset is wrapped.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        const void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON(sizeof(struct hv_ring_buffer) != PAGE_SIZE);

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

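/*
 * Resulting virtual layout for page_cnt = 4 (illustrative):
 *
 *     [header][d1][d2][d3][d1][d2][d3]
 *
 * The data pages d1..d3 are mapped a second time immediately after the
 * first mapping, so a copy that starts near the end of the ring may run
 * past ring_datasize and land in the alias. This is what lets
 * hv_copyto_ringbuffer()/hv_copyfrom_ringbuffer() use a single memcpy
 * instead of splitting every wrapping copy in two.
 */
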
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = 0;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is exactly enough room for this packet, treat the ring
         * as full: allowing the write would make read index == write index,
         * which is indistinguishable from an empty ring on the next pass.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        return 0;
}

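/*
 * Usage sketch (illustrative only, compiled out): how a caller such as
 * vmbus_sendpacket() in channel.c feeds a descriptor, a payload, and u64
 * alignment padding through one kvec array. "payload"/"payload_len" and
 * the trans_id value below are hypothetical.
 */
#if 0
static int example_send(struct vmbus_channel *channel,
                        const void *payload, u32 payload_len)
{
        struct vmpacket_descriptor desc;
        u64 aligned_data = 0;
        struct kvec bufferlist[3];
        u32 packetlen = sizeof(desc) + payload_len;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        desc.type = VM_PKT_DATA_INBAND;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.offset8 = sizeof(desc) >> 3;       /* payload offset, 8-byte units */
        desc.len8 = packetlen_aligned >> 3;     /* total length, 8-byte units */
        desc.trans_id = 0x1234;                 /* hypothetical request id */

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(desc);
        bufferlist[1].iov_base = (void *)payload;
        bufferlist[1].iov_len = payload_len;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = packetlen_aligned - packetlen;

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
#endif
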
/* Cache the host-visible read index for later hv_signal_on_read(). */
static inline void
init_cached_read_index(struct hv_ring_buffer_info *rbi)
{
        rbi->cached_read_index = rbi->ring_buffer->read_index;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;
        struct hv_ring_buffer_info *inring_info = &channel->inbound;

        if (buflen <= 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        bytes_avail_toread = hv_get_bytes_to_read(inring_info);
        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is returned when there is not even a complete
                 * header; drivers are expected to check buffer_actual_len.
                 */
                return ret;
        }

        init_cached_read_index(inring_info);

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        hv_signal_on_read(channel);

        return ret;
}

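/*
 * On-ring layout of the packet consumed above (sizes in bytes,
 * illustrative):
 *
 *     |<- offset8 * 8 ->|<-------- payload -------->|<- 8 ->|
 *     [ descriptor      ][ data                     ][ u64  ]
 *     |<------------- len8 * 8 ------------------->|
 *
 * len8 * 8 covers the descriptor plus data; the trailing u64 holds the
 * previous write index (see hv_get_ring_bufferindices()) and is what
 * VMBUS_PKT_TRAILER accounts for in the iterator below.
 */
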
/*
 * Determine the number of bytes available in the ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read() but uses the private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
        u32 priv_read_loc = rbi->priv_read_index;
        u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
        else
                return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

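/*
 * E.g. (hypothetical numbers): with ring_datasize = 4096,
 * priv_read_index = 3900 and write_index = 100, the iterator still has
 * (4096 - 3900) + 100 = 296 bytes to consume.
 */
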
/*
 * Get the first vmbus packet from the ring buffer after read_index.
 *
 * If the ring buffer is empty, returns NULL and no further action is
 * needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /* set state for later hv_signal_on_read() */
        init_cached_read_index(rbi);

        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
                return NULL;

        return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get the next vmbus packet from the ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. Returns NULL once all pending packets have been consumed; the
 * ring itself is circular, so this is a "no more data" indication, not
 * an end-of-buffer condition.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
                   const struct vmpacket_descriptor *desc)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;
        u32 packetlen = desc->len8 << 3;
        u32 dsize = rbi->ring_datasize;

        /* bump offset to next potential packet */
        rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
        if (rbi->priv_read_index >= dsize)
                rbi->priv_read_index -= dsize;

        /* more data? */
        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
                return NULL;
        else
                return hv_get_ring_buffer(rbi) + rbi->priv_read_index;
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/*
 * Update the host-visible read index after iterating over packets.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_rmb();
        rbi->ring_buffer->read_index = rbi->priv_read_index;

        hv_signal_on_read(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
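
/*
 * Iterator usage sketch (illustrative only, compiled out): a channel
 * callback drains all pending packets and then closes the iterator, so
 * the host sees the freed space and is signaled at most once.
 * "handle_packet" is a hypothetical consumer.
 */
#if 0
static void example_on_channel_callback(struct vmbus_channel *channel)
{
        struct vmpacket_descriptor *desc;

        for (desc = hv_pkt_iter_first(channel); desc;
             desc = __hv_pkt_iter_next(channel, desc))
                handle_packet(desc);

        hv_pkt_iter_close(channel);
}
#endif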