drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

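/*
 * hv_begin_read() and hv_end_read() bracket the draining of an inbound
 * ring: the first sets interrupt_mask so the host will not signal us for
 * every new packet while we drain, the second clears it and reports how
 * much unread data raced in meanwhile. An illustrative (not verbatim)
 * caller pattern:
 *
 *      hv_begin_read(rbi);
 *      ... drain all packets from the ring ...
 *      if (hv_end_read(rbi))
 *              ... new data arrived; drain again ...
 */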
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        virt_mb();
}

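/*
 * Re-enable host signaling and return the number of bytes still unread,
 * so the caller can catch packets that arrived after the final read but
 * before the mask was cleared.
 */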
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 0;
        virt_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        return hv_get_bytes_to_read(rbi);
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */

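/*
 * Decide whether the host must be signaled after a write. Per rule 2
 * above, the host re-checks for data after clearing interrupt_mask, so a
 * signal is only needed when this write took the ring from empty to
 * non-empty: if old_write (where the write began) equals the host's
 * read_index, the ring was empty at that point.
 */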
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
                              enum hv_signal_policy policy)
{
        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return false;

        /*
         * When the client wants to control signaling,
         * we only honour the host interrupt mask.
         */
        if (policy == HV_SIGNAL_POLICY_EXPLICIT)
                return true;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                return true;

        return false;
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over 'offset' bytes (e.g. an already
 * parsed packet descriptor), wrapping modulo the data size.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices packed as a u64: the write index in the
 * upper 32 bits; the lower 32 bits (the read index slot) are left zero.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data out of the ring buffer into a linear
 * destination buffer. Assumes there is enough data in the ring; handles
 * wrap-around on the source (ring) side only.
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else
                memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
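
/*
 * Example of the wrap-around math above: with ring_datasize = 4096,
 * start_read_offset = 4000 and destlen = 200, frag_len = 96, so 96 bytes
 * are copied from offset 4000 and the remaining 104 from offset 0; the
 * returned offset is (4000 + 200) % 4096 = 104.
 */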

/*
 * Helper routine to copy a linear source buffer into the ring buffer.
 * Assumes there is enough room in the ring; handles wrap-around on the
 * destination (ring) side only.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else
                memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}
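
/*
 * Note that neither copy helper publishes the data it moves; the callers
 * below issue virt_mb() before updating the ring's read or write index,
 * so the copies are globally visible before the index is.
 */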

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

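/*
 * Ring layout assumed below: 'buffer' begins with one page-sized
 * struct hv_ring_buffer (hence the PAGE_SIZE check) carrying the
 * read/write indices and interrupt_mask, followed by
 * buflen - PAGE_SIZE bytes of data, recorded in ring_datasize.
 */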
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
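/*
 * A packet is written as the caller's kvec fragments followed by a
 * trailing u64 of packed indices (see hv_get_ring_bufferindices())
 * recording where the packet's write began; that trailer is why
 * sizeof(u64) is added to totalbytes_towrite. On success, *signal tells
 * the caller whether the host needs to be notified.
 */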
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct kvec *kv_list, u32 kv_count, bool *signal,
                        bool lock, enum hv_signal_policy policy)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = 0;

        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        if (lock)
                spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * Reject a write that would fill the ring exactly: a completely
         * full ring (read index == write index) would be indistinguishable
         * from an empty one the next time around.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                if (lock)
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info, policy);
        return 0;
}

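/*
 * Read the next packet from the ring: peek the vmpacket_descriptor first,
 * then copy either just the payload (descriptor's offset8 applied) or,
 * for raw reads, the whole packet including the descriptor. Returns
 * -EAGAIN if the packet has not been fully written yet and -ENOBUFS if
 * the caller's buffer is too small; an empty ring returns 0 with
 * *buffer_actual_len == 0.
 */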
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool *signal, bool raw)
{
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;

        if (buflen == 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        bytes_avail_toread = hv_get_bytes_to_read(inring_info);
        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is returned when there is not even a complete
                 * header; drivers are expected to check buffer_actual_len
                 * instead.
                 */
                return ret;
        }

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        *signal = hv_need_to_signal_on_read(inring_info);

        return ret;
}