/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

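/*
 * Mask host interrupts for this ring: while interrupt_mask is set, the
 * host will not signal the guest when it places new data in the ring.
 */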
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}

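/*
 * Re-enable host signaling and report how many bytes are still unread;
 * a non-zero return means new data raced in and must be processed.
 */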
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
			      enum hv_signal_policy policy)
{
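	/*
	 * Ensure the write_index update made in hv_ringbuffer_write() is
	 * visible before we sample interrupt_mask below; this pairs with
	 * the host clearing interrupt_mask and re-checking for new data.
	 */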
	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return false;

	/*
	 * When the client wants to control signaling,
	 * we only honour the host interrupt mask.
	 */
	if (policy == HV_SIGNAL_POLICY_EXPLICIT)
		return true;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		return true;

	return false;
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip ahead by @offset bytes (e.g. past the
 * packet descriptor).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/* Get the ring buffer indices packed as a u64 (write index in the upper 32 bits). */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
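	/*
	 * Only the write index is recorded; the lower 32 bits are left
	 * zero. The result is appended after each packet as the
	 * prev_indices trailer in hv_ringbuffer_write().
	 */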
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around on the
 * source (ring buffer) side only!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	void *dest,
	u32 destlen,
	u32 start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

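	/*
	 * The double mapping set up in hv_ringbuffer_init() lets this
	 * memcpy run past the end of the ring into the aliased second
	 * mapping, so wrap-around needs no explicit split.
	 */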
	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}

/*
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room. Handles wrap-around on the
 * destination (ring buffer) side only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
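	/*
	 * For example, with page_cnt == 3 (header H plus data pages D1 and
	 * D2), the array built below is { H, D1, D2, D1, D2 }: the data
	 * pages are mapped twice back to back, so copies that run past the
	 * end of the ring wrap around transparently.
	 */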
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

275
276 /* Cleanup the ring buffer. */
277 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
278 {
279 vunmap(ring_info->ring_buffer);
280 }
281
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count, bool *signal,
			bool lock, enum hv_signal_policy policy)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

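	/* Reserve room for the u64 prev_indices trailer appended below. */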
	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full: were the write to advance the write index all
	 * the way to the read index, a later read index == write index
	 * check would mistake the full ring for an empty one.
	 */
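	/*
	 * E.g. with exactly 100 bytes free and a 100-byte write (payload
	 * plus trailer), refuse the write; hence <= rather than < below.
	 */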
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info, policy);
	return 0;
}

int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool *signal, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * This is not an error: when not even a complete header
		 * has arrived, return 0 and let drivers check
		 * buffer_actual_len.
		 */
		return ret;
	}

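	/*
	 * Each packet on the ring is a vmpacket_descriptor header followed
	 * by the payload and a trailing u64 of prev_indices (written by
	 * hv_ringbuffer_write() above). Peek at the header first.
	 */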
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

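	/* offset8 and len8 are expressed in units of 8 bytes, hence << 3. */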
	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	*signal = hv_need_to_signal_on_read(inring_info);

	return ret;
}