drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

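/*
 * hv_begin_read() and hv_end_read() bracket the guest's drain of an
 * inbound ring: begin masks host interrupts for the ring, end unmasks
 * them and returns the number of bytes that arrived in the race window,
 * so the caller knows whether another pass over the ring is needed.
 */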
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 0;
        virt_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        return hv_get_bytes_to_read(rbi);
}

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if they receive interrupts when they are not
 * expecting them. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */
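
/*
 * Concretely: if old_write (the write index before our write) equals
 * the host's read_index, the host saw an empty ring immediately before
 * we wrote and may have cleared interrupt_mask and gone idle; that is
 * exactly the empty-to-non-empty transition that must be signaled. In
 * every other case the host is still draining and will pick up the new
 * data on its own.
 */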

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
                               bool kick_q)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

        virt_mb();
        if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
                return;

        /* check interrupt_mask before read_index */
        virt_rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         * Note that kick_q is intentionally unused here; per the
         * note above, the decision is based solely on ring state.
         */
        if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
                vmbus_setevent(channel);

        return;
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already consumed
 * (e.g. the packet descriptor), wrapping around the end of the ring.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
        ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Pack the write index of the specified ring buffer into the upper
 * 32 bits of a u64; the lower 32 bits are left zero. This value is
 * appended to each packet as "prev_indices".
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data from the ring buffer into dest.
 * Assumes there is enough room: the memcpy may run past the end of the
 * ring data, which is safe because hv_ringbuffer_init() maps the data
 * pages twice back to back; only the returned offset is wrapped.
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}


/*
 * Helper routine to copy data from src into the ring buffer.
 * Assumes there is enough room; as above, wrap-around of the copy
 * itself is handled by the double mapping, and only the returned
 * offset is wrapped.
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 page_cnt)
{
        int i;
        struct page **pages_wraparound;

        BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        /*
         * First page holds struct hv_ring_buffer, do wraparound mapping for
         * the rest.
         */
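        /*
         * For example, with page_cnt = 4 (header page H plus data pages
         * D1..D3) the virtual mapping built below is
         *
         *      H D1 D2 D3 D1 D2 D3
         *
         * so copies that run off the end of the data area continue
         * seamlessly into the second mapping of the same pages.
         */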
        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
                                   GFP_KERNEL);
        if (!pages_wraparound)
                return -ENOMEM;

        pages_wraparound[0] = pages;
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

        ring_info->ring_buffer = (struct hv_ring_buffer *)
                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

        kfree(pages_wraparound);

        if (!ring_info->ring_buffer)
                return -ENOMEM;

        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = page_cnt << PAGE_SHIFT;
        ring_info->ring_datasize = ring_info->ring_size -
                sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
        vunmap(ring_info->ring_buffer);
}

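/*
 * On-ring layout of each write below: the caller's kv_list fragments
 * are copied back to back, followed by a u64 ("prev_indices") that
 * records the write index at which the packet started.
 */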
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        struct kvec *kv_list, u32 kv_count, bool lock,
                        bool kick_q)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 totalbytes_towrite = 0;

        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        /* Reserve room for the trailing prev_indices u64. */
        totalbytes_towrite += sizeof(u64);

        if (lock)
                spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

        /*
         * If there is only just enough room for the packet, treat the
         * ring buffer as full. Otherwise, after the write the read
         * index would equal the write index and, the next time around,
         * we would mistake a full ring buffer for an empty one.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                if (lock)
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        virt_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel, kick_q);
        return 0;
}

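/*
 * Read flow: peek at the vmpacket_descriptor to learn the packet size,
 * validate it against the bytes available and the caller's buffer,
 * copy out the payload (and the trailing prev_indices), then publish
 * the new read index and signal the host via hv_signal_on_read().
 */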
int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw)
{
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;
        struct hv_ring_buffer_info *inring_info = &channel->inbound;

        if (buflen <= 0)
                return -EINVAL;

        *buffer_actual_len = 0;
        *requestid = 0;

        bytes_avail_toread = hv_get_bytes_to_read(inring_info);
        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * Not an error: when there is not even a packet header
                 * to read, return 0 and let drivers check
                 * buffer_actual_len.
                 */
                return ret;
        }

        init_cached_read_index(channel);
        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset)
                return -EAGAIN;

        if (packetlen > buflen)
                return -ENOBUFS;

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        virt_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        hv_signal_on_read(channel);

        return ret;
}