/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
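
/*
 * A sketch of the intended read-side pattern (caller and helper names
 * here are illustrative, not from this file): mask host interrupts,
 * drain the ring, then re-check after unmasking so that a packet which
 * arrived in the unmask window is not missed:
 *
 *	for (;;) {
 *		hv_begin_read(rbi);
 *		drain_all_packets(channel);	(hypothetical helper)
 *		if (hv_end_read(rbi) == 0)
 *			break;
 *	}
 */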

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest to host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
			       bool kick_q)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
		vmbus_setevent(channel);
}
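
/*
 * Worked example of the check above: with read_index == write_index ==
 * 4096 the ring is empty. A writer captures old_write = 4096, copies
 * its packet, and then lands here. If the reader still has read_index
 * == 4096, our write was the empty-to-non-empty transition and the
 * host must be signaled; otherwise the host is still draining the ring
 * and, per the protocol above, will see the new data without an
 * interrupt.
 */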

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet descriptor when
 * copying out the payload.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Return the current write index in the upper 32 bits of a u64; the
 * lower 32 bits (the read index slot) are left as zero. This value is
 * appended to each packet as its "previous indices" trailer.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy data out of the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source ring only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}


/*
 * Helper routine to copy data from a source buffer into the ring
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * destination ring only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
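
/*
 * Note on wrap-around in the two helpers above: hv_ringbuffer_init()
 * below maps the data pages twice, back to back, so a single memcpy()
 * may run past the nominal end of the ring and continue into the
 * second mapping of the same pages. Only the returned offset needs the
 * explicit "% ring_buffer_size" fold; the copy itself is never split.
 */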

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	/*
	 * The first page holds struct hv_ring_buffer; map the remaining
	 * data pages twice so reads and writes can cross the wrap point
	 * with a single linear copy.
	 */
	pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
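
	/*
	 * Example of the resulting layout for page_cnt == 4 (one header
	 * page H and three data pages D1..D3):
	 *
	 *	pages_wraparound[] = { H, D1, D2, D3, D1, D2, D3 }
	 *
	 * vmap()ing these 2 * page_cnt - 1 entries makes the data
	 * region appear twice in a row in virtual memory, which is what
	 * lets the copy helpers above use one linear memcpy() across
	 * the wrap point.
	 */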

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
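
/*
 * Minimal usage sketch (the channel-open path is the typical caller;
 * the variable names here are illustrative): a channel sets up one
 * outbound and one inbound ring over a single run of pages:
 *
 *	ret = hv_ringbuffer_init(&channel->outbound, page, send_pages);
 *	if (!ret)
 *		ret = hv_ringbuffer_init(&channel->inbound,
 *					 &page[send_pages], recv_pages);
 */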

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	vunmap(ring_info->ring_buffer);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			struct kvec *kv_list, u32 kv_count, bool lock,
			bool kick_q)
{
	int i;
	u32 bytes_avail_towrite;
	u32 totalbytes_towrite = 0;

	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags = 0;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	/* Reserve room for the u64 "previous indices" trailer. */
	totalbytes_towrite += sizeof(u64);

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is no more room than the packet needs, treat the
	 * ring as full: letting write_index catch up with read_index
	 * would make the ring indistinguishable from an empty one
	 * (read index == write index) on the next pass.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		if (lock)
			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel, kick_q);

	if (channel->rescind)
		return -ENODEV;

	return 0;
}
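
/*
 * Resulting on-ring layout for one packet, as produced by the code
 * above: the caller's kvec segments back to back, then the 8-byte
 * prev_indices trailer carrying the pre-write write_index in its
 * upper half:
 *
 *	| kv_list[0] | ... | kv_list[kv_count - 1] | prev_indices |
 */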

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	struct vmpacket_descriptor desc;
	u32 offset;
	u32 packetlen;
	int ret = 0;
	struct hv_ring_buffer_info *inring_info = &channel->inbound;

	if (buflen == 0)
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	bytes_avail_toread = hv_get_bytes_to_read(inring_info);
	/* Make sure there is something to read */
	if (bytes_avail_toread < sizeof(desc)) {
		/*
		 * Not even a complete descriptor is available. This is
		 * not an error; drivers are expected to check
		 * buffer_actual_len, which stays zero.
		 */
		return ret;
	}

	init_cached_read_index(channel);
	next_read_location = hv_get_next_read_location(inring_info);
	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
						    sizeof(desc),
						    next_read_location);

	offset = raw ? 0 : (desc.offset8 << 3);
	packetlen = (desc.len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc.trans_id;

	if (bytes_avail_toread < packetlen + offset)
		return -EAGAIN;

	if (packetlen > buflen)
		return -ENOBUFS;

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    packetlen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	hv_signal_on_read(channel);

	return ret;
}
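
/*
 * Consumer-side sketch (hypothetical driver code; in practice drivers
 * usually go through the vmbus_recvpacket() wrappers): keep reading
 * until the function reports success with a zero buffer_actual_len,
 * which means no complete packet is pending:
 *
 *	while (!hv_ringbuffer_read(channel, buf, sizeof(buf), &len,
 *				   &req_id, false) && len != 0)
 *		handle_packet(buf, len, req_id);
 */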