/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "hyperv_vmbus.h"

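/*
 * Each packet written to the ring is followed by an 8-byte trailer:
 * the u64 "previous indices" value appended by hv_ringbuffer_write()
 * (see hv_get_ring_bufferindices()).
 */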
#define VMBUS_PKT_TRAILER	8

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 *
 * KYS: Oct. 30, 2016:
 * It looks like Windows hosts have logic to deal with DOS attacks that
 * can be triggered if it receives interrupts when it is not expecting
 * the interrupt. The host expects interrupts only when the ring
 * transitions from empty to non-empty (or full to non-full on the
 * guest-to-host ring).
 * So, base the signaling decision solely on the ring state until the
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;

	virt_mb();
	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
		return;

	/* check interrupt_mask before read_index */
	virt_rmb();
	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
		++channel->intr_out_empty;
		vmbus_setevent(channel);
	}
}

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
	ring_info->priv_read_index = next_read_location;
}

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 * Get the ring buffer indices as a u64, with the write index in the
 * upper 32 bits. This is the value written after each packet as the
 * "previous packet start" trailer.
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}

/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in the dest case only!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info *ring_info,
	u32 start_write_offset,
	const void *src,
	u32 srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

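	/* The ring is double-mapped, so a copy past the end wraps naturally */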
	memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	if (start_write_offset >= ring_buffer_size)
		start_write_offset -= ring_buffer_size;

	return start_write_offset;
}

/*
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc, dsize;

	/* Capture the read/write indices before they can change */
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
	dsize = rbi->ring_datasize;

	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	*read = dsize - *write;
}

/* Get various debug metrics for the specified ring buffer. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	mutex_lock(&ring_info->ring_buffer_mutex);

	if (!ring_info->ring_buffer) {
		mutex_unlock(&ring_info->ring_buffer_mutex);
		return -EINVAL;
	}

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);
	debug_info->bytes_avail_toread = bytes_avail_toread;
	debug_info->bytes_avail_towrite = bytes_avail_towrite;
	debug_info->current_read_index = ring_info->ring_buffer->read_index;
	debug_info->current_write_index = ring_info->ring_buffer->write_index;
	debug_info->current_interrupt_mask
		= ring_info->ring_buffer->interrupt_mask;
	mutex_unlock(&ring_info->ring_buffer_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);

/* Initialize a channel's ring buffer info mutex locks */
void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
{
	mutex_init(&channel->inbound.ring_buffer_mutex);
	mutex_init(&channel->outbound.ring_buffer_mutex);
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 page_cnt)
{
	int i;
	struct page **pages_wraparound;

	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));

	/*
	 * First page holds struct hv_ring_buffer, do wraparound mapping for
	 * the rest.
	 */
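	/*
	 * The data pages are mapped twice, back to back, so the ring
	 * contents appear virtually contiguous and reads/writes that
	 * wrap past the end need only a single memcpy.
	 */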
	pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
				   GFP_KERNEL);
	if (!pages_wraparound)
		return -ENOMEM;

	pages_wraparound[0] = pages;
	for (i = 0; i < 2 * (page_cnt - 1); i++)
		pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];

	ring_info->ring_buffer = (struct hv_ring_buffer *)
		vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);

	kfree(pages_wraparound);

	if (!ring_info->ring_buffer)
		return -ENOMEM;

	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	/* Set the feature bit for enabling flow control. */
	ring_info->ring_buffer->feature_bits.value = 1;

	ring_info->ring_size = page_cnt << PAGE_SHIFT;
	ring_info->ring_size_div10_reciprocal =
		reciprocal_value(ring_info->ring_size / 10);
	ring_info->ring_datasize = ring_info->ring_size -
		sizeof(struct hv_ring_buffer);
	ring_info->priv_read_index = 0;

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	mutex_lock(&ring_info->ring_buffer_mutex);
	vunmap(ring_info->ring_buffer);
	ring_info->ring_buffer = NULL;
	mutex_unlock(&ring_info->ring_buffer_mutex);
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count)
{
	int i;
	u32 bytes_avail_towrite;
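	/* Reserve room for the u64 "previous indices" trailer up front */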
	u32 totalbytes_towrite = sizeof(u64);
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices;
	unsigned long flags;
	struct hv_ring_buffer_info *outring_info = &channel->outbound;

	if (channel->rescind)
		return -ENODEV;

	for (i = 0; i < kv_count; i++)
		totalbytes_towrite += kv_list[i].iov_len;

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		++channel->out_full_total;

		if (!channel->out_full_flag) {
			++channel->out_full_first;
			channel->out_full_flag = true;
		}

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	channel->out_full_flag = false;

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for (i = 0; i < kv_count; i++) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   kv_list[i].iov_base,
							   kv_list[i].iov_len);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	virt_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	hv_signal_on_write(old_write, channel);

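	/* Re-check rescind: the channel may have gone away during the write */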
	if (channel->rescind)
		return -ENODEV;

	return 0;
}

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw)
{
	struct vmpacket_descriptor *desc;
	u32 packetlen, offset;

	if (unlikely(buflen == 0))
		return -EINVAL;

	*buffer_actual_len = 0;
	*requestid = 0;

	/* Make sure there is something to read */
	desc = hv_pkt_iter_first(channel);
	if (desc == NULL) {
		/*
		 * No error is set when there is not even a header; drivers
		 * are expected to check buffer_actual_len.
		 */
		return 0;
	}

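	/* offset8 and len8 are in units of 8 bytes; a raw read includes the descriptor */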
	offset = raw ? 0 : (desc->offset8 << 3);
	packetlen = (desc->len8 << 3) - offset;
	*buffer_actual_len = packetlen;
	*requestid = desc->trans_id;

	if (unlikely(packetlen > buflen))
		return -ENOBUFS;

	/* since ring is double mapped, only one copy is necessary */
	memcpy(buffer, (const char *)desc + offset, packetlen);

	/* Advance ring index to next packet descriptor */
	__hv_pkt_iter_next(channel, desc);

	/* Notify host of update */
	hv_pkt_iter_close(channel);

	return 0;
}

/*
 * Determine number of bytes available in ring buffer after
 * the current iterator (priv_read_index) location.
 *
 * This is similar to hv_get_bytes_to_read but with private
 * read index instead.
 */
static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
{
	u32 priv_read_loc = rbi->priv_read_index;
	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	if (write_loc >= priv_read_loc)
		return write_loc - priv_read_loc;
	else
		return (rbi->ring_datasize - priv_read_loc) + write_loc;
}

/*
 * Get first vmbus packet from ring buffer after read_index
 *
 * If ring buffer is empty, returns NULL and no other action needed.
 */
struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	struct vmpacket_descriptor *desc;

	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
		return NULL;

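	/* Prefetch the area just past this packet; the next descriptor likely starts there */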
	desc = hv_get_ring_buffer(rbi) + rbi->priv_read_index;
	if (desc)
		prefetch((char *)desc + (desc->len8 << 3));

	return desc;
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_first);

/*
 * Get next vmbus packet from ring buffer.
 *
 * Advances the current location (priv_read_index) and checks for more
 * data. If there is no more data, then return NULL.
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = rbi->ring_datasize;

	/* bump offset to next potential packet */
	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	if (rbi->priv_read_index >= dsize)
		rbi->priv_read_index -= dsize;

	/* more data? */
	return hv_pkt_iter_first(channel);
}
EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);

/* How many bytes were read in this iterator cycle */
static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
				  u32 start_read_index)
{
	if (rbi->priv_read_index >= start_read_index)
		return rbi->priv_read_index - start_read_index;
	else
		return rbi->ring_datasize - start_read_index +
			rbi->priv_read_index;
}

/*
 * Update host ring buffer after iterating over packets. If the host has
 * stopped queuing new entries because it found the ring buffer full, and
 * sufficient space is being freed up, signal the host. But be careful to
 * only signal the host when necessary, both for performance reasons and
 * because Hyper-V protects itself by throttling guests that signal
 * inappropriately.
 *
 * Determining when to signal is tricky. There are three key data inputs
 * that must be handled in this order to avoid race conditions:
 *
 * 1. Update the read_index
 * 2. Read the pending_send_sz
 * 3. Read the current write_index
 *
 * The interrupt_mask is not used to determine when to signal. The
 * interrupt_mask is used only on the guest->host ring buffer when
 * sending requests to the host. The host does not use it on the host->
 * guest ring buffer to indicate whether it should be signaled.
 */
void hv_pkt_iter_close(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	u32 curr_write_sz, pending_sz, bytes_read, start_read_index;

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	virt_rmb();
	start_read_index = rbi->ring_buffer->read_index;
	rbi->ring_buffer->read_index = rbi->priv_read_index;

	/*
	 * Older versions of Hyper-V (before WS2012 and Win8) do not
	 * implement pending_send_sz and simply poll if the host->guest
	 * ring buffer is full. No signaling is needed or expected.
	 */
	if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
		return;

	/*
	 * Issue a full memory barrier before making the signaling decision.
	 * If reading pending_send_sz were to be reordered and happen
	 * before we commit the new read_index, a race could occur. The
	 * host could set pending_send_sz after we have sampled it, and
	 * the ring buffer could block before we commit the read index;
	 * we would then miss sending the interrupt. Issue a full
	 * memory barrier to address this.
	 */
	virt_mb();

	/*
	 * If the pending_send_sz is zero, then the ring buffer is not
	 * blocked and there is no need to signal. This is by far the
	 * most common case, so exit quickly for best performance.
	 */
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	if (!pending_sz)
		return;

	/*
	 * Ensure the read of write_index in hv_get_bytes_to_write()
	 * happens after the read of pending_send_sz.
	 */
	virt_rmb();
	curr_write_sz = hv_get_bytes_to_write(rbi);
	bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);

	/*
	 * We want to signal the host only if we're transitioning
	 * from a "not enough free space" state to an "enough free
	 * space" state. For example, it's possible that this function
	 * could run and free up enough space to signal the host, and then
	 * run again and free up additional space before the host has a
	 * chance to clear the pending_send_sz. The 2nd invocation would
	 * be a null transition from "enough free space" to "enough free
	 * space", which doesn't warrant a signal.
	 *
	 * Exactly filling the ring buffer is treated as "not enough
	 * space". The ring buffer always must have at least one byte
	 * empty so the empty and full conditions are distinguishable.
	 * hv_get_bytes_to_write() doesn't fully tell the truth in
	 * this regard.
	 *
	 * So first check if we were in the "enough free space" state
	 * before we began the iteration. If so, the host was not
	 * blocked, and there's no need to signal.
	 */
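	/* curr_write_sz - bytes_read is the free space before this read cycle began */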
	if (curr_write_sz - bytes_read > pending_sz)
		return;

	/*
	 * Similarly, if the new state is "not enough space", then
	 * there's no need to signal.
	 */
	if (curr_write_sz <= pending_sz)
		return;

	++channel->intr_in_full;
	vmbus_setevent(channel);
}
EXPORT_SYMBOL_GPL(hv_pkt_iter_close);