drivers/net/hyperv/netvsc.c (mirror_ubuntu-artful-kernel.git)
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"

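/*
 * Allocate the per-device netvsc state plus the channel callback buffer,
 * and swap the hv_device driver-private pointer from the struct net_device
 * registered by the probe path to the new struct netvsc_device (the old
 * pointer is stashed in net_device->ndev).
 */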
static struct netvsc_device *alloc_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->start_remove = false;
	net_device->destroy = false;
	net_device->dev = device;
	net_device->ndev = ndev;

	hv_set_drvdata(device, net_device);
	return net_device;
}

static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}

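/*
 * The outbound lookup refuses to return the device once teardown has
 * started (destroy is set), so no new sends are issued, while the inbound
 * lookup keeps returning it until all outstanding sends have drained, so
 * their completions can still be handled.
 */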
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);
	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}

static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(device);

	if (!net_device)
		goto get_in_err;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

get_in_err:
	return net_device;
}

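/*
 * Tear down the receive and send buffers shared with the host: first
 * revoke each buffer with an NVSP message, then tear down its GPADL, and
 * only then free the guest-side memory. If a step fails we return early
 * and leak rather than free memory the host may still be using.
 */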
static int netvsc_destroy_buf(struct netvsc_device *net_device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = net_device->ndev;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg), therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * SendSendBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendSendBuffer msg), therefore we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id = 0;

		ret = vmbus_sendpacket(net_device->dev->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(net_device->dev->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}

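/*
 * Set up the receive and send buffers shared with the host. For each
 * buffer: allocate it, establish a GPADL for it on the channel, tell
 * NetVSP about the GPADL handle, and wait for the host's completion
 * message describing the resulting section layout.
 */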
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	net_device->recv_buf = vzalloc(net_device->recv_buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = 0;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
	BUG_ON(t == 0);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count: %d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_destroy_buf(net_device);

exit:
	return ret;
}

/*
 * Negotiate a single NVSP protocol version: min and max are both set to
 * nvsp_ver, so the host either accepts exactly that version or the
 * request fails and the caller can probe the next one. For NVSPv2 and
 * later, also send the NDIS configuration (MTU and 802.1Q capability).
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	int ret, t;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0)
		return -ETIMEDOUT;

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
							ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

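/*
 * Connect to NetVSP: probe protocol versions from newest to oldest until
 * one is accepted, report our NDIS version, size the receive/send buffers
 * according to the negotiated protocol, then set the buffers up.
 */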
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	struct net_device *ndev;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
			   NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = ARRAY_SIZE(ver_list);

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001; /* NDIS 6.1 */
	else
		ndis_version = 0x0006001e; /* NDIS 6.30 */

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}

static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_buf(net_device);
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}

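/*
 * Simple flow control on the outbound ring: a transmit queue is stopped
 * when available ring space falls below the low-water mark and woken
 * again once it climbs back above the high-water mark (or the queue has
 * fully drained).
 */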
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

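/*
 * Handle a completion packet from the host: channel-setup completions
 * are copied back and signalled to the waiter in netvsc_init_buf() /
 * negotiate_nvsp_ver(); RNDIS send completions release the send-buffer
 * slot, notify the upper layer, and restart the queue if needed.
 */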
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev;
	u32 send_index;

	ndev = net_device->ndev;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
			packet->trans_id;

		/* Notify the layer above us */
		if (nvsc_packet) {
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = nvsc_packet->channel;
			nvsc_packet->send_completion(
				nvsc_packet->send_completion_ctx);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends =
			atomic_dec_return(&net_device->queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type "
			   "%d received\n", nvsp_packet->hdr.msg_type);
	}
}

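/*
 * Claim a free send-buffer section by scanning the allocation bitmap one
 * word at a time: find a zero bit with ffz() and take it atomically with
 * sync_test_and_set_bit(), so concurrent senders cannot claim the same
 * section. Returns NETVSC_INVALID_INDEX when no section is free.
 */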
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}
	return ret_val;
}

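/*
 * Copy a packet's page buffers into the claimed send-buffer section so
 * the host can read the data directly from the shared buffer instead of
 * through a GPA list. Returns the number of bytes copied.
 */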
u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
			    unsigned int section_index,
			    struct hv_netvsc_packet *packet)
{
	char *start = net_device->send_buf;
	char *dest = (start + (section_index * net_device->send_section_size));
	int i;
	u32 msg_size = 0;

	for (i = 0; i < packet->page_buf_cnt; i++) {
		char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
		u32 offset = packet->page_buf[i].offset;
		u32 len = packet->page_buf[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}
	return msg_size;
}

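/*
 * Transmit one RNDIS packet: small packets are copied into a send-buffer
 * section (and their skb freed immediately, since no completion data is
 * needed), larger ones go out as a page-buffer list. The queue is stopped
 * when the outbound ring runs low, matching the watermarks above.
 */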
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet)
{
	struct netvsc_device *net_device;
	int ret = 0;
	struct nvsp_message sendMessage;
	struct net_device *ndev;
	struct vmbus_channel *out_channel = NULL;
	u64 req_id;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	u32 msg_size = 0;
	struct sk_buff *skb;
	u16 q_idx = packet->q_idx;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = net_device->ndev;

	sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (packet->is_data_pkt) {
		/* 0 is RMC_DATA; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	/* Attempt to send via sendbuf */
	if (packet->total_data_buflen < net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			msg_size = netvsc_copy_to_send_buf(net_device,
							   section_index,
							   packet);
			skb = (struct sk_buff *)
			      (unsigned long)packet->send_completion_tid;
			if (skb)
				dev_kfree_skb_any(skb);
			packet->page_buf_cnt = 0;
		}
	}
	packet->send_buf_index = section_index;

	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		section_index;
	sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = msg_size;

	if (packet->send_completion)
		req_id = (ulong)packet;
	else
		req_id = 0;

	out_channel = net_device->chn_table[packet->q_idx];
	if (out_channel == NULL)
		out_channel = device->channel;
	packet->channel = out_channel;

	if (out_channel->rescind)
		return -ENODEV;

	if (packet->page_buf_cnt) {
		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  packet->page_buf,
						  packet->page_buf_cnt,
						  &sendMessage,
						  sizeof(struct nvsp_message),
						  req_id);
	} else {
		ret = vmbus_sendpacket(out_channel, &sendMessage,
				       sizeof(struct nvsp_message),
				       req_id,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
		    RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(
					ndev, q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}

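/*
 * Acknowledge a received RNDIS packet back to the host. The completion
 * shares the receive's transaction id; if the ring is full we retry a
 * few times with a short delay before giving up.
 */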
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev;

	ndev = net_device->ndev;

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success; nothing more to do */
	} else if (ret == -EAGAIN) {
		/* no more room... wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}

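/*
 * Handle inbound data from the host: validate that the packet is an
 * xfer-page packet carrying RNDIS data in the receive buffer, hand each
 * range (one RNDIS packet per range) to the RNDIS filter, then send a
 * single receive completion with the aggregate status.
 */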
static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev;

	ndev = net_device->ndev;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;
	netvsc_packet->device = device;
	netvsc_packet->channel = channel;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		netvsc_packet->status = NVSP_STAT_SUCCESS;
		netvsc_packet->data = (void *)((unsigned long)net_device->
			recv_buf + vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		if (netvsc_packet->status != NVSP_STAT_SUCCESS)
			status = NVSP_STAT_FAIL;
	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}

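/*
 * Process an NVSPv5 send-indirection-table message: copy the host's
 * hash-to-channel table into the device so the transmit path can spread
 * traffic across subchannels (vRSS).
 */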
static void netvsc_send_table(struct hv_device *hdev,
			      struct vmpacket_descriptor *vmpkt)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev;
	struct nvsp_message *nvmsg;
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;
	ndev = nvscdev->ndev;

	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
					(vmpkt->offset8 << 3));

	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
		return;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}

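/*
 * Channel callback, run for the primary channel and each subchannel:
 * drain the ring buffer, dispatching each packet by type. If a packet is
 * larger than the per-channel buffer, a bigger one is allocated with
 * GFP_ATOMIC and freed once the loop ends.
 */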
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = net_device->ndev;
	buffer = get_per_channel_state(channel);

	do {
		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				switch (desc->type) {
				case VM_PKT_COMP:
					netvsc_send_completion(net_device,
							       device, desc);
					break;

				case VM_PKT_DATA_USING_XFER_PAGES:
					netvsc_receive(net_device, channel,
						       device, desc);
					break;

				case VM_PKT_DATA_INBAND:
					netvsc_send_table(device, desc);
					break;

				default:
					netdev_err(ndev,
						   "unhandled packet type %d, "
						   "tid %llx len %d\n",
						   desc->type, request_id,
						   bytes_recvd);
					break;
				}

			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
}

/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev;

	net_device = alloc_net_device(device);
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/*
	 * Coming into this function, struct net_device * is
	 * registered as the driver private data.
	 * In alloc_net_device(), we register struct netvsc_device *
	 * as the driver private data and stash away struct net_device *
	 * in struct netvsc_device *.
	 */
	ndev = net_device->ndev;

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	net_device->chn_table[0] = device->channel;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}