/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <asm/sync_bitops.h>

#include "hyperv_net.h"
/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = net_device_ctx->nvdev;
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}
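/*
 * Allocate the per-device netvsc state along with the buffer used by the
 * channel callback (netvsc_channel_cb). Returns NULL if either allocation
 * fails; the caller owns the result and releases it with
 * free_netvsc_device().
 */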
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
	if (!net_device->cb_buffer) {
		kfree(net_device);
		return NULL;
	}

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	atomic_set(&net_device->open_cnt, 0);
	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	return net_device;
}
static void free_netvsc_device(struct netvsc_device *nvdev)
{
	kfree(nvdev->cb_buffer);
	kfree(nvdev);
}
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (net_device && net_device->destroy)
		net_device = NULL;

	return net_device;
}
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *net_device = hv_device_to_netvsc_device(device);

	if (!net_device)
		return NULL;

	if (net_device->destroy &&
	    atomic_read(&net_device->num_outstanding_sends) == 0)
		net_device = NULL;

	return net_device;
}
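/*
 * Teardown mirrors netvsc_init_buf() in reverse: for each of the receive
 * and send buffers, first revoke the buffer from the host (only if the
 * earlier SEND_*_BUF handshake completed), then tear down the GPADL that
 * maps it into host address space, and finally free the guest-side
 * allocation. A failure at any step returns early, deliberately leaking
 * rather than risking the host touching freed memory.
 */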
static int netvsc_destroy_buf(struct hv_device *device)
{
	struct nvsp_message *revoke_packet;
	int ret = 0;
	struct net_device *ndev = hv_get_drvdata(device);
	struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.revoke_recv_buf.id =
			NETVSC_RECEIVE_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return ret;
		}
	}

	/* Teardown the gpadl on the vsp end */
	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return ret;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}

	if (net_device->recv_buf) {
		/* Free up the receive buffer */
		vfree(net_device->recv_buf);
		net_device->recv_buf = NULL;
	}

	if (net_device->recv_section) {
		net_device->recv_section_cnt = 0;
		kfree(net_device->recv_section);
		net_device->recv_section = NULL;
	}

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_size) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return ret;
		}
	}
	/* Teardown the gpadl on the vsp end */
	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return ret;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
	if (net_device->send_buf) {
		/* Free up the send buffer */
		vfree(net_device->send_buf);
		net_device->send_buf = NULL;
	}
	kfree(net_device->send_section_map);

	return ret;
}
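/*
 * Set up the receive and send buffers shared with the host. Each buffer
 * goes through the same four steps: allocate (preferring the NUMA node of
 * the channel's target CPU), establish a GPADL so the host can map the
 * pages, notify NetVSP of the GPADL handle, and block on channel_init_wait
 * until netvsc_send_completion() copies the host's response back into
 * channel_init_pkt.
 */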
static int netvsc_init_buf(struct hv_device *device)
{
	int ret = 0;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	struct net_device *ndev;
	int node;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;
	ndev = hv_get_drvdata(device);

	node = cpu_to_node(device->channel->target_cpu);
	net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
	if (!net_device->recv_buf)
		net_device->recv_buf = vzalloc(net_device->recv_buf_size);

	if (!net_device->recv_buf) {
		netdev_err(ndev, "unable to allocate receive "
			   "buffer of size %d\n", net_device->recv_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    net_device->recv_buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;

	memset(init_packet, 0, sizeof(struct nvsp_message));

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
		net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_recv_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete receive buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_recv_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->recv_section_cnt = init_packet->msg.
		v1_msg.send_recv_buf_complete.num_sections;

	net_device->recv_section = kmemdup(
		init_packet->msg.v1_msg.send_recv_buf_complete.sections,
		net_device->recv_section_cnt *
		sizeof(struct nvsp_1_receive_buffer_section),
		GFP_KERNEL);
	if (net_device->recv_section == NULL) {
		ret = -EINVAL;
		goto cleanup;
	}

	/*
	 * For 1st release, there should only be 1 section that represents the
	 * entire receive buffer
	 */
	if (net_device->recv_section_cnt != 1 ||
	    net_device->recv_section->offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* Now setup the send buffer. */
	net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
	if (!net_device->send_buf)
		net_device->send_buf = vzalloc(net_device->send_buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send "
			   "buffer of size %d\n", net_device->send_buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel.  Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    net_device->send_buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
	    NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size =
		init_packet->msg.v1_msg.send_send_buf_complete.section_size;

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt =
		net_device->send_buf_size / net_device->send_section_size;

	dev_info(&device->device, "Send section size: %d, Section count:%d\n",
		 net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->map_words = DIV_ROUND_UP(net_device->send_section_cnt,
					     BITS_PER_LONG);

	net_device->send_section_map =
		kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	return ret;

cleanup:
	netvsc_destroy_buf(device);

	return ret;
}
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
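/*
 * Connect to NetVSP: negotiate the protocol version by walking ver_list
 * from newest to oldest until one handshake succeeds, send the NDIS
 * version matching the negotiated protocol, then size and post the
 * receive/send buffers.
 */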
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;
	u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
			   NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
	int i, num_ver = 4; /* number of different NVSP versions */

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = num_ver - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Post the big receive buffer to NetVSP */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
	net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE;

	ret = netvsc_init_buf(device);

cleanup:
	return ret;
}
static void netvsc_disconnect_vsp(struct hv_device *device)
{
	netvsc_destroy_buf(device);
}
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device = net_device_ctx->nvdev;

	netvsc_disconnect_vsp(device);

	net_device_ctx->nvdev = NULL;

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	vfree(net_device->sub_cb_buf);
	free_netvsc_device(net_device);
	return 0;
}
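/*
 * Transmit flow control is driven by how full the outbound ring is: a
 * queue is stopped when available ring space falls below the low watermark
 * (see netvsc_send_pkt) and woken again once completions push it back
 * above the high watermark (see netvsc_send_completion).
 */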
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

/*
 * Get the percentage of available bytes to write in the ring.
 * The return value is in range from 0 to 100.
 */
static inline u32 hv_ringbuf_avail_percent(
		struct hv_ring_buffer_info *ring_info)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);

	return avail_write * 100 / ring_info->ring_datasize;
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}
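/*
 * Handle a VM_PKT_COMP packet from the host. Responses to the init-time
 * requests (INIT, SEND_RECV_BUF, SEND_SEND_BUF, SUBCHANNEL) are copied
 * into channel_init_pkt and wake the waiter; RNDIS packet completions
 * release the send-buffer slot, free the skb, and may restart a stopped
 * transmit queue.
 */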
static void netvsc_send_completion(struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   struct hv_device *device,
				   struct vmpacket_descriptor *packet)
{
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *nvsc_packet;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 send_index;
	struct sk_buff *skb;

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE) ||
	    (nvsp_packet->hdr.msg_type ==
	     NVSP_MSG5_TYPE_SUBCHANNEL)) {
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
	} else if (nvsp_packet->hdr.msg_type ==
		   NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
		int num_outstanding_sends;
		u16 q_idx = 0;
		struct vmbus_channel *channel = device->channel;
		int queue_sends;

		/* Get the send context */
		skb = (struct sk_buff *)(unsigned long)packet->trans_id;

		/* Notify the layer above us */
		if (skb) {
			nvsc_packet = (struct hv_netvsc_packet *)skb->cb;
			send_index = nvsc_packet->send_buf_index;
			if (send_index != NETVSC_INVALID_INDEX)
				netvsc_free_send_slot(net_device, send_index);
			q_idx = nvsc_packet->q_idx;
			channel = incoming_channel;
			dev_kfree_skb_any(skb);
		}

		num_outstanding_sends =
			atomic_dec_return(&net_device->num_outstanding_sends);
		queue_sends =
			atomic_dec_return(&net_device->queue_sends[q_idx]);

		if (net_device->destroy && num_outstanding_sends == 0)
			wake_up(&net_device->wait_drain);

		if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
		    !net_device_ctx->start_remove &&
		    (hv_ringbuf_avail_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
			netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
	} else {
		netdev_err(ndev, "Unknown send completion packet type- "
			   "%d received!!\n", nvsp_packet->hdr.msg_type);
	}
}
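/*
 * Claim a free send-buffer section without taking a lock: scan the bitmap
 * one word at a time, find the first zero bit with ffz(), and claim it
 * atomically with sync_test_and_set_bit(). If another CPU won the race
 * (prev_val set), keep scanning. Returns NETVSC_INVALID_INDEX when no
 * section is free.
 */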
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
	unsigned long index;
	u32 max_words = net_device->map_words;
	unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
	u32 section_cnt = net_device->send_section_cnt;
	int ret_val = NETVSC_INVALID_INDEX;
	int i;
	int prev_val;

	for (i = 0; i < max_words; i++) {
		if (!~(map_addr[i]))
			continue;
		index = ffz(map_addr[i]);
		prev_val = sync_test_and_set_bit(index, &map_addr[i]);
		if (prev_val)
			continue;
		if ((index + (i * BITS_PER_LONG)) >= section_cnt)
			break;
		ret_val = (index + (i * BITS_PER_LONG));
		break;
	}

	return ret_val;
}
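/*
 * Copy an outgoing packet into its send-buffer section at offset
 * pend_size. When more data is expected (xmit_more) and this is a full
 * copy, the RNDIS message is padded out to pkt_align so the next packet
 * appended to the batch starts on an aligned boundary.
 */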
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
				   unsigned int section_index,
				   u32 pend_size,
				   struct hv_netvsc_packet *packet,
				   struct rndis_message *rndis_msg,
				   struct hv_page_buffer **pb,
				   struct sk_buff *skb)
{
	char *start = net_device->send_buf;
	char *dest = start + (section_index * net_device->send_section_size)
		     + pend_size;
	int i;
	bool is_data_pkt = (skb != NULL) ? true : false;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
	u32 msg_size = 0;
	u32 padding = 0;
	u32 remain = packet->total_data_buflen % net_device->pkt_align;
	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
		packet->page_buf_cnt;

	/* Add padding */
	if (is_data_pkt && xmit_more && remain &&
	    !packet->cp_partial) {
		padding = net_device->pkt_align - remain;
		rndis_msg->msg_len += padding;
		packet->total_data_buflen += padding;
	}

	for (i = 0; i < page_count; i++) {
		char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
		u32 offset = (*pb)[i].offset;
		u32 len = (*pb)[i].len;

		memcpy(dest, (src + offset), len);
		msg_size += len;
		dest += len;
	}

	if (padding) {
		memset(dest, 0, padding);
		msg_size += padding;
	}

	return msg_size;
}
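/*
 * Post one packet to the host. The skb pointer doubles as the VMBus
 * request id, which comes back as trans_id in the completion and lets
 * netvsc_send_completion() recover the skb. Control messages (no skb) use
 * channel type 1 (RMC_CONTROL) and a zero request id.
 */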
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer **pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	u16 q_idx = packet->q_idx;
	struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
	struct net_device *ndev = hv_get_drvdata(device);
	u64 req_id;
	int ret;
	struct hv_page_buffer *pgbuf;
	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb != NULL) {
		/* 0 is RMC_DATA; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0;
	} else {
		/* 1 is RMC_CONTROL; */
		nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1;
	}

	nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
		packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
	else
		nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size =
			packet->total_data_buflen;

	req_id = (unsigned long)skb;

	if (out_channel->rescind)
		return -ENODEV;

	/*
	 * It is possible that once we successfully place this packet
	 * on the ringbuffer, we may stop the queue. In that case, we want
	 * to notify the host independent of the xmit_more flag. We don't
	 * need to be precise here; in the worst case we may signal the host
	 * unnecessarily.
	 */
	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
		xmit_more = false;

	if (packet->page_buf_cnt) {
		pgbuf = packet->cp_partial ? (*pb) +
			packet->rmsg_pgcnt : (*pb);
		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
						      pgbuf,
						      packet->page_buf_cnt,
						      &nvmsg,
						      sizeof(struct nvsp_message),
						      req_id,
						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
						      !xmit_more);
	} else {
		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
					   sizeof(struct nvsp_message),
					   req_id,
					   VM_PKT_DATA_INBAND,
					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
					   !xmit_more);
	}

	if (ret == 0) {
		atomic_inc(&net_device->num_outstanding_sends);
		atomic_inc(&net_device->queue_sends[q_idx]);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));

			if (atomic_read(&net_device->queue_sends[q_idx]) < 1)
				netif_tx_wake_queue(netdev_get_tx_queue(ndev,
									q_idx));
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
		if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
			netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx));
			ret = -ENOSPC;
		}
	} else {
		netdev_err(ndev, "Unable to send packet %p ret %d\n",
			   packet, ret);
	}

	return ret;
}
/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
				struct sk_buff **msd_skb,
				struct multi_send_data *msdp)
{
	*msd_skb = msdp->skb;
	*msd_send = msdp->pkt;
	msdp->skb = NULL;
	msdp->pkt = NULL;
	msdp->count = 0;
}
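/*
 * Main transmit entry point. Data packets are batched into a shared
 * send-buffer section when possible: either appended to the current
 * multi-send section (fully, or partially with cp_partial when only the
 * RNDIS header fits), or started in a freshly claimed section. Packets too
 * large for a section fall back to page-buffer sends, flushing any pending
 * batch first.
 */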
int netvsc_send(struct hv_device *device,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer **pb,
		struct sk_buff *skb)
{
	struct netvsc_device *net_device;
	int ret = 0, m_ret = 0;
	struct vmbus_channel *out_channel;
	u16 q_idx = packet->q_idx;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch;
	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

	net_device = get_outbound_net_device(device);
	if (!net_device)
		return -ENODEV;

	out_channel = net_device->chn_table[q_idx];

	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send control message directly without accessing msd (Multi-Send
	 * Data) field which may be changed during data packet processing.
	 */
	if (!skb) {
		cur_send = packet;
		goto send_now;
	}

	msdp = &net_device->msd[q_idx];

	/* batch packets in send buffer if possible */
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	try_batch = (skb != NULL) && msd_len > 0 && msdp->count <
		    net_device->max_pkt;

	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if ((skb != NULL) && pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (section_index != NETVSC_INVALID_INDEX) {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, skb);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->skb)
			dev_kfree_skb_any(msdp->skb);

		if (xmit_more && !packet->cp_partial) {
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		m_ret = netvsc_send_pkt(device, msd_send, net_device,
					NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

send_now:
	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
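/*
 * Acknowledge a received RNDIS packet to the host. -EAGAIN means the ring
 * is full, so the send is retried a few times with a short udelay() before
 * giving up.
 */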
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev = hv_get_drvdata(device);

	recvcompMessage.hdr.msg_type =
				NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			   " (tid %llx)...retrying %d\n", transaction_id,
			   retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				   "completion pkt (tid %llx)...give up retrying\n",
				   transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			   "completion pkt - %llx\n", transaction_id);
	}
}
static void netvsc_receive(struct netvsc_device *net_device,
			   struct vmbus_channel *channel,
			   struct hv_device *device,
			   struct vmpacket_descriptor *packet)
{
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet nv_pkt;
	struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;
	struct net_device *ndev = hv_get_drvdata(device);
	void *data;

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		netdev_err(ndev, "Unknown packet type received - %d\n",
			   packet->type);
		return;
	}

	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
					      (packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		netdev_err(ndev, "Unknown nvsp packet type received-"
			   " %d\n", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		netdev_err(ndev, "Invalid xfer page set id - "
			   "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	count = vmxferpage_packet->range_cnt;

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		/* Initialize the netvsc packet */
		data = (void *)((unsigned long)net_device->recv_buf +
			vmxferpage_packet->ranges[i].byte_offset);
		netvsc_packet->total_data_buflen =
			vmxferpage_packet->ranges[i].byte_count;

		/* Pass it to the upper layer */
		status = rndis_filter_receive(device, netvsc_packet, &data,
					      channel);
	}

	netvsc_send_recv_completion(device, channel, net_device,
				    vmxferpage_packet->d.trans_id, status);
}
static void netvsc_send_table(struct hv_device *hdev,
			      struct nvsp_message *nvmsg)
{
	struct netvsc_device *nvscdev;
	struct net_device *ndev = hv_get_drvdata(hdev);
	int i;
	u32 count, *tab;

	nvscdev = get_outbound_net_device(hdev);
	if (!nvscdev)
		return;

	count = nvmsg->msg.v5_msg.send_table.count;
	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
		      nvmsg->msg.v5_msg.send_table.offset);

	for (i = 0; i < count; i++)
		nvscdev->send_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
			   struct nvsp_message *nvmsg)
{
	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
static inline void netvsc_receive_inband(struct hv_device *hdev,
				 struct net_device_context *net_device_ctx,
				 struct nvsp_message *nvmsg)
{
	switch (nvmsg->hdr.msg_type) {
	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
		netvsc_send_table(hdev, nvmsg);
		break;

	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
		netvsc_send_vf(net_device_ctx, nvmsg);
		break;
	}
}
static void netvsc_process_raw_pkt(struct hv_device *device,
				   struct vmbus_channel *channel,
				   struct netvsc_device *net_device,
				   struct net_device *ndev,
				   u64 request_id,
				   struct vmpacket_descriptor *desc)
{
	struct nvsp_message *nvmsg;
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	nvmsg = (struct nvsp_message *)((unsigned long)
		desc + (desc->offset8 << 3));

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(net_device, channel, device, desc);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		netvsc_receive(net_device, channel, device, desc);
		break;

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(device, net_device_ctx, nvmsg);
		break;

	default:
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, request_id);
		break;
	}
}
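/*
 * Channel interrupt callback. Packets are drained in place with
 * get_next_pkt_raw()/put_pkt_raw() and the read index is committed in one
 * batch; anything that cannot be read in place falls back to
 * vmbus_recvpacket_raw(), growing the per-channel buffer on -ENOBUFS to
 * handle large packets.
 */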
void netvsc_channel_cb(void *context)
{
	int ret;
	struct vmbus_channel *channel = (struct vmbus_channel *)context;
	struct hv_device *device;
	struct netvsc_device *net_device;
	u32 bytes_recvd;
	u64 request_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = NETVSC_PACKET_SIZE;
	struct net_device *ndev;
	bool need_to_commit = false;

	if (channel->primary_channel != NULL)
		device = channel->primary_channel->device_obj;
	else
		device = channel->device_obj;

	net_device = get_inbound_net_device(device);
	if (!net_device)
		return;
	ndev = hv_get_drvdata(device);
	buffer = get_per_channel_state(channel);

	do {
		desc = get_next_pkt_raw(channel);
		if (desc != NULL) {
			netvsc_process_raw_pkt(device,
					       channel,
					       net_device,
					       ndev,
					       desc->trans_id,
					       desc);

			put_pkt_raw(channel, desc);
			need_to_commit = true;
			continue;
		}
		if (need_to_commit) {
			need_to_commit = false;
			commit_rd_index(channel);
		}

		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
					   &bytes_recvd, &request_id);
		if (ret == 0) {
			if (bytes_recvd > 0) {
				desc = (struct vmpacket_descriptor *)buffer;
				netvsc_process_raw_pkt(device,
						       channel,
						       net_device,
						       ndev,
						       request_id,
						       desc);
			} else {
				/*
				 * We are done for this pass.
				 */
				break;
			}

		} else if (ret == -ENOBUFS) {
			if (bufferlen > NETVSC_PACKET_SIZE)
				kfree(buffer);
			/* Handle large packet */
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (buffer == NULL) {
				/* Try again next time around */
				netdev_err(ndev,
					   "unable to allocate buffer of size "
					   "(%d)!!\n", bytes_recvd);
				break;
			}

			bufferlen = bytes_recvd;
		}
	} while (1);

	if (bufferlen > NETVSC_PACKET_SIZE)
		kfree(buffer);
}
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
	int ret = 0;
	int i;
	int ring_size =
		((struct netvsc_device_info *)additional_info)->ring_size;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return -ENOMEM;

	net_device->ring_size = ring_size;

	/* Initialize the NetVSC channel extension */
	init_completion(&net_device->channel_init_wait);

	set_per_channel_state(device->channel, net_device->cb_buffer);

	/* Open the channel */
	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
			 ring_size * PAGE_SIZE, NULL, 0,
			 netvsc_channel_cb, device->channel);
	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	pr_info("hv_netvsc channel opened successfully\n");

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
		net_device->chn_table[i] = device->channel;

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	wmb();

	net_device_ctx->nvdev = net_device;

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	return ret;

close:
	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	free_netvsc_device(net_device);

	return ret;
}