1 /*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * Authors:
17 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com>
19 */
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/wait.h>
23 #include <linux/highmem.h>
24 #include <linux/slab.h>
25 #include <linux/io.h>
26 #include <linux/if_ether.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_vlan.h>
29 #include <linux/nls.h>
30 #include <linux/vmalloc.h>
31
32 #include "hyperv_net.h"
33
34 static void rndis_set_multicast(struct work_struct *w);
35
36 #define RNDIS_EXT_LEN PAGE_SIZE
37 struct rndis_request {
38 struct list_head list_ent;
39 struct completion wait_event;
40
41 struct rndis_message response_msg;
42 /*
43 * The buffer for extended info after the RNDIS response message. It's
44 * referenced based on the data offset in the RNDIS message. Its size
45 * is enough for current needs, and should be sufficient for the near
46 * future.
47 */
48 u8 response_ext[RNDIS_EXT_LEN];
49
50 /* Simplify allocation by having a netvsc packet inline */
51 struct hv_netvsc_packet pkt;
52
53 struct rndis_message request_msg;
54 /*
55 * The buffer for the extended info after the RNDIS request message.
56 * It is referenced and sized in a similar way as response_ext.
57 */
58 u8 request_ext[RNDIS_EXT_LEN];
59 };
60
61 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
62 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
63 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
64 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
65 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
66 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
67 };
68
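/* Allocate and initialize the per-net-device RNDIS state: the pending
 * request list and its lock, and the multicast filter work item.
 */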
69 static struct rndis_device *get_rndis_device(void)
70 {
71 struct rndis_device *device;
72
73 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
74 if (!device)
75 return NULL;
76
77 spin_lock_init(&device->request_lock);
78
79 INIT_LIST_HEAD(&device->req_list);
80 INIT_WORK(&device->mcast_work, rndis_set_multicast);
81
82 device->state = RNDIS_DEV_UNINITIALIZED;
83
84 return device;
85 }
86
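/* Allocate an RNDIS control request of the given type and length, assign
 * it a unique request id and add it to the device's pending-request list.
 */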
87 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
88 u32 msg_type,
89 u32 msg_len)
90 {
91 struct rndis_request *request;
92 struct rndis_message *rndis_msg;
93 struct rndis_set_request *set;
94 unsigned long flags;
95
96 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
97 if (!request)
98 return NULL;
99
100 init_completion(&request->wait_event);
101
102 rndis_msg = &request->request_msg;
103 rndis_msg->ndis_msg_type = msg_type;
104 rndis_msg->msg_len = msg_len;
105
106 request->pkt.q_idx = 0;
107
108 /*
109 * Set the request id. This field always follows the rndis header in
110 * request/response packet types, so we just use the SetRequest as a
111 * template.
112 */
113 set = &rndis_msg->msg.set_req;
114 set->req_id = atomic_inc_return(&dev->new_req_id);
115
116 /* Add to the request list */
117 spin_lock_irqsave(&dev->request_lock, flags);
118 list_add_tail(&request->list_ent, &dev->req_list);
119 spin_unlock_irqrestore(&dev->request_lock, flags);
120
121 return request;
122 }
123
124 static void put_rndis_request(struct rndis_device *dev,
125 struct rndis_request *req)
126 {
127 unsigned long flags;
128
129 spin_lock_irqsave(&dev->request_lock, flags);
130 list_del(&req->list_ent);
131 spin_unlock_irqrestore(&dev->request_lock, flags);
132
133 kfree(req);
134 }
135
136 static void dump_rndis_message(struct hv_device *hv_dev,
137 const struct rndis_message *rndis_msg)
138 {
139 struct net_device *netdev = hv_get_drvdata(hv_dev);
140
141 switch (rndis_msg->ndis_msg_type) {
142 case RNDIS_MSG_PACKET:
143 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
144 "data offset %u data len %u, # oob %u, "
145 "oob offset %u, oob len %u, pkt offset %u, "
146 "pkt len %u\n",
147 rndis_msg->msg_len,
148 rndis_msg->msg.pkt.data_offset,
149 rndis_msg->msg.pkt.data_len,
150 rndis_msg->msg.pkt.num_oob_data_elements,
151 rndis_msg->msg.pkt.oob_data_offset,
152 rndis_msg->msg.pkt.oob_data_len,
153 rndis_msg->msg.pkt.per_pkt_info_offset,
154 rndis_msg->msg.pkt.per_pkt_info_len);
155 break;
156
157 case RNDIS_MSG_INIT_C:
158 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
159 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
160 "device flags %d, max xfer size 0x%x, max pkts %u, "
161 "pkt aligned %u)\n",
162 rndis_msg->msg_len,
163 rndis_msg->msg.init_complete.req_id,
164 rndis_msg->msg.init_complete.status,
165 rndis_msg->msg.init_complete.major_ver,
166 rndis_msg->msg.init_complete.minor_ver,
167 rndis_msg->msg.init_complete.dev_flags,
168 rndis_msg->msg.init_complete.max_xfer_size,
169 rndis_msg->msg.init_complete.
170 max_pkt_per_msg,
171 rndis_msg->msg.init_complete.
172 pkt_alignment_factor);
173 break;
174
175 case RNDIS_MSG_QUERY_C:
176 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
177 "(len %u, id 0x%x, status 0x%x, buf len %u, "
178 "buf offset %u)\n",
179 rndis_msg->msg_len,
180 rndis_msg->msg.query_complete.req_id,
181 rndis_msg->msg.query_complete.status,
182 rndis_msg->msg.query_complete.
183 info_buflen,
184 rndis_msg->msg.query_complete.
185 info_buf_offset);
186 break;
187
188 case RNDIS_MSG_SET_C:
189 netdev_dbg(netdev,
190 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
191 rndis_msg->msg_len,
192 rndis_msg->msg.set_complete.req_id,
193 rndis_msg->msg.set_complete.status);
194 break;
195
196 case RNDIS_MSG_INDICATE:
197 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
198 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
199 rndis_msg->msg_len,
200 rndis_msg->msg.indicate_status.status,
201 rndis_msg->msg.indicate_status.status_buflen,
202 rndis_msg->msg.indicate_status.status_buf_offset);
203 break;
204
205 default:
206 netdev_dbg(netdev, "0x%x (len %u)\n",
207 rndis_msg->ndis_msg_type,
208 rndis_msg->msg_len);
209 break;
210 }
211 }
212
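/* Send an RNDIS control request over the netvsc channel. The request
 * message is described by one page buffer, or two if it straddles a
 * page boundary.
 */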
213 static int rndis_filter_send_request(struct rndis_device *dev,
214 struct rndis_request *req)
215 {
216 int ret;
217 struct hv_netvsc_packet *packet;
218 struct hv_page_buffer page_buf[2];
219 struct hv_page_buffer *pb = page_buf;
220 struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
221
222 /* Setup the packet to send it */
223 packet = &req->pkt;
224
225 packet->total_data_buflen = req->request_msg.msg_len;
226 packet->page_buf_cnt = 1;
227
228 pb[0].pfn = virt_to_phys(&req->request_msg) >>
229 PAGE_SHIFT;
230 pb[0].len = req->request_msg.msg_len;
231 pb[0].offset =
232 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
233
234 /* Add one page_buf when request_msg crosses a page boundary */
235 if (pb[0].offset + pb[0].len > PAGE_SIZE) {
236 packet->page_buf_cnt++;
237 pb[0].len = PAGE_SIZE -
238 pb[0].offset;
239 pb[1].pfn = virt_to_phys((void *)&req->request_msg
240 + pb[0].len) >> PAGE_SHIFT;
241 pb[1].offset = 0;
242 pb[1].len = req->request_msg.msg_len -
243 pb[0].len;
244 }
245
246 ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL);
247 return ret;
248 }
249
250 static void rndis_set_link_state(struct rndis_device *rdev,
251 struct rndis_request *request)
252 {
253 u32 link_status;
254 struct rndis_query_complete *query_complete;
255
256 query_complete = &request->response_msg.msg.query_complete;
257
258 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
259 query_complete->info_buflen == sizeof(u32)) {
260 memcpy(&link_status, (void *)((unsigned long)query_complete +
261 query_complete->info_buf_offset), sizeof(u32));
262 rdev->link_state = link_status != 0;
263 }
264 }
265
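/* Match an incoming RNDIS completion to the pending request with the
 * same request id, copy the response into it and wake up the waiter.
 */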
266 static void rndis_filter_receive_response(struct rndis_device *dev,
267 struct rndis_message *resp)
268 {
269 struct rndis_request *request = NULL;
270 bool found = false;
271 unsigned long flags;
272 struct net_device *ndev = dev->ndev;
273
274 spin_lock_irqsave(&dev->request_lock, flags);
275 list_for_each_entry(request, &dev->req_list, list_ent) {
276 /*
277 * All request/response messages contain the RequestId as the
278 * first field
279 */
280 if (request->request_msg.msg.init_req.req_id
281 == resp->msg.init_complete.req_id) {
282 found = true;
283 break;
284 }
285 }
286 spin_unlock_irqrestore(&dev->request_lock, flags);
287
288 if (found) {
289 if (resp->msg_len <=
290 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
291 memcpy(&request->response_msg, resp,
292 resp->msg_len);
293 if (request->request_msg.ndis_msg_type ==
294 RNDIS_MSG_QUERY && request->request_msg.msg.
295 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
296 rndis_set_link_state(dev, request);
297 } else {
298 netdev_err(ndev,
299 "rndis response buffer overflow "
300 "detected (size %u max %zu)\n",
301 resp->msg_len,
302 sizeof(struct rndis_message) + RNDIS_EXT_LEN);
303
304 if (resp->ndis_msg_type ==
305 RNDIS_MSG_RESET_C) {
306 /* does not have a request id field */
307 request->response_msg.msg.reset_complete.
308 status = RNDIS_STATUS_BUFFER_OVERFLOW;
309 } else {
310 request->response_msg.msg.
311 init_complete.status =
312 RNDIS_STATUS_BUFFER_OVERFLOW;
313 }
314 }
315
316 complete(&request->wait_event);
317 } else {
318 netdev_err(ndev,
319 "no rndis request found for this response "
320 "(id 0x%x res type 0x%x)\n",
321 resp->msg.init_complete.req_id,
322 resp->ndis_msg_type);
323 }
324 }
325
326 /*
327 * Get the Per-Packet-Info with the specified type
328 * return NULL if not found.
329 */
330 static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
331 {
332 struct rndis_per_packet_info *ppi;
333 int len;
334
335 if (rpkt->per_pkt_info_offset == 0)
336 return NULL;
337
338 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
339 rpkt->per_pkt_info_offset);
340 len = rpkt->per_pkt_info_len;
341
342 while (len > 0) {
343 if (ppi->type == type)
344 return (void *)((ulong)ppi + ppi->ppi_offset);
345 len -= ppi->size;
346 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
347 }
348
349 return NULL;
350 }
351
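/* Handle an RNDIS data packet: validate its length, strip the RNDIS
 * header and trailer padding, pick up VLAN and checksum per-packet
 * info, and hand the payload to the netvsc receive path.
 */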
352 static int rndis_filter_receive_data(struct net_device *ndev,
353 struct rndis_device *dev,
354 struct rndis_message *msg,
355 struct vmbus_channel *channel,
356 void *data, u32 data_buflen)
357 {
358 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
359 const struct ndis_tcp_ip_checksum_info *csum_info;
360 const struct ndis_pkt_8021q_info *vlan;
361 u32 data_offset;
362
363 /* Remove the rndis header and pass it back up the stack */
364 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
365
366 data_buflen -= data_offset;
367
368 /*
369 * Make sure we got a valid RNDIS message: the remaining buffer
370 * (data_buflen) must cover at least the data packet; the rest is trailer padding
371 */
372 if (unlikely(data_buflen < rndis_pkt->data_len)) {
373 netdev_err(dev->ndev, "rndis message buffer "
374 "overflow detected (got %u, min %u)"
375 "...dropping this message!\n",
376 data_buflen, rndis_pkt->data_len);
377 return NVSP_STAT_FAIL;
378 }
379
380 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
381
382 /*
383 * Remove the rndis trailer padding from the rndis packet message.
384 * rndis_pkt->data_len tells us the real data length; we only pass
385 * the data packet up the stack, without the rndis trailer padding
386 */
387 data = (void *)((unsigned long)data + data_offset);
388 csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
389 return netvsc_recv_callback(ndev, channel,
390 data, rndis_pkt->data_len,
391 csum_info, vlan);
392 }
393
394 int rndis_filter_receive(struct net_device *ndev,
395 struct netvsc_device *net_dev,
396 struct hv_device *dev,
397 struct vmbus_channel *channel,
398 void *data, u32 buflen)
399 {
400 struct net_device_context *net_device_ctx = netdev_priv(ndev);
401 struct rndis_device *rndis_dev = net_dev->extension;
402 struct rndis_message *rndis_msg = data;
403
404 /* Make sure the rndis device state is initialized */
405 if (unlikely(!rndis_dev)) {
406 netif_err(net_device_ctx, rx_err, ndev,
407 "got rndis message but no rndis device!\n");
408 return NVSP_STAT_FAIL;
409 }
410
411 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
412 netif_err(net_device_ctx, rx_err, ndev,
413 "got rndis message uninitialized\n");
414 return NVSP_STAT_FAIL;
415 }
416
417 if (netif_msg_rx_status(net_device_ctx))
418 dump_rndis_message(dev, rndis_msg);
419
420 switch (rndis_msg->ndis_msg_type) {
421 case RNDIS_MSG_PACKET:
422 return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
423 channel, data, buflen);
424 case RNDIS_MSG_INIT_C:
425 case RNDIS_MSG_QUERY_C:
426 case RNDIS_MSG_SET_C:
427 /* completion msgs */
428 rndis_filter_receive_response(rndis_dev, rndis_msg);
429 break;
430
431 case RNDIS_MSG_INDICATE:
432 /* notification msgs */
433 netvsc_linkstatus_callback(dev, rndis_msg);
434 break;
435 default:
436 netdev_err(ndev,
437 "unhandled rndis message (type %u len %u)\n",
438 rndis_msg->ndis_msg_type,
439 rndis_msg->msg_len);
440 break;
441 }
442
443 return 0;
444 }
445
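/* Issue a synchronous RNDIS query for the given OID and copy the
 * response data into 'result'; *result_size is updated to the length
 * actually returned by the host.
 */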
446 static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
447 void *result, u32 *result_size)
448 {
449 struct rndis_request *request;
450 u32 inresult_size = *result_size;
451 struct rndis_query_request *query;
452 struct rndis_query_complete *query_complete;
453 int ret = 0;
454
455 if (!result)
456 return -EINVAL;
457
458 *result_size = 0;
459 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
460 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
461 if (!request) {
462 ret = -ENOMEM;
463 goto cleanup;
464 }
465
466 /* Setup the rndis query */
467 query = &request->request_msg.msg.query_req;
468 query->oid = oid;
469 query->info_buf_offset = sizeof(struct rndis_query_request);
470 query->info_buflen = 0;
471 query->dev_vc_handle = 0;
472
473 if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
474 struct net_device_context *ndevctx = netdev_priv(dev->ndev);
475 struct netvsc_device *nvdev = ndevctx->nvdev;
476 struct ndis_offload *hwcaps;
477 u32 nvsp_version = nvdev->nvsp_version;
478 u8 ndis_rev;
479 size_t size;
480
481 if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
482 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
483 size = NDIS_OFFLOAD_SIZE;
484 } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
485 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
486 size = NDIS_OFFLOAD_SIZE_6_1;
487 } else {
488 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
489 size = NDIS_OFFLOAD_SIZE_6_0;
490 }
491
492 request->request_msg.msg_len += size;
493 query->info_buflen = size;
494 hwcaps = (struct ndis_offload *)
495 ((unsigned long)query + query->info_buf_offset);
496
497 hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
498 hwcaps->header.revision = ndis_rev;
499 hwcaps->header.size = size;
500
501 } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
502 struct ndis_recv_scale_cap *cap;
503
504 request->request_msg.msg_len +=
505 sizeof(struct ndis_recv_scale_cap);
506 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
507 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
508 query->info_buf_offset);
509 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
510 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
511 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
512 }
513
514 ret = rndis_filter_send_request(dev, request);
515 if (ret != 0)
516 goto cleanup;
517
518 wait_for_completion(&request->wait_event);
519
520 /* Copy the response back */
521 query_complete = &request->response_msg.msg.query_complete;
522
523 if (query_complete->info_buflen > inresult_size) {
524 ret = -1;
525 goto cleanup;
526 }
527
528 memcpy(result,
529 (void *)((unsigned long)query_complete +
530 query_complete->info_buf_offset),
531 query_complete->info_buflen);
532
533 *result_size = query_complete->info_buflen;
534
535 cleanup:
536 if (request)
537 put_rndis_request(dev, request);
538
539 return ret;
540 }
541
542 /* Get the hardware offload capabilities */
543 static int
544 rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
545 {
546 u32 caps_len = sizeof(*caps);
547 int ret;
548
549 memset(caps, 0, sizeof(*caps));
550
551 ret = rndis_filter_query_device(dev,
552 OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
553 caps, &caps_len);
554 if (ret)
555 return ret;
556
557 if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
558 netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
559 caps->header.type);
560 return -EINVAL;
561 }
562
563 if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
564 netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
565 caps->header.revision);
566 return -EINVAL;
567 }
568
569 if (caps->header.size > caps_len ||
570 caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
571 netdev_warn(dev->ndev,
572 "invalid NDIS objsize %u, data size %u\n",
573 caps->header.size, caps_len);
574 return -EINVAL;
575 }
576
577 return 0;
578 }
579
580 static int rndis_filter_query_device_mac(struct rndis_device *dev)
581 {
582 u32 size = ETH_ALEN;
583
584 return rndis_filter_query_device(dev,
585 RNDIS_OID_802_3_PERMANENT_ADDRESS,
586 dev->hw_mac_adr, &size);
587 }
588
589 #define NWADR_STR "NetworkAddress"
590 #define NWADR_STRLEN 14
591
592 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
593 {
594 struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
595 struct rndis_device *rdev = nvdev->extension;
596 struct rndis_request *request;
597 struct rndis_set_request *set;
598 struct rndis_config_parameter_info *cpi;
599 wchar_t *cfg_nwadr, *cfg_mac;
600 struct rndis_set_complete *set_complete;
601 char macstr[2*ETH_ALEN+1];
602 u32 extlen = sizeof(struct rndis_config_parameter_info) +
603 2*NWADR_STRLEN + 4*ETH_ALEN;
604 int ret;
605
606 request = get_rndis_request(rdev, RNDIS_MSG_SET,
607 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
608 if (!request)
609 return -ENOMEM;
610
611 set = &request->request_msg.msg.set_req;
612 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
613 set->info_buflen = extlen;
614 set->info_buf_offset = sizeof(struct rndis_set_request);
615 set->dev_vc_handle = 0;
616
617 cpi = (struct rndis_config_parameter_info *)((ulong)set +
618 set->info_buf_offset);
619 cpi->parameter_name_offset =
620 sizeof(struct rndis_config_parameter_info);
621 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
622 cpi->parameter_name_length = 2*NWADR_STRLEN;
623 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
624 cpi->parameter_value_offset =
625 cpi->parameter_name_offset + cpi->parameter_name_length;
626 /* Multiply by 4 because each MAC byte is displayed as 2 utf16 chars */
627 cpi->parameter_value_length = 4*ETH_ALEN;
628
629 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
630 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
631 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
632 cfg_nwadr, NWADR_STRLEN);
633 if (ret < 0)
634 goto cleanup;
635 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
636 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
637 cfg_mac, 2*ETH_ALEN);
638 if (ret < 0)
639 goto cleanup;
640
641 ret = rndis_filter_send_request(rdev, request);
642 if (ret != 0)
643 goto cleanup;
644
645 wait_for_completion(&request->wait_event);
646
647 set_complete = &request->response_msg.msg.set_complete;
648 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
649 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
650 set_complete->status);
651 ret = -EINVAL;
652 }
653
654 cleanup:
655 put_rndis_request(rdev, request);
656 return ret;
657 }
658
659 static int
660 rndis_filter_set_offload_params(struct net_device *ndev,
661 struct ndis_offload_params *req_offloads)
662 {
663 struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
664 struct rndis_device *rdev = nvdev->extension;
665 struct rndis_request *request;
666 struct rndis_set_request *set;
667 struct ndis_offload_params *offload_params;
668 struct rndis_set_complete *set_complete;
669 u32 extlen = sizeof(struct ndis_offload_params);
670 int ret;
671 u32 vsp_version = nvdev->nvsp_version;
672
673 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
674 extlen = VERSION_4_OFFLOAD_SIZE;
675 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
676 * UDP checksum offload.
677 */
678 req_offloads->udp_ip_v4_csum = 0;
679 req_offloads->udp_ip_v6_csum = 0;
680 }
681
682 request = get_rndis_request(rdev, RNDIS_MSG_SET,
683 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
684 if (!request)
685 return -ENOMEM;
686
687 set = &request->request_msg.msg.set_req;
688 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
689 set->info_buflen = extlen;
690 set->info_buf_offset = sizeof(struct rndis_set_request);
691 set->dev_vc_handle = 0;
692
693 offload_params = (struct ndis_offload_params *)((ulong)set +
694 set->info_buf_offset);
695 *offload_params = *req_offloads;
696 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
697 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
698 offload_params->header.size = extlen;
699
700 ret = rndis_filter_send_request(rdev, request);
701 if (ret != 0)
702 goto cleanup;
703
704 wait_for_completion(&request->wait_event);
705 set_complete = &request->response_msg.msg.set_complete;
706 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
707 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
708 set_complete->status);
709 ret = -EINVAL;
710 }
711
712 cleanup:
713 put_rndis_request(rdev, request);
714 return ret;
715 }
716
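/* Program receive side scaling on the host: hash function, indirection
 * table and Toeplitz hash key.
 */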
717 int rndis_filter_set_rss_param(struct rndis_device *rdev,
718 const u8 *rss_key, int num_queue)
719 {
720 struct net_device *ndev = rdev->ndev;
721 struct rndis_request *request;
722 struct rndis_set_request *set;
723 struct rndis_set_complete *set_complete;
724 u32 extlen = sizeof(struct ndis_recv_scale_param) +
725 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
726 struct ndis_recv_scale_param *rssp;
727 u32 *itab;
728 u8 *keyp;
729 int i, ret;
730
731 request = get_rndis_request(
732 rdev, RNDIS_MSG_SET,
733 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
734 if (!request)
735 return -ENOMEM;
736
737 set = &request->request_msg.msg.set_req;
738 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
739 set->info_buflen = extlen;
740 set->info_buf_offset = sizeof(struct rndis_set_request);
741 set->dev_vc_handle = 0;
742
743 rssp = (struct ndis_recv_scale_param *)(set + 1);
744 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
745 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
746 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
747 rssp->flag = 0;
748 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
749 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
750 NDIS_HASH_TCP_IPV6;
751 rssp->indirect_tabsize = 4*ITAB_NUM;
752 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
753 rssp->hashkey_size = NETVSC_HASH_KEYLEN;
754 rssp->kashkey_offset = rssp->indirect_taboffset +
755 rssp->indirect_tabsize;
756
757 /* Set indirection table entries */
758 itab = (u32 *)(rssp + 1);
759 for (i = 0; i < ITAB_NUM; i++)
760 itab[i] = rdev->ind_table[i];
761
762 /* Set hash key values */
763 keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
764 memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
765
766 ret = rndis_filter_send_request(rdev, request);
767 if (ret != 0)
768 goto cleanup;
769
770 wait_for_completion(&request->wait_event);
771 set_complete = &request->response_msg.msg.set_complete;
772 if (set_complete->status == RNDIS_STATUS_SUCCESS)
773 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
774 else {
775 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
776 set_complete->status);
777 ret = -EINVAL;
778 }
779
780 cleanup:
781 put_rndis_request(rdev, request);
782 return ret;
783 }
784
785 static int rndis_filter_query_device_link_status(struct rndis_device *dev)
786 {
787 u32 size = sizeof(u32);
788 u32 link_status;
789 int ret;
790
791 ret = rndis_filter_query_device(dev,
792 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
793 &link_status, &size);
794
795 return ret;
796 }
797
798 static int rndis_filter_query_link_speed(struct rndis_device *dev)
799 {
800 u32 size = sizeof(u32);
801 u32 link_speed;
802 struct net_device_context *ndc;
803 int ret;
804
805 ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED,
806 &link_speed, &size);
807
808 if (!ret) {
809 ndc = netdev_priv(dev->ndev);
810
811 /* The link speed reported from host is in 100bps unit, so
812 * we convert it to Mbps here.
813 */
814 ndc->speed = link_speed / 10000;
815 }
816
817 return ret;
818 }
819
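/* Set the RNDIS packet filter (e.g. directed/multicast/promiscuous)
 * with a synchronous RNDIS_OID_GEN_CURRENT_PACKET_FILTER set request.
 */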
820 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
821 u32 new_filter)
822 {
823 struct rndis_request *request;
824 struct rndis_set_request *set;
825 int ret;
826
827 request = get_rndis_request(dev, RNDIS_MSG_SET,
828 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
829 sizeof(u32));
830 if (!request)
831 return -ENOMEM;
832
833
834 /* Setup the rndis set */
835 set = &request->request_msg.msg.set_req;
836 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
837 set->info_buflen = sizeof(u32);
838 set->info_buf_offset = sizeof(struct rndis_set_request);
839
840 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
841 &new_filter, sizeof(u32));
842
843 ret = rndis_filter_send_request(dev, request);
844 if (ret == 0)
845 wait_for_completion(&request->wait_event);
846
847 put_rndis_request(dev, request);
848
849 return ret;
850 }
851
852 static void rndis_set_multicast(struct work_struct *w)
853 {
854 struct rndis_device *rdev
855 = container_of(w, struct rndis_device, mcast_work);
856
857 if (rdev->ndev->flags & IFF_PROMISC)
858 rndis_filter_set_packet_filter(rdev,
859 NDIS_PACKET_TYPE_PROMISCUOUS);
860 else
861 rndis_filter_set_packet_filter(rdev,
862 NDIS_PACKET_TYPE_BROADCAST |
863 NDIS_PACKET_TYPE_ALL_MULTICAST |
864 NDIS_PACKET_TYPE_DIRECTED);
865 }
866
867 void rndis_filter_update(struct netvsc_device *nvdev)
868 {
869 struct rndis_device *rdev = nvdev->extension;
870
871 schedule_work(&rdev->mcast_work);
872 }
873
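/* Send the RNDIS initialize message and record the negotiated packet
 * limits (max packets per message, packet alignment) on success.
 */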
874 static int rndis_filter_init_device(struct rndis_device *dev)
875 {
876 struct rndis_request *request;
877 struct rndis_initialize_request *init;
878 struct rndis_initialize_complete *init_complete;
879 u32 status;
880 int ret;
881 struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
882
883 request = get_rndis_request(dev, RNDIS_MSG_INIT,
884 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
885 if (!request) {
886 ret = -ENOMEM;
887 goto cleanup;
888 }
889
890 /* Setup the rndis initialize request */
891 init = &request->request_msg.msg.init_req;
892 init->major_ver = RNDIS_MAJOR_VERSION;
893 init->minor_ver = RNDIS_MINOR_VERSION;
894 init->max_xfer_size = 0x4000;
895
896 dev->state = RNDIS_DEV_INITIALIZING;
897
898 ret = rndis_filter_send_request(dev, request);
899 if (ret != 0) {
900 dev->state = RNDIS_DEV_UNINITIALIZED;
901 goto cleanup;
902 }
903
904 wait_for_completion(&request->wait_event);
905
906 init_complete = &request->response_msg.msg.init_complete;
907 status = init_complete->status;
908 if (status == RNDIS_STATUS_SUCCESS) {
909 dev->state = RNDIS_DEV_INITIALIZED;
910 nvdev->max_pkt = init_complete->max_pkt_per_msg;
911 nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
912 ret = 0;
913 } else {
914 dev->state = RNDIS_DEV_UNINITIALIZED;
915 ret = -EINVAL;
916 }
917
918 cleanup:
919 if (request)
920 put_rndis_request(dev, request);
921
922 return ret;
923 }
924
925 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
926 {
927 int i;
928
929 if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
930 return false;
931
932 for (i = 0; i < nvdev->num_chn; i++) {
933 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
934
935 if (atomic_read(&nvchan->queue_sends) > 0)
936 return false;
937 }
938
939 return true;
940 }
941
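/* Send a best-effort RNDIS halt message, then wait for all outstanding
 * sends and receives to drain before returning.
 */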
942 static void rndis_filter_halt_device(struct rndis_device *dev)
943 {
944 struct rndis_request *request;
945 struct rndis_halt_request *halt;
946 struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
947 struct netvsc_device *nvdev = net_device_ctx->nvdev;
948
949 /* Attempt to do a rndis device halt */
950 request = get_rndis_request(dev, RNDIS_MSG_HALT,
951 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
952 if (!request)
953 goto cleanup;
954
955 /* Setup the rndis halt request */
956 halt = &request->request_msg.msg.halt_req;
957 halt->req_id = atomic_inc_return(&dev->new_req_id);
958
959 /* Ignore return since this msg is optional. */
960 rndis_filter_send_request(dev, request);
961
962 dev->state = RNDIS_DEV_UNINITIALIZED;
963
964 cleanup:
965 nvdev->destroy = true;
966
967 /* Force flag to be ordered before waiting */
968 wmb();
969
970 /* Wait for all send completions */
971 wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
972
973 if (request)
974 put_rndis_request(dev, request);
975 }
976
977 static int rndis_filter_open_device(struct rndis_device *dev)
978 {
979 int ret;
980
981 if (dev->state != RNDIS_DEV_INITIALIZED)
982 return 0;
983
984 ret = rndis_filter_set_packet_filter(dev,
985 NDIS_PACKET_TYPE_BROADCAST |
986 NDIS_PACKET_TYPE_ALL_MULTICAST |
987 NDIS_PACKET_TYPE_DIRECTED);
988 if (ret == 0)
989 dev->state = RNDIS_DEV_DATAINITIALIZED;
990
991 return ret;
992 }
993
994 static int rndis_filter_close_device(struct rndis_device *dev)
995 {
996 int ret;
997
998 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
999 return 0;
1000
1001 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1002 cancel_work_sync(&dev->mcast_work);
1003
1004 ret = rndis_filter_set_packet_filter(dev, 0);
1005 if (ret == -ENODEV)
1006 ret = 0;
1007
1008 if (ret == 0)
1009 dev->state = RNDIS_DEV_INITIALIZED;
1010
1011 return ret;
1012 }
1013
1014 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1015 {
1016 struct net_device *ndev =
1017 hv_get_drvdata(new_sc->primary_channel->device_obj);
1018 struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
1019 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1020 struct netvsc_channel *nvchan;
1021 int ret;
1022
1023 if (chn_index >= nvscdev->num_chn)
1024 return;
1025
1026 nvchan = nvscdev->chan_table + chn_index;
1027 nvchan->mrc.buf
1028 = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
1029
1030 if (!nvchan->mrc.buf)
1031 return;
1032
1033 /* Because the device uses NAPI, all the interrupt batching and
1034 * control is done via the NET softirq, not in the channel handler
1035 */
1036 set_channel_read_mode(new_sc, HV_CALL_ISR);
1037
1038 /* Set the channel before opening. */
1039 nvchan->channel = new_sc;
1040 netif_napi_add(ndev, &nvchan->napi,
1041 netvsc_poll, NAPI_POLL_WEIGHT);
1042
1043 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
1044 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
1045 netvsc_channel_cb, nvchan);
1046 if (ret == 0)
1047 napi_enable(&nvchan->napi);
1048 else
1049 netif_napi_del(&nvchan->napi);
1050
1051 atomic_inc(&nvscdev->open_chn);
1052 wake_up(&nvscdev->subchan_open);
1053 }
1054
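/* Bring up the RNDIS device on top of the netvsc channel: initialize
 * the device, query MTU, MAC address and offload capabilities, program
 * offloads, and configure vRSS sub-channels when supported.
 */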
1055 int rndis_filter_device_add(struct hv_device *dev,
1056 struct netvsc_device_info *device_info)
1057 {
1058 struct net_device *net = hv_get_drvdata(dev);
1059 struct net_device_context *net_device_ctx = netdev_priv(net);
1060 struct netvsc_device *net_device;
1061 struct rndis_device *rndis_device;
1062 struct ndis_offload hwcaps;
1063 struct ndis_offload_params offloads;
1064 struct nvsp_message *init_packet;
1065 struct ndis_recv_scale_cap rsscap;
1066 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1067 unsigned int gso_max_size = GSO_MAX_SIZE;
1068 u32 mtu, size, num_rss_qs;
1069 const struct cpumask *node_cpu_mask;
1070 u32 num_possible_rss_qs;
1071 int i, ret;
1072
1073 rndis_device = get_rndis_device();
1074 if (!rndis_device)
1075 return -ENODEV;
1076
1077 /*
1078 * Let the inner driver handle this first to create the netvsc channel.
1079 * NOTE! Once the channel is created, we may get a receive callback
1080 * (rndis_filter_receive()) before this call is completed
1081 */
1082 ret = netvsc_device_add(dev, device_info);
1083 if (ret != 0) {
1084 kfree(rndis_device);
1085 return ret;
1086 }
1087
1088 /* Initialize the rndis device */
1089 net_device = net_device_ctx->nvdev;
1090 net_device->max_chn = 1;
1091 net_device->num_chn = 1;
1092
1093 net_device->extension = rndis_device;
1094 rndis_device->ndev = net;
1095
1096 /* Send the rndis initialization message */
1097 ret = rndis_filter_init_device(rndis_device);
1098 if (ret != 0) {
1099 rndis_filter_device_remove(dev, net_device);
1100 return ret;
1101 }
1102
1103 /* Get the MTU from the host */
1104 size = sizeof(u32);
1105 ret = rndis_filter_query_device(rndis_device,
1106 RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1107 &mtu, &size);
1108 if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1109 net->mtu = mtu;
1110
1111 /* Get the mac address */
1112 ret = rndis_filter_query_device_mac(rndis_device);
1113 if (ret != 0) {
1114 rndis_filter_device_remove(dev, net_device);
1115 return ret;
1116 }
1117
1118 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1119
1120 /* Find HW offload capabilities */
1121 ret = rndis_query_hwcaps(rndis_device, &hwcaps);
1122 if (ret != 0) {
1123 rndis_filter_device_remove(dev, net_device);
1124 return ret;
1125 }
1126
1127 /* A value of zero means "no change"; now turn on what we want. */
1128 memset(&offloads, 0, sizeof(struct ndis_offload_params));
1129
1130 /* Linux does not care about IP checksum offload; it is always done in the kernel */
1131 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1132
1133 /* Compute tx offload settings based on hw capabilities */
1134 net->hw_features = NETIF_F_RXCSUM;
1135
1136 if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1137 /* Can checksum TCP */
1138 net->hw_features |= NETIF_F_IP_CSUM;
1139 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1140
1141 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1142
1143 if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1144 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1145 net->hw_features |= NETIF_F_TSO;
1146
1147 if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1148 gso_max_size = hwcaps.lsov2.ip4_maxsz;
1149 }
1150
1151 if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1152 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1153 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1154 }
1155 }
1156
1157 if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1158 net->hw_features |= NETIF_F_IPV6_CSUM;
1159
1160 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1161 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1162
1163 if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1164 (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1165 offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1166 net->hw_features |= NETIF_F_TSO6;
1167
1168 if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1169 gso_max_size = hwcaps.lsov2.ip6_maxsz;
1170 }
1171
1172 if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1173 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1174 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1175 }
1176 }
1177
1178 netif_set_gso_max_size(net, gso_max_size);
1179
1180 ret = rndis_filter_set_offload_params(net, &offloads);
1181 if (ret)
1182 goto err_dev_remv;
1183
1184 rndis_filter_query_device_link_status(rndis_device);
1185
1186 netdev_dbg(net, "Device MAC %pM link state %s\n",
1187 rndis_device->hw_mac_adr,
1188 rndis_device->link_state ? "down" : "up");
1189
1190 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1191 return 0;
1192
1193 rndis_filter_query_link_speed(rndis_device);
1194
1195 /* vRSS setup */
1196 memset(&rsscap, 0, rsscap_size);
1197 ret = rndis_filter_query_device(rndis_device,
1198 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1199 &rsscap, &rsscap_size);
1200 if (ret || rsscap.num_recv_que < 2)
1201 goto out;
1202
1203 /*
1204 * We will limit the VRSS channels to the number of CPUs in the NUMA node
1205 * the primary channel is currently bound to.
1206 *
1207 * This also guarantees that num_possible_rss_qs <= num_online_cpus
1208 */
1209 node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
1210 num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
1211 rsscap.num_recv_que);
1212
1213 net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1214
1215 /* We will use the given number of channels if available. */
1216 net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1217
1218 for (i = 0; i < ITAB_NUM; i++)
1219 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
1220 net_device->num_chn);
1221
1222 atomic_set(&net_device->open_chn, 1);
1223 num_rss_qs = net_device->num_chn - 1;
1224 if (num_rss_qs == 0)
1225 return 0;
1226
1227 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1228
1229 init_packet = &net_device->channel_init_pkt;
1230 memset(init_packet, 0, sizeof(struct nvsp_message));
1231 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1232 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1233 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1234 net_device->num_chn - 1;
1235 ret = vmbus_sendpacket(dev->channel, init_packet,
1236 sizeof(struct nvsp_message),
1237 (unsigned long)init_packet,
1238 VM_PKT_DATA_INBAND,
1239 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1240 if (ret)
1241 goto out;
1242
1243 wait_for_completion(&net_device->channel_init_wait);
1244 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1245 ret = -ENODEV;
1246 goto out;
1247 }
1248
1249 net_device->num_chn = 1 +
1250 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1251
1252 /* wait for all sub channels to open */
1253 wait_event(net_device->subchan_open,
1254 atomic_read(&net_device->open_chn) == net_device->num_chn);
1255
1256 /* ignore failures from setting rss parameters; we still have channels */
1257 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
1258 net_device->num_chn);
1259 out:
1260 if (ret) {
1261 net_device->max_chn = 1;
1262 net_device->num_chn = 1;
1263 }
1264
1265 return 0; /* return 0 because primary channel can be used alone */
1266
1267 err_dev_remv:
1268 rndis_filter_device_remove(dev, net_device);
1269 return ret;
1270 }
1271
1272 void rndis_filter_device_remove(struct hv_device *dev,
1273 struct netvsc_device *net_dev)
1274 {
1275 struct rndis_device *rndis_dev = net_dev->extension;
1276
1277 /* Halt and release the rndis device */
1278 rndis_filter_halt_device(rndis_dev);
1279
1280 kfree(rndis_dev);
1281 net_dev->extension = NULL;
1282
1283 netvsc_device_remove(dev);
1284 }
1285
1286 int rndis_filter_open(struct netvsc_device *nvdev)
1287 {
1288 if (!nvdev)
1289 return -EINVAL;
1290
1291 if (atomic_inc_return(&nvdev->open_cnt) != 1)
1292 return 0;
1293
1294 return rndis_filter_open_device(nvdev->extension);
1295 }
1296
1297 int rndis_filter_close(struct netvsc_device *nvdev)
1298 {
1299 if (!nvdev)
1300 return -EINVAL;
1301
1302 if (atomic_dec_return(&nvdev->open_cnt) != 0)
1303 return 0;
1304
1305 return rndis_filter_close_device(nvdev->extension);
1306 }