drivers/net/hyperv/rndis_filter.c
1 /*
2 * Copyright (c) 2009, Microsoft Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * Authors:
17 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com>
19 */
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/wait.h>
23 #include <linux/highmem.h>
24 #include <linux/slab.h>
25 #include <linux/io.h>
26 #include <linux/if_ether.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_vlan.h>
29 #include <linux/nls.h>
30 #include <linux/vmalloc.h>
31
32 #include "hyperv_net.h"
33
34 static void rndis_set_multicast(struct work_struct *w);
35
36 #define RNDIS_EXT_LEN PAGE_SIZE
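/*
 * Tracks one outstanding RNDIS control exchange with the host: request_msg
 * and request_ext hold the message sent to the host, response_msg and
 * response_ext receive the host's completion, and the embedded netvsc
 * packet avoids a separate allocation per request.
 */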
37 struct rndis_request {
38 struct list_head list_ent;
39 struct completion wait_event;
40
41 struct rndis_message response_msg;
42 /*
43 * The buffer for extended info after the RNDIS response message. It's
44 * referenced based on the data offset in the RNDIS message. Its size
45 * is enough for current needs, and should be sufficient for the near
46 * future.
47 */
48 u8 response_ext[RNDIS_EXT_LEN];
49
50 /* Simplify allocation by having a netvsc packet inline */
51 struct hv_netvsc_packet pkt;
52
53 struct rndis_message request_msg;
54 /*
55 * The buffer for the extended info after the RNDIS request message.
56 * It is referenced and sized in a similar way as response_ext.
57 */
58 u8 request_ext[RNDIS_EXT_LEN];
59 };
60
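/* Default Toeplitz hash key used when configuring RSS on the host */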
61 static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
62 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
63 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
64 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
65 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
66 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
67 };
68
69 static struct rndis_device *get_rndis_device(void)
70 {
71 struct rndis_device *device;
72
73 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
74 if (!device)
75 return NULL;
76
77 spin_lock_init(&device->request_lock);
78
79 INIT_LIST_HEAD(&device->req_list);
80 INIT_WORK(&device->mcast_work, rndis_set_multicast);
81
82 device->state = RNDIS_DEV_UNINITIALIZED;
83
84 return device;
85 }
86
87 static struct netvsc_device *
88 net_device_to_netvsc_device(struct net_device *ndev)
89 {
90 struct net_device_context *net_device_ctx = netdev_priv(ndev);
91
92 return rtnl_dereference(net_device_ctx->nvdev);
93 }
94
95 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
96 u32 msg_type,
97 u32 msg_len)
98 {
99 struct rndis_request *request;
100 struct rndis_message *rndis_msg;
101 struct rndis_set_request *set;
102 unsigned long flags;
103
104 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
105 if (!request)
106 return NULL;
107
108 init_completion(&request->wait_event);
109
110 rndis_msg = &request->request_msg;
111 rndis_msg->ndis_msg_type = msg_type;
112 rndis_msg->msg_len = msg_len;
113
114 request->pkt.q_idx = 0;
115
116 /*
117 * Set the request id. This field is always after the rndis header for
118 * request/response packet types, so we just use the SetRequest as a
119 * template.
120 */
121 set = &rndis_msg->msg.set_req;
122 set->req_id = atomic_inc_return(&dev->new_req_id);
123
124 /* Add to the request list */
125 spin_lock_irqsave(&dev->request_lock, flags);
126 list_add_tail(&request->list_ent, &dev->req_list);
127 spin_unlock_irqrestore(&dev->request_lock, flags);
128
129 return request;
130 }
131
132 static void put_rndis_request(struct rndis_device *dev,
133 struct rndis_request *req)
134 {
135 unsigned long flags;
136
137 spin_lock_irqsave(&dev->request_lock, flags);
138 list_del(&req->list_ent);
139 spin_unlock_irqrestore(&dev->request_lock, flags);
140
141 kfree(req);
142 }
143
144 static void dump_rndis_message(struct hv_device *hv_dev,
145 const struct rndis_message *rndis_msg)
146 {
147 struct net_device *netdev = hv_get_drvdata(hv_dev);
148
149 switch (rndis_msg->ndis_msg_type) {
150 case RNDIS_MSG_PACKET:
151 netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
152 "data offset %u data len %u, # oob %u, "
153 "oob offset %u, oob len %u, pkt offset %u, "
154 "pkt len %u\n",
155 rndis_msg->msg_len,
156 rndis_msg->msg.pkt.data_offset,
157 rndis_msg->msg.pkt.data_len,
158 rndis_msg->msg.pkt.num_oob_data_elements,
159 rndis_msg->msg.pkt.oob_data_offset,
160 rndis_msg->msg.pkt.oob_data_len,
161 rndis_msg->msg.pkt.per_pkt_info_offset,
162 rndis_msg->msg.pkt.per_pkt_info_len);
163 break;
164
165 case RNDIS_MSG_INIT_C:
166 netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
167 "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
168 "device flags %d, max xfer size 0x%x, max pkts %u, "
169 "pkt aligned %u)\n",
170 rndis_msg->msg_len,
171 rndis_msg->msg.init_complete.req_id,
172 rndis_msg->msg.init_complete.status,
173 rndis_msg->msg.init_complete.major_ver,
174 rndis_msg->msg.init_complete.minor_ver,
175 rndis_msg->msg.init_complete.dev_flags,
176 rndis_msg->msg.init_complete.max_xfer_size,
177 rndis_msg->msg.init_complete.
178 max_pkt_per_msg,
179 rndis_msg->msg.init_complete.
180 pkt_alignment_factor);
181 break;
182
183 case RNDIS_MSG_QUERY_C:
184 netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
185 "(len %u, id 0x%x, status 0x%x, buf len %u, "
186 "buf offset %u)\n",
187 rndis_msg->msg_len,
188 rndis_msg->msg.query_complete.req_id,
189 rndis_msg->msg.query_complete.status,
190 rndis_msg->msg.query_complete.
191 info_buflen,
192 rndis_msg->msg.query_complete.
193 info_buf_offset);
194 break;
195
196 case RNDIS_MSG_SET_C:
197 netdev_dbg(netdev,
198 "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
199 rndis_msg->msg_len,
200 rndis_msg->msg.set_complete.req_id,
201 rndis_msg->msg.set_complete.status);
202 break;
203
204 case RNDIS_MSG_INDICATE:
205 netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
206 "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
207 rndis_msg->msg_len,
208 rndis_msg->msg.indicate_status.status,
209 rndis_msg->msg.indicate_status.status_buflen,
210 rndis_msg->msg.indicate_status.status_buf_offset);
211 break;
212
213 default:
214 netdev_dbg(netdev, "0x%x (len %u)\n",
215 rndis_msg->ndis_msg_type,
216 rndis_msg->msg_len);
217 break;
218 }
219 }
220
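/*
 * Send an RNDIS control message to the host.  The message is passed to
 * netvsc_send() as one GPA page buffer entry, or two when it happens to
 * straddle a page boundary.
 */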
221 static int rndis_filter_send_request(struct rndis_device *dev,
222 struct rndis_request *req)
223 {
224 int ret;
225 struct hv_netvsc_packet *packet;
226 struct hv_page_buffer page_buf[2];
227 struct hv_page_buffer *pb = page_buf;
228 struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
229
230 /* Setup the packet to send it */
231 packet = &req->pkt;
232
233 packet->total_data_buflen = req->request_msg.msg_len;
234 packet->page_buf_cnt = 1;
235
236 pb[0].pfn = virt_to_phys(&req->request_msg) >>
237 PAGE_SHIFT;
238 pb[0].len = req->request_msg.msg_len;
239 pb[0].offset =
240 (unsigned long)&req->request_msg & (PAGE_SIZE - 1);
241
242 /* Add one more page_buf when request_msg crosses a page boundary */
243 if (pb[0].offset + pb[0].len > PAGE_SIZE) {
244 packet->page_buf_cnt++;
245 pb[0].len = PAGE_SIZE -
246 pb[0].offset;
247 pb[1].pfn = virt_to_phys((void *)&req->request_msg
248 + pb[0].len) >> PAGE_SHIFT;
249 pb[1].offset = 0;
250 pb[1].len = req->request_msg.msg_len -
251 pb[0].len;
252 }
253
254 ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL);
255 return ret;
256 }
257
258 static void rndis_set_link_state(struct rndis_device *rdev,
259 struct rndis_request *request)
260 {
261 u32 link_status;
262 struct rndis_query_complete *query_complete;
263
264 query_complete = &request->response_msg.msg.query_complete;
265
266 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
267 query_complete->info_buflen == sizeof(u32)) {
268 memcpy(&link_status, (void *)((unsigned long)query_complete +
269 query_complete->info_buf_offset), sizeof(u32));
270 rdev->link_state = link_status != 0;
271 }
272 }
273
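/*
 * Handle a completion message from the host: locate the outstanding request
 * with the matching request id, copy the response (and any extended info)
 * into it, and wake up the thread waiting on the request's completion.
 * Oversized responses are reported as RNDIS_STATUS_BUFFER_OVERFLOW instead.
 */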
274 static void rndis_filter_receive_response(struct rndis_device *dev,
275 struct rndis_message *resp)
276 {
277 struct rndis_request *request = NULL;
278 bool found = false;
279 unsigned long flags;
280 struct net_device *ndev = dev->ndev;
281
282 spin_lock_irqsave(&dev->request_lock, flags);
283 list_for_each_entry(request, &dev->req_list, list_ent) {
284 /*
285 * All request/response messages contain the RequestId as the
286 * first field.
287 */
288 if (request->request_msg.msg.init_req.req_id
289 == resp->msg.init_complete.req_id) {
290 found = true;
291 break;
292 }
293 }
294 spin_unlock_irqrestore(&dev->request_lock, flags);
295
296 if (found) {
297 if (resp->msg_len <=
298 sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
299 memcpy(&request->response_msg, resp,
300 resp->msg_len);
301 if (request->request_msg.ndis_msg_type ==
302 RNDIS_MSG_QUERY && request->request_msg.msg.
303 query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
304 rndis_set_link_state(dev, request);
305 } else {
306 netdev_err(ndev,
307 "rndis response buffer overflow "
308 "detected (size %u max %zu)\n",
309 resp->msg_len,
310 sizeof(struct rndis_message));
311
312 if (resp->ndis_msg_type ==
313 RNDIS_MSG_RESET_C) {
314 /* does not have a request id field */
315 request->response_msg.msg.reset_complete.
316 status = RNDIS_STATUS_BUFFER_OVERFLOW;
317 } else {
318 request->response_msg.msg.
319 init_complete.status =
320 RNDIS_STATUS_BUFFER_OVERFLOW;
321 }
322 }
323
324 complete(&request->wait_event);
325 } else {
326 netdev_err(ndev,
327 "no rndis request found for this response "
328 "(id 0x%x res type 0x%x)\n",
329 resp->msg.init_complete.req_id,
330 resp->ndis_msg_type);
331 }
332 }
333
334 /*
335 * Get the Per-Packet-Info with the specified type;
336 * return NULL if not found.
337 */
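/*
 * Per-packet info entries start at per_pkt_info_offset from the RNDIS
 * packet header.  Each entry records its own size and the offset of its
 * payload, so the list is walked until the requested type is found or
 * per_pkt_info_len is exhausted.
 */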
338 static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
339 {
340 struct rndis_per_packet_info *ppi;
341 int len;
342
343 if (rpkt->per_pkt_info_offset == 0)
344 return NULL;
345
346 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
347 rpkt->per_pkt_info_offset);
348 len = rpkt->per_pkt_info_len;
349
350 while (len > 0) {
351 if (ppi->type == type)
352 return (void *)((ulong)ppi + ppi->ppi_offset);
353 len -= ppi->size;
354 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
355 }
356
357 return NULL;
358 }
359
360 static int rndis_filter_receive_data(struct net_device *ndev,
361 struct rndis_device *dev,
362 struct rndis_message *msg,
363 struct vmbus_channel *channel,
364 void *data, u32 data_buflen)
365 {
366 struct rndis_packet *rndis_pkt = &msg->msg.pkt;
367 const struct ndis_tcp_ip_checksum_info *csum_info;
368 const struct ndis_pkt_8021q_info *vlan;
369 u32 data_offset;
370
371 /* Remove the rndis header and pass it back up the stack */
372 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
373
374 data_buflen -= data_offset;
375
376 /*
377 * Make sure we got a valid RNDIS message: the remaining buffer
378 * length must cover the data packet size plus the trailer padding.
379 */
380 if (unlikely(data_buflen < rndis_pkt->data_len)) {
381 netdev_err(dev->ndev, "rndis message buffer "
382 "overflow detected (got %u, min %u)"
383 "...dropping this message!\n",
384 data_buflen, rndis_pkt->data_len);
385 return NVSP_STAT_FAIL;
386 }
387
388 vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
389
390 /*
391 * Remove the rndis trailer padding from the rndis packet message.
392 * rndis_pkt->data_len tells us the real data length, so we only pass
393 * the data packet up the stack, without the rndis trailer padding.
394 */
395 data = (void *)((unsigned long)data + data_offset);
396 csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
397 return netvsc_recv_callback(ndev, channel,
398 data, rndis_pkt->data_len,
399 csum_info, vlan);
400 }
401
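/*
 * Entry point for every RNDIS message received on a channel: data packets
 * are passed up the stack, completion messages wake the matching control
 * request, and status indications are forwarded to the link state handling.
 */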
402 int rndis_filter_receive(struct net_device *ndev,
403 struct netvsc_device *net_dev,
404 struct hv_device *dev,
405 struct vmbus_channel *channel,
406 void *data, u32 buflen)
407 {
408 struct net_device_context *net_device_ctx = netdev_priv(ndev);
409 struct rndis_device *rndis_dev = net_dev->extension;
410 struct rndis_message *rndis_msg = data;
411
412 /* Make sure the rndis device state is initialized */
413 if (unlikely(!rndis_dev)) {
414 netif_err(net_device_ctx, rx_err, ndev,
415 "got rndis message but no rndis device!\n");
416 return NVSP_STAT_FAIL;
417 }
418
419 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
420 netif_err(net_device_ctx, rx_err, ndev,
421 "got rndis message uninitialized\n");
422 return NVSP_STAT_FAIL;
423 }
424
425 if (netif_msg_rx_status(net_device_ctx))
426 dump_rndis_message(dev, rndis_msg);
427
428 switch (rndis_msg->ndis_msg_type) {
429 case RNDIS_MSG_PACKET:
430 return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
431 channel, data, buflen);
432 case RNDIS_MSG_INIT_C:
433 case RNDIS_MSG_QUERY_C:
434 case RNDIS_MSG_SET_C:
435 /* completion msgs */
436 rndis_filter_receive_response(rndis_dev, rndis_msg);
437 break;
438
439 case RNDIS_MSG_INDICATE:
440 /* notification msgs */
441 netvsc_linkstatus_callback(dev, rndis_msg);
442 break;
443 default:
444 netdev_err(ndev,
445 "unhandled rndis message (type %u len %u)\n",
446 rndis_msg->ndis_msg_type,
447 rndis_msg->msg_len);
448 break;
449 }
450
451 return 0;
452 }
453
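/*
 * Issue an RNDIS query for the given OID and copy the returned information
 * buffer into result.  Most queries send an empty info buffer; the offload
 * and RSS capability OIDs also pass an NDIS object header describing the
 * revision and size the guest can accept.  Blocks until the host responds.
 */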
454 static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
455 void *result, u32 *result_size)
456 {
457 struct rndis_request *request;
458 u32 inresult_size = *result_size;
459 struct rndis_query_request *query;
460 struct rndis_query_complete *query_complete;
461 int ret = 0;
462
463 if (!result)
464 return -EINVAL;
465
466 *result_size = 0;
467 request = get_rndis_request(dev, RNDIS_MSG_QUERY,
468 RNDIS_MESSAGE_SIZE(struct rndis_query_request));
469 if (!request) {
470 ret = -ENOMEM;
471 goto cleanup;
472 }
473
474 /* Setup the rndis query */
475 query = &request->request_msg.msg.query_req;
476 query->oid = oid;
477 query->info_buf_offset = sizeof(struct rndis_query_request);
478 query->info_buflen = 0;
479 query->dev_vc_handle = 0;
480
481 if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
482 struct net_device_context *ndevctx = netdev_priv(dev->ndev);
483 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
484 struct ndis_offload *hwcaps;
485 u32 nvsp_version = nvdev->nvsp_version;
486 u8 ndis_rev;
487 size_t size;
488
489 if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
490 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
491 size = NDIS_OFFLOAD_SIZE;
492 } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
493 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
494 size = NDIS_OFFLOAD_SIZE_6_1;
495 } else {
496 ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
497 size = NDIS_OFFLOAD_SIZE_6_0;
498 }
499
500 request->request_msg.msg_len += size;
501 query->info_buflen = size;
502 hwcaps = (struct ndis_offload *)
503 ((unsigned long)query + query->info_buf_offset);
504
505 hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
506 hwcaps->header.revision = ndis_rev;
507 hwcaps->header.size = size;
508
509 } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
510 struct ndis_recv_scale_cap *cap;
511
512 request->request_msg.msg_len +=
513 sizeof(struct ndis_recv_scale_cap);
514 query->info_buflen = sizeof(struct ndis_recv_scale_cap);
515 cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
516 query->info_buf_offset);
517 cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
518 cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
519 cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
520 }
521
522 ret = rndis_filter_send_request(dev, request);
523 if (ret != 0)
524 goto cleanup;
525
526 wait_for_completion(&request->wait_event);
527
528 /* Copy the response back */
529 query_complete = &request->response_msg.msg.query_complete;
530
531 if (query_complete->info_buflen > inresult_size) {
532 ret = -1;
533 goto cleanup;
534 }
535
536 memcpy(result,
537 (void *)((unsigned long)query_complete +
538 query_complete->info_buf_offset),
539 query_complete->info_buflen);
540
541 *result_size = query_complete->info_buflen;
542
543 cleanup:
544 if (request)
545 put_rndis_request(dev, request);
546
547 return ret;
548 }
549
550 /* Get the hardware offload capabilities */
551 static int
552 rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
553 {
554 u32 caps_len = sizeof(*caps);
555 int ret;
556
557 memset(caps, 0, sizeof(*caps));
558
559 ret = rndis_filter_query_device(dev,
560 OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
561 caps, &caps_len);
562 if (ret)
563 return ret;
564
565 if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
566 netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
567 caps->header.type);
568 return -EINVAL;
569 }
570
571 if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
572 netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
573 caps->header.revision);
574 return -EINVAL;
575 }
576
577 if (caps->header.size > caps_len ||
578 caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
579 netdev_warn(dev->ndev,
580 "invalid NDIS objsize %u, data size %u\n",
581 caps->header.size, caps_len);
582 return -EINVAL;
583 }
584
585 return 0;
586 }
587
588 static int rndis_filter_query_device_mac(struct rndis_device *dev)
589 {
590 u32 size = ETH_ALEN;
591
592 return rndis_filter_query_device(dev,
593 RNDIS_OID_802_3_PERMANENT_ADDRESS,
594 dev->hw_mac_adr, &size);
595 }
596
597 #define NWADR_STR "NetworkAddress"
598 #define NWADR_STRLEN 14
599
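/*
 * The MAC address is changed by setting the RNDIS configuration parameter
 * "NetworkAddress"; both the parameter name and the value (the MAC rendered
 * as a hex string) are encoded as UTF-16 for the host.
 */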
600 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
601 {
602 struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
603 struct rndis_device *rdev = nvdev->extension;
604 struct rndis_request *request;
605 struct rndis_set_request *set;
606 struct rndis_config_parameter_info *cpi;
607 wchar_t *cfg_nwadr, *cfg_mac;
608 struct rndis_set_complete *set_complete;
609 char macstr[2*ETH_ALEN+1];
610 u32 extlen = sizeof(struct rndis_config_parameter_info) +
611 2*NWADR_STRLEN + 4*ETH_ALEN;
612 int ret;
613
614 request = get_rndis_request(rdev, RNDIS_MSG_SET,
615 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
616 if (!request)
617 return -ENOMEM;
618
619 set = &request->request_msg.msg.set_req;
620 set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
621 set->info_buflen = extlen;
622 set->info_buf_offset = sizeof(struct rndis_set_request);
623 set->dev_vc_handle = 0;
624
625 cpi = (struct rndis_config_parameter_info *)((ulong)set +
626 set->info_buf_offset);
627 cpi->parameter_name_offset =
628 sizeof(struct rndis_config_parameter_info);
629 /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
630 cpi->parameter_name_length = 2*NWADR_STRLEN;
631 cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
632 cpi->parameter_value_offset =
633 cpi->parameter_name_offset + cpi->parameter_name_length;
634 /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
635 cpi->parameter_value_length = 4*ETH_ALEN;
636
637 cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
638 cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
639 ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
640 cfg_nwadr, NWADR_STRLEN);
641 if (ret < 0)
642 goto cleanup;
643 snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
644 ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
645 cfg_mac, 2*ETH_ALEN);
646 if (ret < 0)
647 goto cleanup;
648
649 ret = rndis_filter_send_request(rdev, request);
650 if (ret != 0)
651 goto cleanup;
652
653 wait_for_completion(&request->wait_event);
654
655 set_complete = &request->response_msg.msg.set_complete;
656 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
657 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
658 set_complete->status);
659 ret = -EINVAL;
660 }
661
662 cleanup:
663 put_rndis_request(rdev, request);
664 return ret;
665 }
666
667 static int
668 rndis_filter_set_offload_params(struct net_device *ndev,
669 struct netvsc_device *nvdev,
670 struct ndis_offload_params *req_offloads)
671 {
672 struct rndis_device *rdev = nvdev->extension;
673 struct rndis_request *request;
674 struct rndis_set_request *set;
675 struct ndis_offload_params *offload_params;
676 struct rndis_set_complete *set_complete;
677 u32 extlen = sizeof(struct ndis_offload_params);
678 int ret;
679 u32 vsp_version = nvdev->nvsp_version;
680
681 if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
682 extlen = VERSION_4_OFFLOAD_SIZE;
683 /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
684 * UDP checksum offload.
685 */
686 req_offloads->udp_ip_v4_csum = 0;
687 req_offloads->udp_ip_v6_csum = 0;
688 }
689
690 request = get_rndis_request(rdev, RNDIS_MSG_SET,
691 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
692 if (!request)
693 return -ENOMEM;
694
695 set = &request->request_msg.msg.set_req;
696 set->oid = OID_TCP_OFFLOAD_PARAMETERS;
697 set->info_buflen = extlen;
698 set->info_buf_offset = sizeof(struct rndis_set_request);
699 set->dev_vc_handle = 0;
700
701 offload_params = (struct ndis_offload_params *)((ulong)set +
702 set->info_buf_offset);
703 *offload_params = *req_offloads;
704 offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
705 offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
706 offload_params->header.size = extlen;
707
708 ret = rndis_filter_send_request(rdev, request);
709 if (ret != 0)
710 goto cleanup;
711
712 wait_for_completion(&request->wait_event);
713 set_complete = &request->response_msg.msg.set_complete;
714 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
715 netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
716 set_complete->status);
717 ret = -EINVAL;
718 }
719
720 cleanup:
721 put_rndis_request(rdev, request);
722 return ret;
723 }
724
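/*
 * Program receive-side scaling on the host.  The SET request's info buffer
 * is laid out as:
 *
 *   struct ndis_recv_scale_param  (header, flags, hash info, offsets)
 *   u32 indirection_table[ITAB_NUM]
 *   u8  hash_key[NETVSC_HASH_KEYLEN]
 *
 * The indirection table spreads hashed flows across the receive queues.
 */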
725 int rndis_filter_set_rss_param(struct rndis_device *rdev,
726 const u8 *rss_key, int num_queue)
727 {
728 struct net_device *ndev = rdev->ndev;
729 struct rndis_request *request;
730 struct rndis_set_request *set;
731 struct rndis_set_complete *set_complete;
732 u32 extlen = sizeof(struct ndis_recv_scale_param) +
733 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
734 struct ndis_recv_scale_param *rssp;
735 u32 *itab;
736 u8 *keyp;
737 int i, ret;
738
739 request = get_rndis_request(
740 rdev, RNDIS_MSG_SET,
741 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
742 if (!request)
743 return -ENOMEM;
744
745 set = &request->request_msg.msg.set_req;
746 set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
747 set->info_buflen = extlen;
748 set->info_buf_offset = sizeof(struct rndis_set_request);
749 set->dev_vc_handle = 0;
750
751 rssp = (struct ndis_recv_scale_param *)(set + 1);
752 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
753 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
754 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
755 rssp->flag = 0;
756 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
757 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
758 NDIS_HASH_TCP_IPV6;
759 rssp->indirect_tabsize = 4*ITAB_NUM;
760 rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
761 rssp->hashkey_size = NETVSC_HASH_KEYLEN;
762 rssp->kashkey_offset = rssp->indirect_taboffset +
763 rssp->indirect_tabsize;
764
765 /* Set indirection table entries */
766 itab = (u32 *)(rssp + 1);
767 for (i = 0; i < ITAB_NUM; i++)
768 itab[i] = rdev->ind_table[i];
769
770 /* Set hash key values */
771 keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
772 memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
773
774 ret = rndis_filter_send_request(rdev, request);
775 if (ret != 0)
776 goto cleanup;
777
778 wait_for_completion(&request->wait_event);
779 set_complete = &request->response_msg.msg.set_complete;
780 if (set_complete->status == RNDIS_STATUS_SUCCESS)
781 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
782 else {
783 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
784 set_complete->status);
785 ret = -EINVAL;
786 }
787
788 cleanup:
789 put_rndis_request(rdev, request);
790 return ret;
791 }
792
793 static int rndis_filter_query_device_link_status(struct rndis_device *dev)
794 {
795 u32 size = sizeof(u32);
796 u32 link_status;
797 int ret;
798
799 ret = rndis_filter_query_device(dev,
800 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
801 &link_status, &size);
802
803 return ret;
804 }
805
806 static int rndis_filter_query_link_speed(struct rndis_device *dev)
807 {
808 u32 size = sizeof(u32);
809 u32 link_speed;
810 struct net_device_context *ndc;
811 int ret;
812
813 ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED,
814 &link_speed, &size);
815
816 if (!ret) {
817 ndc = netdev_priv(dev->ndev);
818
819 /* The link speed reported by the host is in units of 100 bps,
820 * so we convert it to Mbps here.
821 */
822 ndc->speed = link_speed / 10000;
823 }
824
825 return ret;
826 }
827
828 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
829 u32 new_filter)
830 {
831 struct rndis_request *request;
832 struct rndis_set_request *set;
833 int ret;
834
835 request = get_rndis_request(dev, RNDIS_MSG_SET,
836 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
837 sizeof(u32));
838 if (!request)
839 return -ENOMEM;
840
841
842 /* Setup the rndis set */
843 set = &request->request_msg.msg.set_req;
844 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
845 set->info_buflen = sizeof(u32);
846 set->info_buf_offset = sizeof(struct rndis_set_request);
847
848 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
849 &new_filter, sizeof(u32));
850
851 ret = rndis_filter_send_request(dev, request);
852 if (ret == 0)
853 wait_for_completion(&request->wait_event);
854
855 put_rndis_request(dev, request);
856
857 return ret;
858 }
859
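/*
 * Deferred work scheduled by rndis_filter_update(): program either
 * promiscuous mode or the default directed/broadcast/all-multicast
 * packet filter on the host.
 */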
860 static void rndis_set_multicast(struct work_struct *w)
861 {
862 struct rndis_device *rdev
863 = container_of(w, struct rndis_device, mcast_work);
864
865 if (rdev->ndev->flags & IFF_PROMISC)
866 rndis_filter_set_packet_filter(rdev,
867 NDIS_PACKET_TYPE_PROMISCUOUS);
868 else
869 rndis_filter_set_packet_filter(rdev,
870 NDIS_PACKET_TYPE_BROADCAST |
871 NDIS_PACKET_TYPE_ALL_MULTICAST |
872 NDIS_PACKET_TYPE_DIRECTED);
873 }
874
875 void rndis_filter_update(struct netvsc_device *nvdev)
876 {
877 struct rndis_device *rdev = nvdev->extension;
878
879 schedule_work(&rdev->mcast_work);
880 }
881
882 static int rndis_filter_init_device(struct rndis_device *dev)
883 {
884 struct rndis_request *request;
885 struct rndis_initialize_request *init;
886 struct rndis_initialize_complete *init_complete;
887 u32 status;
888 int ret;
889 struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
890
891 request = get_rndis_request(dev, RNDIS_MSG_INIT,
892 RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
893 if (!request) {
894 ret = -ENOMEM;
895 goto cleanup;
896 }
897
898 /* Setup the rndis initialization request */
899 init = &request->request_msg.msg.init_req;
900 init->major_ver = RNDIS_MAJOR_VERSION;
901 init->minor_ver = RNDIS_MINOR_VERSION;
902 init->max_xfer_size = 0x4000;
903
904 dev->state = RNDIS_DEV_INITIALIZING;
905
906 ret = rndis_filter_send_request(dev, request);
907 if (ret != 0) {
908 dev->state = RNDIS_DEV_UNINITIALIZED;
909 goto cleanup;
910 }
911
912 wait_for_completion(&request->wait_event);
913
914 init_complete = &request->response_msg.msg.init_complete;
915 status = init_complete->status;
916 if (status == RNDIS_STATUS_SUCCESS) {
917 dev->state = RNDIS_DEV_INITIALIZED;
918 nvdev->max_pkt = init_complete->max_pkt_per_msg;
919 nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
920 ret = 0;
921 } else {
922 dev->state = RNDIS_DEV_UNINITIALIZED;
923 ret = -EINVAL;
924 }
925
926 cleanup:
927 if (request)
928 put_rndis_request(dev, request);
929
930 return ret;
931 }
932
933 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
934 {
935 int i;
936
937 if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
938 return false;
939
940 for (i = 0; i < nvdev->num_chn; i++) {
941 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
942
943 if (atomic_read(&nvchan->queue_sends) > 0)
944 return false;
945 }
946
947 return true;
948 }
949
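/*
 * Send a best-effort RNDIS HALT to the host, then wait until all
 * outstanding receive completions and queued sends have drained so the
 * device can be torn down safely.
 */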
950 static void rndis_filter_halt_device(struct rndis_device *dev)
951 {
952 struct rndis_request *request;
953 struct rndis_halt_request *halt;
954 struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
955 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
956
957 /* Attempt to do a rndis device halt */
958 request = get_rndis_request(dev, RNDIS_MSG_HALT,
959 RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
960 if (!request)
961 goto cleanup;
962
963 /* Setup the rndis halt request */
964 halt = &request->request_msg.msg.halt_req;
965 halt->req_id = atomic_inc_return(&dev->new_req_id);
966
967 /* Ignore return since this msg is optional. */
968 rndis_filter_send_request(dev, request);
969
970 dev->state = RNDIS_DEV_UNINITIALIZED;
971
972 cleanup:
973 nvdev->destroy = true;
974
975 /* Force flag to be ordered before waiting */
976 wmb();
977
978 /* Wait for all send completions */
979 wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
980
981 if (request)
982 put_rndis_request(dev, request);
983 }
984
985 static int rndis_filter_open_device(struct rndis_device *dev)
986 {
987 int ret;
988
989 if (dev->state != RNDIS_DEV_INITIALIZED)
990 return 0;
991
992 ret = rndis_filter_set_packet_filter(dev,
993 NDIS_PACKET_TYPE_BROADCAST |
994 NDIS_PACKET_TYPE_ALL_MULTICAST |
995 NDIS_PACKET_TYPE_DIRECTED);
996 if (ret == 0)
997 dev->state = RNDIS_DEV_DATAINITIALIZED;
998
999 return ret;
1000 }
1001
1002 static int rndis_filter_close_device(struct rndis_device *dev)
1003 {
1004 int ret;
1005
1006 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1007 return 0;
1008
1009 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1010 cancel_work_sync(&dev->mcast_work);
1011
1012 ret = rndis_filter_set_packet_filter(dev, 0);
1013 if (ret == -ENODEV)
1014 ret = 0;
1015
1016 if (ret == 0)
1017 dev->state = RNDIS_DEV_INITIALIZED;
1018
1019 return ret;
1020 }
1021
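/*
 * Callback run for each subchannel offered by the host: allocate the
 * receive completion buffer, switch the channel to ISR-only read mode for
 * NAPI, open the channel, and enable NAPI on it.  channel_init_wait is
 * completed once every offered subchannel has been handled.
 */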
1022 static void netvsc_sc_open(struct vmbus_channel *new_sc)
1023 {
1024 struct net_device *ndev =
1025 hv_get_drvdata(new_sc->primary_channel->device_obj);
1026 struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
1027 u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
1028 struct netvsc_channel *nvchan;
1029 int ret;
1030
1031 if (chn_index >= nvscdev->num_chn)
1032 return;
1033
1034 nvchan = nvscdev->chan_table + chn_index;
1035 nvchan->mrc.buf
1036 = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
1037
1038 if (!nvchan->mrc.buf)
1039 return;
1040
1041 /* Because the device uses NAPI, all the interrupt batching and
1042 * control is done via Net softirq, not the channel handling
1043 */
1044 set_channel_read_mode(new_sc, HV_CALL_ISR);
1045
1046 /* Set the channel before opening.*/
1047 nvchan->channel = new_sc;
1048 netif_napi_add(ndev, &nvchan->napi,
1049 netvsc_poll, NAPI_POLL_WEIGHT);
1050
1051 ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
1052 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
1053 netvsc_channel_cb, nvchan);
1054 if (ret == 0)
1055 napi_enable(&nvchan->napi);
1056 else
1057 netif_napi_del(&nvchan->napi);
1058
1059 if (refcount_dec_and_test(&nvscdev->sc_offered))
1060 complete(&nvscdev->channel_init_wait);
1061 }
1062
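/*
 * Bring up the RNDIS device on top of the netvsc channel: initialize RNDIS,
 * query MTU, MAC address, hardware offload capabilities and link status,
 * program the offload parameters, and, if the host supports it, request
 * vRSS subchannels and configure RSS.
 */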
1063 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1064 struct netvsc_device_info *device_info)
1065 {
1066 struct net_device *net = hv_get_drvdata(dev);
1067 struct net_device_context *net_device_ctx = netdev_priv(net);
1068 struct netvsc_device *net_device;
1069 struct rndis_device *rndis_device;
1070 struct ndis_offload hwcaps;
1071 struct ndis_offload_params offloads;
1072 struct nvsp_message *init_packet;
1073 struct ndis_recv_scale_cap rsscap;
1074 u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
1075 unsigned int gso_max_size = GSO_MAX_SIZE;
1076 u32 mtu, size, num_rss_qs;
1077 const struct cpumask *node_cpu_mask;
1078 u32 num_possible_rss_qs;
1079 int i, ret;
1080
1081 rndis_device = get_rndis_device();
1082 if (!rndis_device)
1083 return ERR_PTR(-ENODEV);
1084
1085 /*
1086 * Let the inner driver handle this first to create the netvsc channel.
1087 * NOTE! Once the channel is created, we may get a receive callback
1088 * (rndis_filter_receive()) before this call completes.
1089 */
1090 net_device = netvsc_device_add(dev, device_info);
1091 if (IS_ERR(net_device)) {
1092 kfree(rndis_device);
1093 return net_device;
1094 }
1095
1096 /* Initialize the rndis device */
1097 net_device->max_chn = 1;
1098 net_device->num_chn = 1;
1099
1100 refcount_set(&net_device->sc_offered, 0);
1101
1102 net_device->extension = rndis_device;
1103 rndis_device->ndev = net;
1104
1105 /* Send the rndis initialization message */
1106 ret = rndis_filter_init_device(rndis_device);
1107 if (ret != 0)
1108 goto err_dev_remv;
1109
1110 /* Get the MTU from the host */
1111 size = sizeof(u32);
1112 ret = rndis_filter_query_device(rndis_device,
1113 RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
1114 &mtu, &size);
1115 if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
1116 net->mtu = mtu;
1117
1118 /* Get the mac address */
1119 ret = rndis_filter_query_device_mac(rndis_device);
1120 if (ret != 0)
1121 goto err_dev_remv;
1122
1123 memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
1124
1125 /* Find HW offload capabilities */
1126 ret = rndis_query_hwcaps(rndis_device, &hwcaps);
1127 if (ret != 0)
1128 goto err_dev_remv;
1129
1130 /* A value of zero means "no change"; now turn on what we want. */
1131 memset(&offloads, 0, sizeof(struct ndis_offload_params));
1132
1133 /* Linux does not use IP checksum offload; the kernel always computes it */
1134 offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
1135
1136 /* Compute tx offload settings based on hw capabilities */
1137 net->hw_features = NETIF_F_RXCSUM;
1138
1139 if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
1140 /* Can checksum TCP */
1141 net->hw_features |= NETIF_F_IP_CSUM;
1142 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
1143
1144 offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1145
1146 if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
1147 offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1148 net->hw_features |= NETIF_F_TSO;
1149
1150 if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
1151 gso_max_size = hwcaps.lsov2.ip4_maxsz;
1152 }
1153
1154 if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
1155 offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1156 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
1157 }
1158 }
1159
1160 if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
1161 net->hw_features |= NETIF_F_IPV6_CSUM;
1162
1163 offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1164 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
1165
1166 if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
1167 (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
1168 offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
1169 net->hw_features |= NETIF_F_TSO6;
1170
1171 if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
1172 gso_max_size = hwcaps.lsov2.ip6_maxsz;
1173 }
1174
1175 if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
1176 offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
1177 net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
1178 }
1179 }
1180
1181 netif_set_gso_max_size(net, gso_max_size);
1182
1183 ret = rndis_filter_set_offload_params(net, net_device, &offloads);
1184 if (ret)
1185 goto err_dev_remv;
1186
1187 rndis_filter_query_device_link_status(rndis_device);
1188
1189 netdev_dbg(net, "Device MAC %pM link state %s\n",
1190 rndis_device->hw_mac_adr,
1191 rndis_device->link_state ? "down" : "up");
1192
1193 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1194 return net_device;
1195
1196 rndis_filter_query_link_speed(rndis_device);
1197
1198 /* vRSS setup */
1199 memset(&rsscap, 0, rsscap_size);
1200 ret = rndis_filter_query_device(rndis_device,
1201 OID_GEN_RECEIVE_SCALE_CAPABILITIES,
1202 &rsscap, &rsscap_size);
1203 if (ret || rsscap.num_recv_que < 2)
1204 goto out;
1205
1206 /*
1207 * We will limit the VRSS channels to the number of CPUs in the NUMA node
1208 * the primary channel is currently bound to.
1209 *
1210 * This also guarantees that num_possible_rss_qs <= num_online_cpus
1211 */
1212 node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
1213 num_possible_rss_qs = min_t(u32, cpumask_weight(node_cpu_mask),
1214 rsscap.num_recv_que);
1215
1216 net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);
1217
1218 /* We will use the given number of channels if available. */
1219 net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
1220
1221 for (i = 0; i < ITAB_NUM; i++)
1222 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
1223 net_device->num_chn);
1224
1225 num_rss_qs = net_device->num_chn - 1;
1226 if (num_rss_qs == 0)
1227 return net_device;
1228
1229 refcount_set(&net_device->sc_offered, num_rss_qs);
1230 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1231
1232 init_packet = &net_device->channel_init_pkt;
1233 memset(init_packet, 0, sizeof(struct nvsp_message));
1234 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
1235 init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
1236 init_packet->msg.v5_msg.subchn_req.num_subchannels =
1237 net_device->num_chn - 1;
1238 ret = vmbus_sendpacket(dev->channel, init_packet,
1239 sizeof(struct nvsp_message),
1240 (unsigned long)init_packet,
1241 VM_PKT_DATA_INBAND,
1242 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1243 if (ret)
1244 goto out;
1245
1246 wait_for_completion(&net_device->channel_init_wait);
1247 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1248 ret = -ENODEV;
1249 goto out;
1250 }
1251
1252 net_device->num_chn = 1 +
1253 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1254
1255 /* ignore failures from setting rss parameters, we still have channels */
1256 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
1257 net_device->num_chn);
1258 out:
1259 if (ret) {
1260 net_device->max_chn = 1;
1261 net_device->num_chn = 1;
1262 }
1263
1264 return net_device;
1265
1266 err_dev_remv:
1267 rndis_filter_device_remove(dev, net_device);
1268 return ERR_PTR(ret);
1269 }
1270
1271 void rndis_filter_device_remove(struct hv_device *dev,
1272 struct netvsc_device *net_dev)
1273 {
1274 struct rndis_device *rndis_dev = net_dev->extension;
1275
1276 /* Halt and release the rndis device */
1277 rndis_filter_halt_device(rndis_dev);
1278
1279 kfree(rndis_dev);
1280 net_dev->extension = NULL;
1281
1282 netvsc_device_remove(dev);
1283 }
1284
1285 int rndis_filter_open(struct netvsc_device *nvdev)
1286 {
1287 if (!nvdev)
1288 return -EINVAL;
1289
1290 if (atomic_inc_return(&nvdev->open_cnt) != 1)
1291 return 0;
1292
1293 return rndis_filter_open_device(nvdev->extension);
1294 }
1295
1296 int rndis_filter_close(struct netvsc_device *nvdev)
1297 {
1298 if (!nvdev)
1299 return -EINVAL;
1300
1301 if (atomic_dec_return(&nvdev->open_cnt) != 0)
1302 return 0;
1303
1304 return rndis_filter_close_device(nvdev->extension);
1305 }
1306
1307 bool rndis_filter_opened(const struct netvsc_device *nvdev)
1308 {
1309 return atomic_read(&nvdev->open_cnt) > 0;
1310 }