]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/net/hyperv/rndis_filter.c
Merge tag 'pm-5.11-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
[mirror_ubuntu-jammy-kernel.git] / drivers / net / hyperv / rndis_filter.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2009, Microsoft Corporation.
4 *
5 * Authors:
6 * Haiyang Zhang <haiyangz@microsoft.com>
7 * Hank Janssen <hjanssen@microsoft.com>
8 */
9 #include <linux/ethtool.h>
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/wait.h>
13 #include <linux/highmem.h>
14 #include <linux/slab.h>
15 #include <linux/io.h>
16 #include <linux/if_ether.h>
17 #include <linux/netdevice.h>
18 #include <linux/if_vlan.h>
19 #include <linux/nls.h>
20 #include <linux/vmalloc.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/ucs2_string.h>
23
24 #include "hyperv_net.h"
25 #include "netvsc_trace.h"
26
/* Deferred work handler that re-syncs the host packet filter; defined below */
static void rndis_set_multicast(struct work_struct *w);

/* Size of the extended-info buffers trailing each RNDIS message below */
#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE

/*
 * Tracks one in-flight RNDIS control transaction: the request sent to
 * the host plus the buffer the matching completion is copied into by
 * rndis_filter_receive_response().
 */
struct rndis_request {
	struct list_head list_ent;	/* entry in rndis_device::req_list */
	struct completion wait_event;	/* completed when the response arrives */

	/* Response copied here (bounds-checked against RNDIS_EXT_LEN) */
	struct rndis_message response_msg;
	/*
	 * The buffer for extended info after the RNDIS response message. It's
	 * referenced based on the data offset in the RNDIS message. Its size
	 * is enough for current needs, and should be sufficient for the near
	 * future.
	 */
	u8 response_ext[RNDIS_EXT_LEN];

	/* Simplify allocation by having a netvsc packet inline */
	struct hv_netvsc_packet pkt;

	struct rndis_message request_msg;
	/*
	 * The buffer for the extended info after the RNDIS request message.
	 * It is referenced and sized in a similar way as response_ext.
	 */
	u8 request_ext[RNDIS_EXT_LEN];
};
53
/*
 * Default RSS hash secret key. Presumably passed as the rss_key argument
 * to rndis_filter_set_rss_param() by code outside this chunk (Toeplitz
 * hashing is what rndis_set_rss_param_msg() requests) — confirm callers.
 */
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
61
62 static struct rndis_device *get_rndis_device(void)
63 {
64 struct rndis_device *device;
65
66 device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
67 if (!device)
68 return NULL;
69
70 spin_lock_init(&device->request_lock);
71
72 INIT_LIST_HEAD(&device->req_list);
73 INIT_WORK(&device->mcast_work, rndis_set_multicast);
74
75 device->state = RNDIS_DEV_UNINITIALIZED;
76
77 return device;
78 }
79
80 static struct rndis_request *get_rndis_request(struct rndis_device *dev,
81 u32 msg_type,
82 u32 msg_len)
83 {
84 struct rndis_request *request;
85 struct rndis_message *rndis_msg;
86 struct rndis_set_request *set;
87 unsigned long flags;
88
89 request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
90 if (!request)
91 return NULL;
92
93 init_completion(&request->wait_event);
94
95 rndis_msg = &request->request_msg;
96 rndis_msg->ndis_msg_type = msg_type;
97 rndis_msg->msg_len = msg_len;
98
99 request->pkt.q_idx = 0;
100
101 /*
102 * Set the request id. This field is always after the rndis header for
103 * request/response packet types so we just used the SetRequest as a
104 * template
105 */
106 set = &rndis_msg->msg.set_req;
107 set->req_id = atomic_inc_return(&dev->new_req_id);
108
109 /* Add to the request list */
110 spin_lock_irqsave(&dev->request_lock, flags);
111 list_add_tail(&request->list_ent, &dev->req_list);
112 spin_unlock_irqrestore(&dev->request_lock, flags);
113
114 return request;
115 }
116
/*
 * Unlink a request from the device's pending list and free it.  The
 * lock is required because rndis_filter_receive_response() walks the
 * same list under request_lock.
 */
static void put_rndis_request(struct rndis_device *dev,
			      struct rndis_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->request_lock, flags);
	list_del(&req->list_ent);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	kfree(req);
}
128
/*
 * Log a human-readable summary of one RNDIS message (data packet,
 * init/query/set completion, or status indication) for rx debugging.
 * The caller gates this on netif_msg_rx_status(); netdev_dbg() itself
 * is a no-op unless dynamic debug (or DEBUG) is enabled.
 */
static void dump_rndis_message(struct net_device *netdev,
			       const struct rndis_message *rndis_msg)
{
	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
			   "data offset %u data len %u, # oob %u, "
			   "oob offset %u, oob len %u, pkt offset %u, "
			   "pkt len %u\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.pkt.data_offset,
			   rndis_msg->msg.pkt.data_len,
			   rndis_msg->msg.pkt.num_oob_data_elements,
			   rndis_msg->msg.pkt.oob_data_offset,
			   rndis_msg->msg.pkt.oob_data_len,
			   rndis_msg->msg.pkt.per_pkt_info_offset,
			   rndis_msg->msg.pkt.per_pkt_info_len);
		break;

	case RNDIS_MSG_INIT_C:
		netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
			   "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
			   "device flags %d, max xfer size 0x%x, max pkts %u, "
			   "pkt aligned %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.init_complete.req_id,
			   rndis_msg->msg.init_complete.status,
			   rndis_msg->msg.init_complete.major_ver,
			   rndis_msg->msg.init_complete.minor_ver,
			   rndis_msg->msg.init_complete.dev_flags,
			   rndis_msg->msg.init_complete.max_xfer_size,
			   rndis_msg->msg.init_complete.
			   max_pkt_per_msg,
			   rndis_msg->msg.init_complete.
			   pkt_alignment_factor);
		break;

	case RNDIS_MSG_QUERY_C:
		netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
			   "(len %u, id 0x%x, status 0x%x, buf len %u, "
			   "buf offset %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.query_complete.req_id,
			   rndis_msg->msg.query_complete.status,
			   rndis_msg->msg.query_complete.
			   info_buflen,
			   rndis_msg->msg.query_complete.
			   info_buf_offset);
		break;

	case RNDIS_MSG_SET_C:
		netdev_dbg(netdev,
			   "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.set_complete.req_id,
			   rndis_msg->msg.set_complete.status);
		break;

	case RNDIS_MSG_INDICATE:
		netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
			   "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
			   rndis_msg->msg_len,
			   rndis_msg->msg.indicate_status.status,
			   rndis_msg->msg.indicate_status.status_buflen,
			   rndis_msg->msg.indicate_status.status_buf_offset);
		break;

	default:
		/* Unknown type: just log type and length */
		netdev_dbg(netdev, "0x%x (len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		break;
	}
}
203
/*
 * Post an RNDIS control request to the host.
 *
 * The request message embedded in *req is described to vmbus as one
 * page-buffer element, or two when it straddles a Hyper-V page
 * boundary.  Control requests always go on queue 0 (q_idx was set by
 * get_rndis_request()).  Returns the netvsc_send() status.
 */
static int rndis_filter_send_request(struct rndis_device *dev,
				     struct rndis_request *req)
{
	struct hv_netvsc_packet *packet;
	struct hv_page_buffer page_buf[2];
	struct hv_page_buffer *pb = page_buf;
	int ret;

	/* Setup the packet to send it */
	packet = &req->pkt;

	packet->total_data_buflen = req->request_msg.msg_len;
	packet->page_buf_cnt = 1;

	pb[0].pfn = virt_to_phys(&req->request_msg) >>
		HV_HYP_PAGE_SHIFT;
	pb[0].len = req->request_msg.msg_len;
	pb[0].offset = offset_in_hvpage(&req->request_msg);

	/* Add one page_buf when request_msg crossing page boundary */
	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
		packet->page_buf_cnt++;
		/* First element covers up to the end of its page ... */
		pb[0].len = HV_HYP_PAGE_SIZE -
			pb[0].offset;
		/* ... second element covers the remainder on the next page */
		pb[1].pfn = virt_to_phys((void *)&req->request_msg
			+ pb[0].len) >> HV_HYP_PAGE_SHIFT;
		pb[1].offset = 0;
		pb[1].len = req->request_msg.msg_len -
			pb[0].len;
	}

	trace_rndis_send(dev->ndev, 0, &req->request_msg);

	/* NOTE(review): the BH-disabled RCU read section presumably
	 * protects state that netvsc_send() dereferences under RCU —
	 * confirm against netvsc_send()'s locking requirements.
	 */
	rcu_read_lock_bh();
	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
	rcu_read_unlock_bh();

	return ret;
}
243
244 static void rndis_set_link_state(struct rndis_device *rdev,
245 struct rndis_request *request)
246 {
247 u32 link_status;
248 struct rndis_query_complete *query_complete;
249
250 query_complete = &request->response_msg.msg.query_complete;
251
252 if (query_complete->status == RNDIS_STATUS_SUCCESS &&
253 query_complete->info_buflen == sizeof(u32)) {
254 memcpy(&link_status, (void *)((unsigned long)query_complete +
255 query_complete->info_buf_offset), sizeof(u32));
256 rdev->link_state = link_status != 0;
257 }
258 }
259
/*
 * Handle a control-message completion from the host: match it to a
 * pending request by request id, copy the response into the request's
 * response buffer (response_msg + response_ext) and wake the waiter.
 *
 * Host input is untrusted: msg_len was already checked against the
 * receive buffer by rndis_filter_receive(), and is bounds-checked here
 * again before copying into the fixed-size response buffer.
 */
static void rndis_filter_receive_response(struct net_device *ndev,
					  struct netvsc_device *nvdev,
					  const struct rndis_message *resp)
{
	struct rndis_device *dev = nvdev->extension;
	struct rndis_request *request = NULL;
	bool found = false;
	unsigned long flags;

	/* This should never happen, it means control message
	 * response received after device removed.
	 */
	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
		netdev_err(ndev,
			   "got rndis message uninitialized\n");
		return;
	}

	/* Ensure the packet is big enough to read req_id. Req_id is the 1st
	 * field in any request/response message, so the payload should have at
	 * least sizeof(u32) bytes
	 */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
		netdev_err(ndev, "rndis msg_len too small: %u\n",
			   resp->msg_len);
		return;
	}

	spin_lock_irqsave(&dev->request_lock, flags);
	list_for_each_entry(request, &dev->req_list, list_ent) {
		/*
		 * All request/response message contains RequestId as the 1st
		 * field
		 */
		if (request->request_msg.msg.init_req.req_id
		    == resp->msg.init_complete.req_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->request_lock, flags);

	if (found) {
		/* response_ext directly follows response_msg, so a copy of up
		 * to sizeof(struct rndis_message) + RNDIS_EXT_LEN fits.
		 */
		if (resp->msg_len <=
		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
			memcpy(&request->response_msg, resp,
			       resp->msg_len);
			/* Link-status query completions also update
			 * rdev->link_state as a side effect.
			 */
			if (request->request_msg.ndis_msg_type ==
			    RNDIS_MSG_QUERY && request->request_msg.msg.
			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
				rndis_set_link_state(dev, request);
		} else {
			netdev_err(ndev,
				   "rndis response buffer overflow "
				   "detected (size %u max %zu)\n",
				   resp->msg_len,
				   sizeof(struct rndis_message));

			if (resp->ndis_msg_type ==
			    RNDIS_MSG_RESET_C) {
				/* does not have a request id field */
				request->response_msg.msg.reset_complete.
					status = RNDIS_STATUS_BUFFER_OVERFLOW;
			} else {
				request->response_msg.msg.
					init_complete.status =
					RNDIS_STATUS_BUFFER_OVERFLOW;
			}
		}

		/* Wake whoever is blocked in wait_for_completion() */
		complete(&request->wait_event);
	} else {
		netdev_err(ndev,
			   "no rndis request found for this response "
			   "(id 0x%x res type 0x%x)\n",
			   resp->msg.init_complete.req_id,
			   resp->ndis_msg_type);
	}
}
339
340 /*
341 * Get the Per-Packet-Info with the specified type
342 * return NULL if not found.
343 */
344 static inline void *rndis_get_ppi(struct net_device *ndev,
345 struct rndis_packet *rpkt,
346 u32 rpkt_len, u32 type, u8 internal)
347 {
348 struct rndis_per_packet_info *ppi;
349 int len;
350
351 if (rpkt->per_pkt_info_offset == 0)
352 return NULL;
353
354 /* Validate info_offset and info_len */
355 if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
356 rpkt->per_pkt_info_offset > rpkt_len) {
357 netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
358 rpkt->per_pkt_info_offset);
359 return NULL;
360 }
361
362 if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
363 netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
364 rpkt->per_pkt_info_len);
365 return NULL;
366 }
367
368 ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
369 rpkt->per_pkt_info_offset);
370 len = rpkt->per_pkt_info_len;
371
372 while (len > 0) {
373 /* Validate ppi_offset and ppi_size */
374 if (ppi->size > len) {
375 netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
376 continue;
377 }
378
379 if (ppi->ppi_offset >= ppi->size) {
380 netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
381 continue;
382 }
383
384 if (ppi->type == type && ppi->internal == internal)
385 return (void *)((ulong)ppi + ppi->ppi_offset);
386 len -= ppi->size;
387 ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
388 }
389
390 return NULL;
391 }
392
393 static inline
394 void rsc_add_data(struct netvsc_channel *nvchan,
395 const struct ndis_pkt_8021q_info *vlan,
396 const struct ndis_tcp_ip_checksum_info *csum_info,
397 const u32 *hash_info,
398 void *data, u32 len)
399 {
400 u32 cnt = nvchan->rsc.cnt;
401
402 if (cnt) {
403 nvchan->rsc.pktlen += len;
404 } else {
405 nvchan->rsc.vlan = vlan;
406 nvchan->rsc.csum_info = csum_info;
407 nvchan->rsc.pktlen = len;
408 nvchan->rsc.hash_info = hash_info;
409 }
410
411 nvchan->rsc.data[cnt] = data;
412 nvchan->rsc.len[cnt] = len;
413 nvchan->rsc.cnt++;
414 }
415
/*
 * Hand one RNDIS data packet (or RSC fragment) up the stack.
 *
 * Every offset/length in the message comes from the host and is treated
 * as untrusted: buffer size, data_offset and data_len are validated
 * before use.  For receive segment coalescing, fragments accumulate in
 * nvchan->rsc and are only delivered via netvsc_recv_callback() once
 * the last fragment arrives.
 *
 * Returns NVSP_STAT_SUCCESS, NVSP_STAT_FAIL, or the value from
 * netvsc_recv_callback().
 */
static int rndis_filter_receive_data(struct net_device *ndev,
				     struct netvsc_device *nvdev,
				     struct netvsc_channel *nvchan,
				     struct rndis_message *msg,
				     u32 data_buflen)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	const struct ndis_tcp_ip_checksum_info *csum_info;
	const struct ndis_pkt_8021q_info *vlan;
	const struct rndis_pktinfo_id *pktinfo_id;
	const u32 *hash_info;
	u32 data_offset, rpkt_len;
	void *data;
	bool rsc_more = false;
	int ret;

	/* Ensure data_buflen is big enough to read header fields */
	if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
		netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
			   data_buflen);
		return NVSP_STAT_FAIL;
	}

	/* Validate rndis_pkt offset */
	if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
		netdev_err(ndev, "invalid rndis packet offset: %u\n",
			   rndis_pkt->data_offset);
		return NVSP_STAT_FAIL;
	}

	/* Remove the rndis header and pass it back up the stack */
	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;

	rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
	data_buflen -= data_offset;

	/*
	 * Make sure we got a valid RNDIS message, now total_data_buflen
	 * should be the data packet size plus the trailer padding size
	 */
	if (unlikely(data_buflen < rndis_pkt->data_len)) {
		netdev_err(ndev, "rndis message buffer "
			   "overflow detected (got %u, min %u)"
			   "...dropping this message!\n",
			   data_buflen, rndis_pkt->data_len);
		return NVSP_STAT_FAIL;
	}

	/* Per-packet-info lookups; each returns NULL when absent/invalid */
	vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);

	csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);

	hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);

	pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);

	data = (void *)msg + data_offset;

	/* Identify RSC frags, drop erroneous packets */
	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
		/* A first fragment resets the sequence; a continuation
		 * with no sequence in progress (cnt == 0) is dropped.
		 */
		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
			nvchan->rsc.cnt = 0;
		else if (nvchan->rsc.cnt == 0)
			goto drop;

		rsc_more = true;

		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
			rsc_more = false;

		if (rsc_more && nvchan->rsc.is_last)
			goto drop;
	} else {
		nvchan->rsc.cnt = 0;
	}

	/* Bound the fragment count before rsc_add_data() indexes arrays */
	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
		goto drop;

	/* Put data into per channel structure.
	 * Also, remove the rndis trailer padding from rndis packet message
	 * rndis_pkt->data_len tell us the real data length, we only copy
	 * the data packet to the stack, without the rndis trailer padding
	 */
	rsc_add_data(nvchan, vlan, csum_info, hash_info,
		     data, rndis_pkt->data_len);

	if (rsc_more)
		return NVSP_STAT_SUCCESS;

	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
	nvchan->rsc.cnt = 0;

	return ret;

drop:
	return NVSP_STAT_FAIL;
}
514
515 int rndis_filter_receive(struct net_device *ndev,
516 struct netvsc_device *net_dev,
517 struct netvsc_channel *nvchan,
518 void *data, u32 buflen)
519 {
520 struct net_device_context *net_device_ctx = netdev_priv(ndev);
521 struct rndis_message *rndis_msg = data;
522
523 if (netif_msg_rx_status(net_device_ctx))
524 dump_rndis_message(ndev, rndis_msg);
525
526 /* Validate incoming rndis_message packet */
527 if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
528 buflen < rndis_msg->msg_len) {
529 netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
530 buflen, rndis_msg->msg_len);
531 return NVSP_STAT_FAIL;
532 }
533
534 switch (rndis_msg->ndis_msg_type) {
535 case RNDIS_MSG_PACKET:
536 return rndis_filter_receive_data(ndev, net_dev, nvchan,
537 rndis_msg, buflen);
538 case RNDIS_MSG_INIT_C:
539 case RNDIS_MSG_QUERY_C:
540 case RNDIS_MSG_SET_C:
541 /* completion msgs */
542 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
543 break;
544
545 case RNDIS_MSG_INDICATE:
546 /* notification msgs */
547 netvsc_linkstatus_callback(ndev, rndis_msg);
548 break;
549 default:
550 netdev_err(ndev,
551 "unhandled rndis message (type %u len %u)\n",
552 rndis_msg->ndis_msg_type,
553 rndis_msg->msg_len);
554 return NVSP_STAT_FAIL;
555 }
556
557 return NVSP_STAT_SUCCESS;
558 }
559
/*
 * Issue a blocking RNDIS query for @oid and copy the response info
 * buffer into @result.
 *
 * On entry *result_size is the capacity of @result; on success it is
 * updated to the number of bytes returned (it is zeroed first, so it is
 * 0 on failure).  For the hardware-offload and RSS-capability OIDs, a
 * capabilities header is placed in the request info buffer so the host
 * knows which structure revision/size the guest accepts.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on setup failure, or -1 when
 * the response is larger than the caller's buffer.
 */
static int rndis_filter_query_device(struct rndis_device *dev,
				     struct netvsc_device *nvdev,
				     u32 oid, void *result, u32 *result_size)
{
	struct rndis_request *request;
	u32 inresult_size = *result_size;
	struct rndis_query_request *query;
	struct rndis_query_complete *query_complete;
	int ret = 0;

	if (!result)
		return -EINVAL;

	*result_size = 0;
	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis query */
	query = &request->request_msg.msg.query_req;
	query->oid = oid;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->info_buflen = 0;
	query->dev_vc_handle = 0;

	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
		struct ndis_offload *hwcaps;
		u32 nvsp_version = nvdev->nvsp_version;
		u8 ndis_rev;
		size_t size;

		/* Pick the offload-caps revision/size the host protocol
		 * version supports.
		 */
		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
			size = NDIS_OFFLOAD_SIZE;
		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
			size = NDIS_OFFLOAD_SIZE_6_1;
		} else {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
			size = NDIS_OFFLOAD_SIZE_6_0;
		}

		request->request_msg.msg_len += size;
		query->info_buflen = size;
		hwcaps = (struct ndis_offload *)
			((unsigned long)query + query->info_buf_offset);

		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
		hwcaps->header.revision = ndis_rev;
		hwcaps->header.size = size;

	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
		struct ndis_recv_scale_cap *cap;

		request->request_msg.msg_len +=
			sizeof(struct ndis_recv_scale_cap);
		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
						     query->info_buf_offset);
		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
	}

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	/* Copy the response back */
	query_complete = &request->response_msg.msg.query_complete;

	/* NOTE(review): bare -1 here is not an errno; visible callers only
	 * test for non-zero, so it is kept for compatibility.
	 */
	if (query_complete->info_buflen > inresult_size) {
		ret = -1;
		goto cleanup;
	}

	memcpy(result,
	       (void *)((unsigned long)query_complete +
			query_complete->info_buf_offset),
	       query_complete->info_buflen);

	*result_size = query_complete->info_buflen;

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}
654
/*
 * Get the hardware offload capabilities.
 *
 * Queries OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES into *caps and then
 * sanity-checks the returned NDIS object header (type, revision, size)
 * before the caller trusts the structure contents.  Returns 0 on
 * success or a negative errno.
 */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
		   struct ndis_offload *caps)
{
	u32 caps_len = sizeof(*caps);
	int ret;

	memset(caps, 0, sizeof(*caps));

	ret = rndis_filter_query_device(dev, net_device,
					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
					caps, &caps_len);
	if (ret)
		return ret;

	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
			    caps->header.type);
		return -EINVAL;
	}

	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
			    caps->header.revision);
		return -EINVAL;
	}

	/* caps_len was updated by the query to the bytes actually returned */
	if (caps->header.size > caps_len ||
	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
		netdev_warn(dev->ndev,
			    "invalid NDIS objsize %u, data size %u\n",
			    caps->header.size, caps_len);
		return -EINVAL;
	}

	return 0;
}
693
694 static int rndis_filter_query_device_mac(struct rndis_device *dev,
695 struct netvsc_device *net_device)
696 {
697 u32 size = ETH_ALEN;
698
699 return rndis_filter_query_device(dev, net_device,
700 RNDIS_OID_802_3_PERMANENT_ADDRESS,
701 dev->hw_mac_adr, &size);
702 }
703
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14

/*
 * Ask the host to change the synthetic NIC's MAC address by setting the
 * "NetworkAddress" RNDIS config parameter.  Both the parameter name and
 * the MAC value are carried as UTF-16 strings in the request's extended
 * info buffer, laid out as:
 *   rndis_config_parameter_info | name (utf16) | value (utf16)
 * Returns 0 on success, -ENOMEM, -EIO on a host-side failure, or a
 * negative utf8s_to_utf16s() error.
 */
int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
				const char *mac)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_config_parameter_info *cpi;
	wchar_t *cfg_nwadr, *cfg_mac;
	struct rndis_set_complete *set_complete;
	char macstr[2*ETH_ALEN+1];
	u32 extlen = sizeof(struct rndis_config_parameter_info) +
		2*NWADR_STRLEN + 4*ETH_ALEN;
	int ret;

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	cpi = (struct rndis_config_parameter_info *)((ulong)set +
		set->info_buf_offset);
	cpi->parameter_name_offset =
		sizeof(struct rndis_config_parameter_info);
	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
	cpi->parameter_name_length = 2*NWADR_STRLEN;
	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
	cpi->parameter_value_offset =
		cpi->parameter_name_offset + cpi->parameter_name_length;
	/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
	cpi->parameter_value_length = 4*ETH_ALEN;

	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
			      cfg_nwadr, NWADR_STRLEN);
	if (ret < 0)
		goto cleanup;
	/* %pm renders the 6-byte MAC as 12 hex chars, no separators */
	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
			      cfg_mac, 2*ETH_ALEN);
	if (ret < 0)
		goto cleanup;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS)
		ret = -EIO;

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
770
/*
 * Push the requested offload settings (checksum, LSO, etc.) to the host
 * via OID_TCP_OFFLOAD_PARAMETERS.  On NVSP protocol version 4 and older
 * the smaller v4 parameter layout is used and UDP checksum offload is
 * cleared, since those hosts do not support it.  Returns 0, -ENOMEM, or
 * -EINVAL when the host rejects the settings.
 */
int
rndis_filter_set_offload_params(struct net_device *ndev,
				struct netvsc_device *nvdev,
				struct ndis_offload_params *req_offloads)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct ndis_offload_params *offload_params;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_offload_params);
	int ret;
	u32 vsp_version = nvdev->nvsp_version;

	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
		extlen = VERSION_4_OFFLOAD_SIZE;
		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
		 * UDP checksum offload.
		 */
		req_offloads->udp_ip_v4_csum = 0;
		req_offloads->udp_ip_v6_csum = 0;
	}

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	/* Copy the caller's settings into the info buffer and stamp the
	 * NDIS object header.
	 */
	offload_params = (struct ndis_offload_params *)((ulong)set +
				set->info_buf_offset);
	*offload_params = *req_offloads;
	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
	offload_params->header.size = extlen;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
828
/*
 * Send one OID_GEN_RECEIVE_SCALE_PARAMETERS set request.  The extended
 * info buffer carries, in order: the ndis_recv_scale_param header, the
 * ITAB_NUM-entry indirection table (copied from ndc->rx_table), and the
 * NETVSC_HASH_KEYLEN-byte hash key.  On success — unless RSS is being
 * disabled or the key flagged unchanged — the key is cached in
 * rdev->rss_key.  Returns 0, -ENOMEM, or -EINVAL on host rejection.
 */
static int rndis_set_rss_param_msg(struct rndis_device *rdev,
				   const u8 *rss_key, u16 flag)
{
	struct net_device *ndev = rdev->ndev;
	struct net_device_context *ndc = netdev_priv(ndev);
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_recv_scale_param) +
		     4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
	struct ndis_recv_scale_param *rssp;
	u32 *itab;
	u8 *keyp;
	int i, ret;

	request = get_rndis_request(
			rdev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	rssp = (struct ndis_recv_scale_param *)(set + 1);
	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
	rssp->flag = flag;
	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
			 NDIS_HASH_TCP_IPV6;
	rssp->indirect_tabsize = 4*ITAB_NUM;
	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
	rssp->hashkey_offset = rssp->indirect_taboffset +
			       rssp->indirect_tabsize;

	/* Set indirection table entries */
	itab = (u32 *)(rssp + 1);
	for (i = 0; i < ITAB_NUM; i++)
		itab[i] = ndc->rx_table[i];

	/* Set hash key values */
	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);

	} else {
		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}
900
/*
 * Reprogram the host RSS parameters with the given hash key: RSS is
 * first disabled (result intentionally ignored — best effort), then
 * re-enabled with the new settings.  Returns the status of the enable
 * message.
 */
int rndis_filter_set_rss_param(struct rndis_device *rdev,
			       const u8 *rss_key)
{
	/* Disable RSS before change */
	rndis_set_rss_param_msg(rdev, rss_key,
				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);

	return rndis_set_rss_param_msg(rdev, rss_key, 0);
}
910
911 static int rndis_filter_query_device_link_status(struct rndis_device *dev,
912 struct netvsc_device *net_device)
913 {
914 u32 size = sizeof(u32);
915 u32 link_status;
916
917 return rndis_filter_query_device(dev, net_device,
918 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
919 &link_status, &size);
920 }
921
922 static int rndis_filter_query_link_speed(struct rndis_device *dev,
923 struct netvsc_device *net_device)
924 {
925 u32 size = sizeof(u32);
926 u32 link_speed;
927 struct net_device_context *ndc;
928 int ret;
929
930 ret = rndis_filter_query_device(dev, net_device,
931 RNDIS_OID_GEN_LINK_SPEED,
932 &link_speed, &size);
933
934 if (!ret) {
935 ndc = netdev_priv(dev->ndev);
936
937 /* The link speed reported from host is in 100bps unit, so
938 * we convert it to Mbps here.
939 */
940 ndc->speed = link_speed / 10000;
941 }
942
943 return ret;
944 }
945
946 static int rndis_filter_set_packet_filter(struct rndis_device *dev,
947 u32 new_filter)
948 {
949 struct rndis_request *request;
950 struct rndis_set_request *set;
951 int ret;
952
953 if (dev->filter == new_filter)
954 return 0;
955
956 request = get_rndis_request(dev, RNDIS_MSG_SET,
957 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
958 sizeof(u32));
959 if (!request)
960 return -ENOMEM;
961
962 /* Setup the rndis set */
963 set = &request->request_msg.msg.set_req;
964 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
965 set->info_buflen = sizeof(u32);
966 set->info_buf_offset = sizeof(struct rndis_set_request);
967
968 memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request),
969 &new_filter, sizeof(u32));
970
971 ret = rndis_filter_send_request(dev, request);
972 if (ret == 0) {
973 wait_for_completion(&request->wait_event);
974 dev->filter = new_filter;
975 }
976
977 put_rndis_request(dev, request);
978
979 return ret;
980 }
981
982 static void rndis_set_multicast(struct work_struct *w)
983 {
984 struct rndis_device *rdev
985 = container_of(w, struct rndis_device, mcast_work);
986 u32 filter = NDIS_PACKET_TYPE_DIRECTED;
987 unsigned int flags = rdev->ndev->flags;
988
989 if (flags & IFF_PROMISC) {
990 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
991 } else {
992 if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
993 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
994 if (flags & IFF_BROADCAST)
995 filter |= NDIS_PACKET_TYPE_BROADCAST;
996 }
997
998 rndis_filter_set_packet_filter(rdev, filter);
999 }
1000
/*
 * Schedule deferred work to re-sync the host packet filter with the
 * current netdev flags/multicast list (see rndis_set_multicast()).
 * Presumably deferred because callers cannot sleep while
 * rndis_filter_set_packet_filter() blocks — confirm against callers.
 */
void rndis_filter_update(struct netvsc_device *nvdev)
{
	struct rndis_device *rdev = nvdev->extension;

	schedule_work(&rdev->mcast_work);
}
1007
/*
 * Perform the RNDIS initialize handshake: send RNDIS_MSG_INIT, wait for
 * the completion, and on success record the host's max packets per
 * message and packet alignment in *nvdev.  Device state transitions
 * UNINITIALIZED -> INITIALIZING -> INITIALIZED on success, and falls
 * back to UNINITIALIZED on any failure.  Returns 0, -ENOMEM or -EINVAL.
 */
static int rndis_filter_init_device(struct rndis_device *dev,
				    struct netvsc_device *nvdev)
{
	struct rndis_request *request;
	struct rndis_initialize_request *init;
	struct rndis_initialize_complete *init_complete;
	u32 status;
	int ret;

	request = get_rndis_request(dev, RNDIS_MSG_INIT,
			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis set */
	init = &request->request_msg.msg.init_req;
	init->major_ver = RNDIS_MAJOR_VERSION;
	init->minor_ver = RNDIS_MINOR_VERSION;
	init->max_xfer_size = 0x4000;

	dev->state = RNDIS_DEV_INITIALIZING;

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0) {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		goto cleanup;
	}

	wait_for_completion(&request->wait_event);

	init_complete = &request->response_msg.msg.init_complete;
	status = init_complete->status;
	if (status == RNDIS_STATUS_SUCCESS) {
		dev->state = RNDIS_DEV_INITIALIZED;
		nvdev->max_pkt = init_complete->max_pkt_per_msg;
		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
		ret = 0;
	} else {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		ret = -EINVAL;
	}

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}
1058
1059 static bool netvsc_device_idle(const struct netvsc_device *nvdev)
1060 {
1061 int i;
1062
1063 for (i = 0; i < nvdev->num_chn; i++) {
1064 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1065
1066 if (nvchan->mrc.first != nvchan->mrc.next)
1067 return false;
1068
1069 if (atomic_read(&nvchan->queue_sends) > 0)
1070 return false;
1071 }
1072
1073 return true;
1074 }
1075
/* Send an (optional) RNDIS_MSG_HALT to the host, mark the device for
 * destruction, and wait until all channels have drained before the
 * caller tears the VMBus channel down.
 */
static void rndis_filter_halt_device(struct netvsc_device *nvdev,
				     struct rndis_device *dev)
{
	struct rndis_request *request;
	struct rndis_halt_request *halt;

	/* Attempt to do a rndis device halt */
	request = get_rndis_request(dev, RNDIS_MSG_HALT,
				RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
	if (!request)
		goto cleanup;

	/* Setup the rndis set */
	halt = &request->request_msg.msg.halt_req;
	halt->req_id = atomic_inc_return(&dev->new_req_id);

	/* Ignore return since this msg is optional. */
	rndis_filter_send_request(dev, request);

	dev->state = RNDIS_DEV_UNINITIALIZED;

cleanup:
	/* Even if the halt request could not be allocated/sent, still mark
	 * the device as going away and drain outstanding work.
	 */
	nvdev->destroy = true;

	/* Force flag to be ordered before waiting */
	/* NOTE(review): presumably pairs with a read of nvdev->destroy in
	 * the send/receive completion path — confirm against netvsc.c.
	 */
	wmb();

	/* Wait for all send completions */
	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));

	if (request)
		put_rndis_request(dev, request);
}
1109
1110 static int rndis_filter_open_device(struct rndis_device *dev)
1111 {
1112 int ret;
1113
1114 if (dev->state != RNDIS_DEV_INITIALIZED)
1115 return 0;
1116
1117 ret = rndis_filter_set_packet_filter(dev,
1118 NDIS_PACKET_TYPE_BROADCAST |
1119 NDIS_PACKET_TYPE_ALL_MULTICAST |
1120 NDIS_PACKET_TYPE_DIRECTED);
1121 if (ret == 0)
1122 dev->state = RNDIS_DEV_DATAINITIALIZED;
1123
1124 return ret;
1125 }
1126
1127 static int rndis_filter_close_device(struct rndis_device *dev)
1128 {
1129 int ret;
1130
1131 if (dev->state != RNDIS_DEV_DATAINITIALIZED)
1132 return 0;
1133
1134 /* Make sure rndis_set_multicast doesn't re-enable filter! */
1135 cancel_work_sync(&dev->mcast_work);
1136
1137 ret = rndis_filter_set_packet_filter(dev, 0);
1138 if (ret == -ENODEV)
1139 ret = 0;
1140
1141 if (ret == 0)
1142 dev->state = RNDIS_DEV_INITIALIZED;
1143
1144 return ret;
1145 }
1146
/* VMBus callback invoked when the host offers a new sub-channel.
 * Opens the channel, attaches it to its slot in chan_table, enables
 * NAPI on it, and wakes rndis_set_subchannel() once the last expected
 * channel is accounted for.
 */
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
	struct net_device *ndev =
		hv_get_drvdata(new_sc->primary_channel->device_obj);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *nvscdev;
	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	int ret;

	/* This is safe because this callback only happens when
	 * new device is being setup and waiting on the channel_init_wait.
	 */
	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
	/* Reject indices outside the channel count we negotiated:
	 * chn_index comes from the (untrusted) host offer message.
	 */
	if (!nvscdev || chn_index >= nvscdev->num_chn)
		return;

	nvchan = nvscdev->chan_table + chn_index;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	/* Set the channel before opening.*/
	nvchan->channel = new_sc;

	new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	ret = vmbus_open(new_sc, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, nvchan);
	if (ret == 0)
		napi_enable(&nvchan->napi);
	else
		netdev_notice(ndev, "sub channel open failed: %d\n", ret);

	/* Count this channel (even on open failure) so the waiter in
	 * rndis_set_subchannel() can make progress.
	 */
	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
		wake_up(&nvscdev->subchan_open);
}
1186
/* Open sub-channels after completing the handling of the device probe.
 * This breaks overlap of processing the host message for the
 * new primary channel with the initialization of sub-channels.
 *
 * Requests nvdev->num_chn - 1 sub-channels from the host, waits for the
 * allocation reply and for netvsc_sc_open() to account for every channel,
 * then programs the TX table, RSS key and the netdev queue counts.
 * Must be called under RTNL. Returns 0 or a negative errno.
 */
int rndis_set_subchannel(struct net_device *ndev,
			 struct netvsc_device *nvdev,
			 struct netvsc_device_info *dev_info)
{
	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hv_dev = ndev_ctx->device_ctx;
	struct rndis_device *rdev = nvdev->extension;
	int i, ret;

	ASSERT_RTNL();

	/* The primary channel already exists, so ask for num_chn - 1 more */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
		nvdev->num_chn - 1;
	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
		return ret;
	}

	/* The completion handler fills init_packet with the host's reply */
	wait_for_completion(&nvdev->channel_init_wait);
	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "sub channel request failed\n");
		return -EIO;
	}

	/* Check that number of allocated sub channel is within the expected range */
	if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
		netdev_err(ndev, "invalid number of allocated sub channel\n");
		return -EINVAL;
	}
	/* The host may grant fewer sub-channels than requested */
	nvdev->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	/* wait for all sub channels to open */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);

	/* Spread the send table round-robin over the final channel count */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		ndev_ctx->tx_table[i] = i % nvdev->num_chn;

	/* ignore failures from setting rss parameters, still have channels */
	if (dev_info)
		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
	else
		rndis_filter_set_rss_param(rdev, netvsc_hash_key);

	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);

	return 0;
}
1252
/* Query the host's NDIS offload capabilities and program both sides to
 * match: the kernel-visible feature flags (net->hw_features and
 * net->features) and the host-side offload parameters.
 *
 * Returns 0 on success or a negative errno from the query/set requests.
 */
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
				   struct netvsc_device *nvdev)
{
	struct net_device *net = rndis_device->ndev;
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	unsigned int gso_max_size = GSO_MAX_SIZE;
	int ret;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
	if (ret != 0)
		return ret;

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum, always does in kernel */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Reset previously set hw_features flags */
	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
	net_device_ctx->tx_checksum_mask = 0;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features |= NETIF_F_RXCSUM;
	net->hw_features |= NETIF_F_SG;
	net->hw_features |= NETIF_F_RXHASH;

	/* IPv4 TCP checksum offload requires the full TCP4 capability set */
	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			/* Clamp GSO to what the host's LSOv2 can segment */
			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;
		}

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
		}
	}

	/* Same pattern for IPv6 */
	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;
		}

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
		}
	}

	/* Receive-segment coalescing only if both families support it;
	 * mirror the current NETIF_F_LRO setting to the host.
	 */
	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
		net->hw_features |= NETIF_F_LRO;

		if (net->features & NETIF_F_LRO) {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		} else {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		}
	}

	/* In case some hw_features disappeared we need to remove them from
	 * net->features list as they're no longer supported.
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_gso_max_size(net, gso_max_size);

	/* Push the chosen offload parameters to the host */
	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);

	return ret;
}
1348
1349 static void rndis_get_friendly_name(struct net_device *net,
1350 struct rndis_device *rndis_device,
1351 struct netvsc_device *net_device)
1352 {
1353 ucs2_char_t wname[256];
1354 unsigned long len;
1355 u8 ifalias[256];
1356 u32 size;
1357
1358 size = sizeof(wname);
1359 if (rndis_filter_query_device(rndis_device, net_device,
1360 RNDIS_OID_GEN_FRIENDLY_NAME,
1361 wname, &size) != 0)
1362 return; /* ignore if host does not support */
1363
1364 if (size == 0)
1365 return; /* name not set */
1366
1367 /* Convert Windows Unicode string to UTF-8 */
1368 len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));
1369
1370 /* ignore the default value from host */
1371 if (strcmp(ifalias, "Network Adapter") != 0)
1372 dev_set_alias(net, ifalias, len);
1373 }
1374
/* Create the netvsc channel for @dev, run the RNDIS init handshake,
 * query host properties (MTU, MAC address, friendly name, offload
 * capabilities, link state), and set up vRSS sub-channels when the
 * host supports them.
 *
 * Returns the new netvsc_device on success — possibly limited to the
 * single primary channel if vRSS setup fell through to "out" — or an
 * ERR_PTR on fatal failure (the device is fully removed in that case).
 */
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
					      struct netvsc_device_info *device_info)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndc = netdev_priv(net);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	u32 mtu, size;
	u32 num_possible_rss_qs;
	int i, ret;

	rndis_device = get_rndis_device();
	if (!rndis_device)
		return ERR_PTR(-ENODEV);

	/* Let the inner driver handle this first to create the netvsc channel
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (RndisFilterOnReceive()) before this call is completed
	 */
	net_device = netvsc_device_add(dev, device_info);
	if (IS_ERR(net_device)) {
		kfree(rndis_device);
		return net_device;
	}

	/* Initialize the rndis device */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	/* Get the MTU from the host */
	size = sizeof(u32);
	ret = rndis_filter_query_device(rndis_device, net_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
					&mtu, &size);
	/* Only shrink the MTU; a query failure keeps the current value */
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
		net->mtu = mtu;

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Get friendly name as ifalias*/
	if (!net->ifalias)
		rndis_get_friendly_name(net, rndis_device, net_device);

	/* Query and set hardware capabilities */
	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	rndis_filter_query_device_link_status(rndis_device, net_device);

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   rndis_device->link_state ? "down" : "up");

	/* vRSS needs NVSP 5+; older hosts stay single-channel */
	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		goto out;

	rndis_filter_query_link_speed(rndis_device, net_device);

	/* vRSS setup */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device, net_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)
		goto out;

	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
	num_possible_rss_qs = min_t(u32, num_online_cpus(),
				    rsscap.num_recv_que);

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);

	/* We will use the given number of channels if available. */
	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);

	/* Default RSS indirection table unless the user configured one */
	if (!netif_is_rxfh_configured(net)) {
		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = ethtool_rxfh_indir_default(
						i, net_device->num_chn);
	}

	/* The primary channel counts as open; netvsc_sc_open() accounts
	 * for each sub-channel as the host offers it.
	 */
	atomic_set(&net_device->open_chn, 1);
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	for (i = 1; i < net_device->num_chn; i++) {
		ret = netvsc_alloc_recv_comp_ring(net_device, i);
		if (ret) {
			/* Free rings allocated so far, fall back to 1 chan */
			while (--i != 0)
				vfree(net_device->chan_table[i].mrc.slots);
			goto out;
		}
	}

	for (i = 1; i < net_device->num_chn; i++)
		netif_napi_add(net, &net_device->chan_table[i].napi,
			       netvsc_poll, NAPI_POLL_WEIGHT);

	return net_device;

out:
	/* setting up multiple channels failed */
	net_device->max_chn = 1;
	net_device->num_chn = 1;
	return net_device;

err_dev_remv:
	rndis_filter_device_remove(dev, net_device);
	return ERR_PTR(ret);
}
1500
1501 void rndis_filter_device_remove(struct hv_device *dev,
1502 struct netvsc_device *net_dev)
1503 {
1504 struct rndis_device *rndis_dev = net_dev->extension;
1505
1506 /* Halt and release the rndis device */
1507 rndis_filter_halt_device(net_dev, rndis_dev);
1508
1509 netvsc_device_remove(dev);
1510 }
1511
1512 int rndis_filter_open(struct netvsc_device *nvdev)
1513 {
1514 if (!nvdev)
1515 return -EINVAL;
1516
1517 return rndis_filter_open_device(nvdev->extension);
1518 }
1519
1520 int rndis_filter_close(struct netvsc_device *nvdev)
1521 {
1522 if (!nvdev)
1523 return -EINVAL;
1524
1525 return rndis_filter_close_device(nvdev->extension);
1526 }