Commit | Line | Data |
---|---|---|
48925e37 | 1 | /* A network driver using virtio. |
296f96fc RR |
2 | * |
3 | * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
adf8d3ff | 16 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
296f96fc RR |
17 | */ |
18 | //#define DEBUG | |
19 | #include <linux/netdevice.h> | |
20 | #include <linux/etherdevice.h> | |
a9ea3fc6 | 21 | #include <linux/ethtool.h> |
296f96fc RR |
22 | #include <linux/module.h> |
23 | #include <linux/virtio.h> | |
24 | #include <linux/virtio_net.h> | |
25 | #include <linux/scatterlist.h> | |
e918085a | 26 | #include <linux/if_vlan.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
8de4b2f3 | 28 | #include <linux/cpu.h> |
ab7db917 | 29 | #include <linux/average.h> |
91815639 | 30 | #include <net/busy_poll.h> |
296f96fc | 31 | |
d34710e3 | 32 | static int napi_weight = NAPI_POLL_WEIGHT; |
6c0cd7c0 DL |
33 | module_param(napi_weight, int, 0444); |
34 | ||
eb939922 | 35 | static bool csum = true, gso = true; |
34a48579 RR |
36 | module_param(csum, bool, 0444); |
37 | module_param(gso, bool, 0444); | |
38 | ||
296f96fc | 39 | /* FIXME: MTU in config. */ |
5061de36 | 40 | #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) |
3f2c31d9 | 41 | #define GOOD_COPY_LEN 128 |
296f96fc | 42 | |
5377d758 JB |
43 | /* RX packet size EWMA. The average packet size is used to determine the packet |
44 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | |
45 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | |
46 | * term, transient changes in packet size. | |
ab7db917 | 47 | */ |
5377d758 | 48 | DECLARE_EWMA(pkt_len, 1, 64) |
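/* With a weight of 64, each sample only moves the average by about 1/64 of
 * the difference: avg <- avg + (sample - avg) / 64 (ignoring the fixed-point
 * precision factor), so a short burst of unusually small or large packets
 * barely changes the buffer size chosen at refill time.
 */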
ab7db917 MD |
49 | |
50 | /* Minimum alignment for mergeable packet buffers. */ | |
51 | #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) | |
52 | ||
66846048 | 53 | #define VIRTNET_DRIVER_VERSION "1.0.0" |
2a41f71d | 54 | |
3fa2a1df | 55 | struct virtnet_stats { |
83a27052 ED |
56 | struct u64_stats_sync tx_syncp; |
57 | struct u64_stats_sync rx_syncp; | |
3fa2a1df | 58 | u64 tx_bytes; |
59 | u64 tx_packets; | |
60 | ||
61 | u64 rx_bytes; | |
62 | u64 rx_packets; | |
63 | }; | |
64 | ||
e9d7417b JW |
65 | /* Internal representation of a send virtqueue */ |
66 | struct send_queue { | |
67 | /* Virtqueue associated with this send queue */ |
68 | struct virtqueue *vq; | |
69 | ||
70 | /* TX: fragments + linear part + virtio header */ | |
71 | struct scatterlist sg[MAX_SKB_FRAGS + 2]; | |
986a4f4d JW |
72 | |
73 | /* Name of the send queue: output.$index */ | |
74 | char name[40]; | |
e9d7417b JW |
75 | }; |
76 | ||
77 | /* Internal representation of a receive virtqueue */ | |
78 | struct receive_queue { | |
79 | /* Virtqueue associated with this receive_queue */ | |
80 | struct virtqueue *vq; | |
81 | ||
296f96fc RR |
82 | struct napi_struct napi; |
83 | ||
e9d7417b JW |
84 | /* Chain pages by the private ptr. */ |
85 | struct page *pages; | |
86 | ||
ab7db917 | 87 | /* Average packet length for mergeable receive buffers. */ |
5377d758 | 88 | struct ewma_pkt_len mrg_avg_pkt_len; |
ab7db917 | 89 | |
fb51879d MD |
90 | /* Page frag for packet buffer allocation. */ |
91 | struct page_frag alloc_frag; | |
92 | ||
e9d7417b JW |
93 | /* RX: fragments + linear part + virtio header */ |
94 | struct scatterlist sg[MAX_SKB_FRAGS + 2]; | |
986a4f4d JW |
95 | |
96 | /* Name of this receive queue: input.$index */ | |
97 | char name[40]; | |
e9d7417b JW |
98 | }; |
99 | ||
100 | struct virtnet_info { | |
101 | struct virtio_device *vdev; | |
102 | struct virtqueue *cvq; | |
103 | struct net_device *dev; | |
986a4f4d JW |
104 | struct send_queue *sq; |
105 | struct receive_queue *rq; | |
e9d7417b JW |
106 | unsigned int status; |
107 | ||
986a4f4d JW |
108 | /* Max # of queue pairs supported by the device */ |
109 | u16 max_queue_pairs; | |
110 | ||
111 | /* # of queue pairs currently used by the driver */ | |
112 | u16 curr_queue_pairs; | |
113 | ||
97402b96 HX |
114 | /* I like... big packets and I cannot lie! */ |
115 | bool big_packets; | |
116 | ||
3f2c31d9 MM |
117 | /* Host will merge rx buffers for big packets (shake it! shake it!) */ |
118 | bool mergeable_rx_bufs; | |
119 | ||
986a4f4d JW |
120 | /* Has control virtqueue */ |
121 | bool has_cvq; | |
122 | ||
e7428e95 MT |
123 | /* Host can handle any s/g split between our header and packet data */ |
124 | bool any_header_sg; | |
125 | ||
012873d0 MT |
126 | /* Packet virtio header size */ |
127 | u8 hdr_len; | |
128 | ||
3fa2a1df | 129 | /* Active statistics */ |
130 | struct virtnet_stats __percpu *stats; | |
131 | ||
3161e453 RR |
132 | /* Work struct for refilling if we run low on memory. */ |
133 | struct delayed_work refill; | |
134 | ||
586d17c5 JW |
135 | /* Work struct for config space updates */ |
136 | struct work_struct config_work; | |
137 | ||
986a4f4d JW |
138 | /* Is the affinity hint set for virtqueues? */ |
139 | bool affinity_hint_set; | |
47be2479 | 140 | |
8017c279 SAS |
141 | /* CPU hotplug instances for online & dead */ |
142 | struct hlist_node node; | |
143 | struct hlist_node node_dead; | |
2ac46030 MT |
144 | |
145 | /* Control VQ buffers: protected by the rtnl lock */ | |
146 | struct virtio_net_ctrl_hdr ctrl_hdr; | |
147 | virtio_net_ctrl_ack ctrl_status; | |
a725ee3e | 148 | struct virtio_net_ctrl_mq ctrl_mq; |
2ac46030 MT |
149 | u8 ctrl_promisc; |
150 | u8 ctrl_allmulti; | |
a725ee3e | 151 | u16 ctrl_vid; |
16032be5 NA |
152 | |
153 | /* Ethtool settings */ | |
154 | u8 duplex; | |
155 | u32 speed; | |
296f96fc RR |
156 | }; |
157 | ||
9ab86bbc | 158 | struct padded_vnet_hdr { |
012873d0 | 159 | struct virtio_net_hdr_mrg_rxbuf hdr; |
9ab86bbc | 160 | /* |
012873d0 MT |
161 | * hdr is in a separate sg buffer, and the data sg buffer shares the same
162 | * page with this header sg. This padding makes the next sg 16-byte
163 | * aligned after the header. | |
9ab86bbc | 164 | */ |
012873d0 | 165 | char padding[4]; |
9ab86bbc SM |
166 | }; |
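/* Concretely: the mergeable rx header is 12 bytes, so the 4 bytes of padding
 * above round the header region up to 16 bytes, and add_recvbuf_big() starts
 * the data sg at offset sizeof(struct padded_vnet_hdr) == 16 within the same
 * page.
 */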
167 | ||
986a4f4d JW |
168 | /* Converting between virtqueue no. and kernel tx/rx queue no. |
169 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq | |
170 | */ | |
171 | static int vq2txq(struct virtqueue *vq) | |
172 | { | |
9d0ca6ed | 173 | return (vq->index - 1) / 2; |
986a4f4d JW |
174 | } |
175 | ||
176 | static int txq2vq(int txq) | |
177 | { | |
178 | return txq * 2 + 1; | |
179 | } | |
180 | ||
181 | static int vq2rxq(struct virtqueue *vq) | |
182 | { | |
9d0ca6ed | 183 | return vq->index / 2; |
986a4f4d JW |
184 | } |
185 | ||
186 | static int rxq2vq(int rxq) | |
187 | { | |
188 | return rxq * 2; | |
189 | } | |
190 | ||
012873d0 | 191 | static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb) |
296f96fc | 192 | { |
012873d0 | 193 | return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb; |
296f96fc RR |
194 | } |
195 | ||
9ab86bbc SM |
196 | /* |
197 | * private is used to chain pages for big packets; put the whole
198 | * most recently used list at the beginning for reuse
199 | */ | |
e9d7417b | 200 | static void give_pages(struct receive_queue *rq, struct page *page) |
0a888fd1 | 201 | { |
9ab86bbc | 202 | struct page *end; |
0a888fd1 | 203 | |
e9d7417b | 204 | /* Find end of list, sew whole thing into vi->rq.pages. */ |
9ab86bbc | 205 | for (end = page; end->private; end = (struct page *)end->private); |
e9d7417b JW |
206 | end->private = (unsigned long)rq->pages; |
207 | rq->pages = page; | |
0a888fd1 MM |
208 | } |
209 | ||
e9d7417b | 210 | static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) |
fb6813f4 | 211 | { |
e9d7417b | 212 | struct page *p = rq->pages; |
fb6813f4 | 213 | |
9ab86bbc | 214 | if (p) { |
e9d7417b | 215 | rq->pages = (struct page *)p->private; |
9ab86bbc SM |
216 | /* clear private here, it is used to chain pages */ |
217 | p->private = 0; | |
218 | } else | |
fb6813f4 RR |
219 | p = alloc_page(gfp_mask); |
220 | return p; | |
221 | } | |
222 | ||
e9d7417b | 223 | static void skb_xmit_done(struct virtqueue *vq) |
296f96fc | 224 | { |
e9d7417b | 225 | struct virtnet_info *vi = vq->vdev->priv; |
296f96fc | 226 | |
2cb9c6ba | 227 | /* Suppress further interrupts. */ |
e9d7417b | 228 | virtqueue_disable_cb(vq); |
11a3a154 | 229 | |
363f1514 | 230 | /* We were probably waiting for more output buffers. */ |
986a4f4d | 231 | netif_wake_subqueue(vi->dev, vq2txq(vq)); |
296f96fc RR |
232 | } |
233 | ||
ab7db917 MD |
234 | static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) |
235 | { | |
236 | unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1); | |
237 | return (truesize + 1) * MERGEABLE_BUFFER_ALIGN; | |
238 | } | |
239 | ||
240 | static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx) | |
241 | { | |
242 | return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN); | |
243 | ||
244 | } | |
245 | ||
246 | static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize) | |
247 | { | |
248 | unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN; | |
249 | return (unsigned long)buf | (size - 1); | |
250 | } | |
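/* Packing example (MERGEABLE_BUFFER_ALIGN is 256 on most configurations):
 * a buffer at a 256-byte-aligned address with truesize 1536 is encoded as
 * addr | (1536/256 - 1), i.e. the value 5 in the low 8 bits. Decoding gives
 * the truesize back as (5 + 1) * 256 == 1536 and the address as ctx & -256.
 * This works because get_mergeable_buf_len() always returns a multiple of
 * the alignment.
 */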
251 | ||
3464645a | 252 | /* Called from bottom half context */ |
946fa564 MT |
253 | static struct sk_buff *page_to_skb(struct virtnet_info *vi, |
254 | struct receive_queue *rq, | |
2613af0e MD |
255 | struct page *page, unsigned int offset, |
256 | unsigned int len, unsigned int truesize) | |
9ab86bbc SM |
257 | { |
258 | struct sk_buff *skb; | |
012873d0 | 259 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
2613af0e | 260 | unsigned int copy, hdr_len, hdr_padded_len; |
9ab86bbc | 261 | char *p; |
fb6813f4 | 262 | |
2613af0e | 263 | p = page_address(page) + offset; |
3f2c31d9 | 264 | |
9ab86bbc | 265 | /* copy small packet so we can reuse these pages for small data */ |
c67f5db8 | 266 | skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN); |
9ab86bbc SM |
267 | if (unlikely(!skb)) |
268 | return NULL; | |
3f2c31d9 | 269 | |
9ab86bbc | 270 | hdr = skb_vnet_hdr(skb); |
3f2c31d9 | 271 | |
012873d0 MT |
272 | hdr_len = vi->hdr_len; |
273 | if (vi->mergeable_rx_bufs) | |
274 | hdr_padded_len = sizeof(*hdr); |
275 | else | |
2613af0e | 276 | hdr_padded_len = sizeof(struct padded_vnet_hdr); |
3f2c31d9 | 277 | |
9ab86bbc | 278 | memcpy(hdr, p, hdr_len); |
3f2c31d9 | 279 | |
9ab86bbc | 280 | len -= hdr_len; |
2613af0e MD |
281 | offset += hdr_padded_len; |
282 | p += hdr_padded_len; | |
3f2c31d9 | 283 | |
9ab86bbc SM |
284 | copy = len; |
285 | if (copy > skb_tailroom(skb)) | |
286 | copy = skb_tailroom(skb); | |
287 | memcpy(skb_put(skb, copy), p, copy); | |
3f2c31d9 | 288 | |
9ab86bbc SM |
289 | len -= copy; |
290 | offset += copy; | |
3f2c31d9 | 291 | |
2613af0e MD |
292 | if (vi->mergeable_rx_bufs) { |
293 | if (len) | |
294 | skb_add_rx_frag(skb, 0, page, offset, len, truesize); | |
295 | else | |
296 | put_page(page); | |
297 | return skb; | |
298 | } | |
299 | ||
e878d78b SL |
300 | /* |
301 | * Verify that we can indeed put this data into a skb. | |
302 | * This is here to handle cases when the device erroneously | |
303 | * tries to receive more than is possible. This is usually | |
304 | * the case of a broken device. | |
305 | */ | |
306 | if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { | |
be443899 | 307 | net_dbg_ratelimited("%s: too much data\n", skb->dev->name); |
e878d78b SL |
308 | dev_kfree_skb(skb); |
309 | return NULL; | |
310 | } | |
2613af0e | 311 | BUG_ON(offset >= PAGE_SIZE); |
9ab86bbc | 312 | while (len) { |
2613af0e MD |
313 | unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); |
314 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, | |
315 | frag_size, truesize); | |
316 | len -= frag_size; | |
9ab86bbc SM |
317 | page = (struct page *)page->private; |
318 | offset = 0; | |
319 | } | |
3f2c31d9 | 320 | |
9ab86bbc | 321 | if (page) |
e9d7417b | 322 | give_pages(rq, page); |
3f2c31d9 | 323 | |
9ab86bbc SM |
324 | return skb; |
325 | } | |
3f2c31d9 | 326 | |
012873d0 | 327 | static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len) |
f121159d MT |
328 | { |
329 | struct sk_buff *skb = buf; |
330 | ||
012873d0 | 331 | len -= vi->hdr_len; |
f121159d MT |
332 | skb_trim(skb, len); |
333 | ||
334 | return skb; | |
335 | } | |
336 | ||
337 | static struct sk_buff *receive_big(struct net_device *dev, | |
946fa564 | 338 | struct virtnet_info *vi, |
f121159d MT |
339 | struct receive_queue *rq, |
340 | void *buf, | |
341 | unsigned int len) | |
342 | { | |
343 | struct page *page = buf; | |
946fa564 | 344 | struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); |
f121159d MT |
345 | |
346 | if (unlikely(!skb)) | |
347 | goto err; | |
348 | ||
349 | return skb; | |
350 | ||
351 | err: | |
352 | dev->stats.rx_dropped++; | |
353 | give_pages(rq, page); | |
354 | return NULL; | |
355 | } | |
356 | ||
8fc3b9e9 | 357 | static struct sk_buff *receive_mergeable(struct net_device *dev, |
fdd819b2 | 358 | struct virtnet_info *vi, |
8fc3b9e9 | 359 | struct receive_queue *rq, |
ab7db917 | 360 | unsigned long ctx, |
8fc3b9e9 | 361 | unsigned int len) |
9ab86bbc | 362 | { |
ab7db917 | 363 | void *buf = mergeable_ctx_to_buf_address(ctx); |
012873d0 MT |
364 | struct virtio_net_hdr_mrg_rxbuf *hdr = buf; |
365 | u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); | |
8fc3b9e9 MT |
366 | struct page *page = virt_to_head_page(buf); |
367 | int offset = buf - page_address(page); | |
ab7db917 MD |
368 | unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); |
369 | ||
946fa564 MT |
370 | struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, |
371 | truesize); | |
2613af0e | 372 | struct sk_buff *curr_skb = head_skb; |
9ab86bbc | 373 | |
8fc3b9e9 MT |
374 | if (unlikely(!curr_skb)) |
375 | goto err_skb; | |
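	/* The first buffer carried the header plus the start of the packet;
	 * hdr->num_buffers says how many descriptors the device used in
	 * total.  Each remaining buffer is appended to the skb as a page
	 * fragment, and once MAX_SKB_FRAGS is reached further data is
	 * chained into new zero-length skbs hanging off head_skb's frag_list.
	 */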
9ab86bbc | 376 | while (--num_buf) { |
8fc3b9e9 MT |
377 | int num_skb_frags; |
378 | ||
ab7db917 MD |
379 | ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); |
380 | if (unlikely(!ctx)) { | |
8fc3b9e9 | 381 | pr_debug("%s: rx error: %d buffers out of %d missing\n", |
fdd819b2 | 382 | dev->name, num_buf, |
012873d0 MT |
383 | virtio16_to_cpu(vi->vdev, |
384 | hdr->num_buffers)); | |
8fc3b9e9 MT |
385 | dev->stats.rx_length_errors++; |
386 | goto err_buf; | |
3f2c31d9 | 387 | } |
8fc3b9e9 | 388 | |
ab7db917 | 389 | buf = mergeable_ctx_to_buf_address(ctx); |
8fc3b9e9 | 390 | page = virt_to_head_page(buf); |
8fc3b9e9 MT |
391 | |
392 | num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | |
2613af0e MD |
393 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
394 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | |
8fc3b9e9 MT |
395 | |
396 | if (unlikely(!nskb)) | |
397 | goto err_skb; | |
2613af0e MD |
398 | if (curr_skb == head_skb) |
399 | skb_shinfo(curr_skb)->frag_list = nskb; | |
400 | else | |
401 | curr_skb->next = nskb; | |
402 | curr_skb = nskb; | |
403 | head_skb->truesize += nskb->truesize; | |
404 | num_skb_frags = 0; | |
405 | } | |
ab7db917 | 406 | truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); |
2613af0e MD |
407 | if (curr_skb != head_skb) { |
408 | head_skb->data_len += len; | |
409 | head_skb->len += len; | |
fb51879d | 410 | head_skb->truesize += truesize; |
2613af0e | 411 | } |
8fc3b9e9 | 412 | offset = buf - page_address(page); |
ba275241 JW |
413 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
414 | put_page(page); | |
415 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | |
fb51879d | 416 | len, truesize); |
ba275241 JW |
417 | } else { |
418 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | |
fb51879d | 419 | offset, len, truesize); |
ba275241 | 420 | } |
8fc3b9e9 MT |
421 | } |
422 | ||
5377d758 | 423 | ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); |
8fc3b9e9 MT |
424 | return head_skb; |
425 | ||
426 | err_skb: | |
427 | put_page(page); | |
428 | while (--num_buf) { | |
ab7db917 MD |
429 | ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); |
430 | if (unlikely(!ctx)) { | |
8fc3b9e9 MT |
431 | pr_debug("%s: rx error: %d buffers missing\n", |
432 | dev->name, num_buf); | |
433 | dev->stats.rx_length_errors++; | |
434 | break; | |
435 | } | |
ab7db917 | 436 | page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx)); |
8fc3b9e9 | 437 | put_page(page); |
9ab86bbc | 438 | } |
8fc3b9e9 MT |
439 | err_buf: |
440 | dev->stats.rx_dropped++; | |
441 | dev_kfree_skb(head_skb); | |
442 | return NULL; | |
9ab86bbc SM |
443 | } |
444 | ||
946fa564 MT |
445 | static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, |
446 | void *buf, unsigned int len) | |
9ab86bbc | 447 | { |
e9d7417b | 448 | struct net_device *dev = vi->dev; |
58472a76 | 449 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
9ab86bbc | 450 | struct sk_buff *skb; |
012873d0 | 451 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
3f2c31d9 | 452 | |
bcff3162 | 453 | if (unlikely(len < vi->hdr_len + ETH_HLEN)) { |
9ab86bbc SM |
454 | pr_debug("%s: short packet %i\n", dev->name, len); |
455 | dev->stats.rx_length_errors++; | |
ab7db917 MD |
456 | if (vi->mergeable_rx_bufs) { |
457 | unsigned long ctx = (unsigned long)buf; | |
458 | void *base = mergeable_ctx_to_buf_address(ctx); | |
459 | put_page(virt_to_head_page(base)); | |
460 | } else if (vi->big_packets) { | |
98bfd23c | 461 | give_pages(rq, buf); |
ab7db917 | 462 | } else { |
9ab86bbc | 463 | dev_kfree_skb(buf); |
ab7db917 | 464 | } |
9ab86bbc SM |
465 | return; |
466 | } | |
3f2c31d9 | 467 | |
f121159d | 468 | if (vi->mergeable_rx_bufs) |
fdd819b2 | 469 | skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); |
f121159d | 470 | else if (vi->big_packets) |
946fa564 | 471 | skb = receive_big(dev, vi, rq, buf, len); |
f121159d | 472 | else |
012873d0 | 473 | skb = receive_small(vi, buf, len); |
f121159d MT |
474 | |
475 | if (unlikely(!skb)) | |
476 | return; | |
3f2c31d9 | 477 | |
9ab86bbc | 478 | hdr = skb_vnet_hdr(skb); |
3fa2a1df | 479 | |
83a27052 | 480 | u64_stats_update_begin(&stats->rx_syncp); |
3fa2a1df | 481 | stats->rx_bytes += skb->len; |
482 | stats->rx_packets++; | |
83a27052 | 483 | u64_stats_update_end(&stats->rx_syncp); |
296f96fc | 484 | |
e858fae2 | 485 | if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) |
10a8d94a | 486 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
296f96fc | 487 | |
e858fae2 MR |
488 | if (virtio_net_hdr_to_skb(skb, &hdr->hdr, |
489 | virtio_is_little_endian(vi->vdev))) { | |
490 | net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", | |
491 | dev->name, hdr->hdr.gso_type, | |
492 | hdr->hdr.gso_size); | |
493 | goto frame_err; | |
296f96fc RR |
494 | } |
495 | ||
d1dc06dc MR |
496 | skb->protocol = eth_type_trans(skb, dev); |
497 | pr_debug("Receiving skb proto 0x%04x len %i type %i\n", | |
498 | ntohs(skb->protocol), skb->len, skb->pkt_type); | |
499 | ||
0fbd050a | 500 | napi_gro_receive(&rq->napi, skb); |
296f96fc RR |
501 | return; |
502 | ||
503 | frame_err: | |
504 | dev->stats.rx_frame_errors++; | |
296f96fc RR |
505 | dev_kfree_skb(skb); |
506 | } | |
507 | ||
946fa564 MT |
508 | static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, |
509 | gfp_t gfp) | |
296f96fc RR |
510 | { |
511 | struct sk_buff *skb; | |
012873d0 | 512 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
9ab86bbc | 513 | int err; |
3f2c31d9 | 514 | |
5061de36 | 515 | skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); |
9ab86bbc SM |
516 | if (unlikely(!skb)) |
517 | return -ENOMEM; | |
296f96fc | 518 | |
5061de36 | 519 | skb_put(skb, GOOD_PACKET_LEN); |
3f2c31d9 | 520 | |
9ab86bbc | 521 | hdr = skb_vnet_hdr(skb); |
547c890c | 522 | sg_init_table(rq->sg, 2); |
012873d0 | 523 | sg_set_buf(rq->sg, hdr, vi->hdr_len); |
e9d7417b | 524 | skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); |
97402b96 | 525 | |
9dc7b9e4 | 526 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); |
9ab86bbc SM |
527 | if (err < 0) |
528 | dev_kfree_skb(skb); | |
97402b96 | 529 | |
9ab86bbc SM |
530 | return err; |
531 | } | |
97402b96 | 532 | |
012873d0 MT |
533 | static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, |
534 | gfp_t gfp) | |
9ab86bbc | 535 | { |
9ab86bbc SM |
536 | struct page *first, *list = NULL; |
537 | char *p; | |
538 | int i, err, offset; | |
539 | ||
a5835440 RR |
540 | sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); |
541 | ||
e9d7417b | 542 | /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ |
9ab86bbc | 543 | for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { |
e9d7417b | 544 | first = get_a_page(rq, gfp); |
9ab86bbc SM |
545 | if (!first) { |
546 | if (list) | |
e9d7417b | 547 | give_pages(rq, list); |
9ab86bbc | 548 | return -ENOMEM; |
97402b96 | 549 | } |
e9d7417b | 550 | sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); |
97402b96 | 551 | |
9ab86bbc SM |
552 | /* chain new page in list head to match sg */ |
553 | first->private = (unsigned long)list; | |
554 | list = first; | |
555 | } | |
296f96fc | 556 | |
e9d7417b | 557 | first = get_a_page(rq, gfp); |
9ab86bbc | 558 | if (!first) { |
e9d7417b | 559 | give_pages(rq, list); |
9ab86bbc SM |
560 | return -ENOMEM; |
561 | } | |
562 | p = page_address(first); | |
563 | ||
e9d7417b | 564 | /* rq->sg[0], rq->sg[1] share the same page */ |
012873d0 MT |
565 | /* a separate rq->sg[0] for the header - required in case !any_header_sg */
566 | sg_set_buf(&rq->sg[0], p, vi->hdr_len); | |
9ab86bbc | 567 | |
e9d7417b | 568 | /* rq->sg[1] for data packet, from offset */ |
9ab86bbc | 569 | offset = sizeof(struct padded_vnet_hdr); |
e9d7417b | 570 | sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); |
9ab86bbc SM |
571 | |
572 | /* chain first in list head */ | |
573 | first->private = (unsigned long)list; | |
9dc7b9e4 RR |
574 | err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, |
575 | first, gfp); | |
9ab86bbc | 576 | if (err < 0) |
e9d7417b | 577 | give_pages(rq, first); |
9ab86bbc SM |
578 | |
579 | return err; | |
296f96fc RR |
580 | } |
581 | ||
5377d758 | 582 | static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) |
3f2c31d9 | 583 | { |
ab7db917 | 584 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
fbf28d78 MD |
585 | unsigned int len; |
586 | ||
5377d758 | 587 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), |
fbf28d78 MD |
588 | GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); |
589 | return ALIGN(len, MERGEABLE_BUFFER_ALIGN); | |
590 | } | |
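/* Example with 4 KiB pages: the 12-byte mergeable header plus an average
 * packet size clamped to at least GOOD_PACKET_LEN (1518) gives 1530 bytes,
 * which ALIGN() rounds up to 1536 -- each refill buffer is a small multiple
 * of MERGEABLE_BUFFER_ALIGN rather than a whole page.
 */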
591 | ||
592 | static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) | |
593 | { | |
fb51879d MD |
594 | struct page_frag *alloc_frag = &rq->alloc_frag; |
595 | char *buf; | |
ab7db917 | 596 | unsigned long ctx; |
3f2c31d9 | 597 | int err; |
fb51879d | 598 | unsigned int len, hole; |
3f2c31d9 | 599 | |
fbf28d78 | 600 | len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); |
ab7db917 | 601 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) |
9ab86bbc | 602 | return -ENOMEM; |
ab7db917 | 603 | |
fb51879d | 604 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
ab7db917 | 605 | ctx = mergeable_buf_to_ctx(buf, len); |
fb51879d | 606 | get_page(alloc_frag->page); |
fb51879d MD |
607 | alloc_frag->offset += len; |
608 | hole = alloc_frag->size - alloc_frag->offset; | |
ab7db917 MD |
609 | if (hole < len) { |
610 | /* To avoid internal fragmentation, if there is very likely not | |
611 | * enough space for another buffer, add the remaining space to | |
612 | * the current buffer. This extra space is not included in | |
613 | * the truesize stored in ctx. | |
614 | */ | |
fb51879d MD |
615 | len += hole; |
616 | alloc_frag->offset += hole; | |
617 | } | |
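	/* e.g. if 1536-byte buffers are carved out of a 32 KiB frag, the 21st
	 * buffer would leave only 512 unusable bytes behind; those bytes are
	 * folded into this buffer instead (len becomes 2048), while the
	 * truesize recorded in ctx above still says 1536.
	 */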
3f2c31d9 | 618 | |
fb51879d | 619 | sg_init_one(rq->sg, buf, len); |
ab7db917 | 620 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); |
9ab86bbc | 621 | if (err < 0) |
2613af0e | 622 | put_page(virt_to_head_page(buf)); |
3f2c31d9 | 623 | |
9ab86bbc SM |
624 | return err; |
625 | } | |
3f2c31d9 | 626 | |
b2baed69 RR |
627 | /* |
628 | * Returns false if we couldn't fill entirely (OOM). | |
629 | * | |
630 | * Normally run in the receive path, but can also be run from ndo_open | |
631 | * before we're receiving packets, or from refill_work which is | |
632 | * careful to disable receiving (using napi_disable). | |
633 | */ | |
946fa564 MT |
634 | static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, |
635 | gfp_t gfp) | |
9ab86bbc SM |
636 | { |
637 | int err; | |
1788f495 | 638 | bool oom; |
3f2c31d9 | 639 | |
fb51879d | 640 | gfp |= __GFP_COLD; |
9ab86bbc SM |
641 | do { |
642 | if (vi->mergeable_rx_bufs) | |
e9d7417b | 643 | err = add_recvbuf_mergeable(rq, gfp); |
9ab86bbc | 644 | else if (vi->big_packets) |
012873d0 | 645 | err = add_recvbuf_big(vi, rq, gfp); |
9ab86bbc | 646 | else |
946fa564 | 647 | err = add_recvbuf_small(vi, rq, gfp); |
3f2c31d9 | 648 | |
1788f495 | 649 | oom = err == -ENOMEM; |
9ed4cb07 | 650 | if (err) |
3f2c31d9 | 651 | break; |
b7dfde95 | 652 | } while (rq->vq->num_free); |
681daee2 | 653 | virtqueue_kick(rq->vq); |
3161e453 | 654 | return !oom; |
3f2c31d9 MM |
655 | } |
656 | ||
18445c4d | 657 | static void skb_recv_done(struct virtqueue *rvq) |
296f96fc RR |
658 | { |
659 | struct virtnet_info *vi = rvq->vdev->priv; | |
986a4f4d | 660 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
e9d7417b | 661 | |
18445c4d | 662 | /* Schedule NAPI; suppress further interrupts if successful. */
e9d7417b | 663 | if (napi_schedule_prep(&rq->napi)) { |
1915a712 | 664 | virtqueue_disable_cb(rvq); |
e9d7417b | 665 | __napi_schedule(&rq->napi); |
18445c4d | 666 | } |
296f96fc RR |
667 | } |
668 | ||
e9d7417b | 669 | static void virtnet_napi_enable(struct receive_queue *rq) |
3e9d08ec | 670 | { |
e9d7417b | 671 | napi_enable(&rq->napi); |
3e9d08ec BR |
672 | |
673 | /* If all buffers were filled by the other side before we enabled napi,
674 | * we won't get another interrupt, so process any outstanding packets
675 | * now. virtnet_poll wants to re-enable the queue, so we disable here.
676 | * We synchronize against interrupts via NAPI_STATE_SCHED. */
e9d7417b JW |
677 | if (napi_schedule_prep(&rq->napi)) { |
678 | virtqueue_disable_cb(rq->vq); | |
ec13ee80 | 679 | local_bh_disable(); |
e9d7417b | 680 | __napi_schedule(&rq->napi); |
ec13ee80 | 681 | local_bh_enable(); |
3e9d08ec BR |
682 | } |
683 | } | |
684 | ||
3161e453 RR |
685 | static void refill_work(struct work_struct *work) |
686 | { | |
e9d7417b JW |
687 | struct virtnet_info *vi = |
688 | container_of(work, struct virtnet_info, refill.work); | |
3161e453 | 689 | bool still_empty; |
986a4f4d JW |
690 | int i; |
691 | ||
55257d72 | 692 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
986a4f4d | 693 | struct receive_queue *rq = &vi->rq[i]; |
3161e453 | 694 | |
986a4f4d | 695 | napi_disable(&rq->napi); |
946fa564 | 696 | still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); |
986a4f4d | 697 | virtnet_napi_enable(rq); |
3161e453 | 698 | |
986a4f4d JW |
699 | /* In theory, this can happen: if we don't get any buffers in,
700 | * we will *never* try to fill again.
701 | */ | |
702 | if (still_empty) | |
703 | schedule_delayed_work(&vi->refill, HZ/2); | |
704 | } | |
3161e453 RR |
705 | } |
706 | ||
2ffa7598 | 707 | static int virtnet_receive(struct receive_queue *rq, int budget) |
296f96fc | 708 | { |
e9d7417b | 709 | struct virtnet_info *vi = rq->vq->vdev->priv; |
2ffa7598 | 710 | unsigned int len, received = 0; |
9ab86bbc | 711 | void *buf; |
296f96fc | 712 | |
296f96fc | 713 | while (received < budget && |
e9d7417b | 714 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { |
946fa564 | 715 | receive_buf(vi, rq, buf, len); |
296f96fc RR |
716 | received++; |
717 | } | |
718 | ||
be121f46 | 719 | if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { |
946fa564 | 720 | if (!try_fill_recv(vi, rq, GFP_ATOMIC)) |
3b07e9ca | 721 | schedule_delayed_work(&vi->refill, 0); |
3161e453 | 722 | } |
296f96fc | 723 | |
2ffa7598 JW |
724 | return received; |
725 | } | |
726 | ||
727 | static int virtnet_poll(struct napi_struct *napi, int budget) | |
728 | { | |
729 | struct receive_queue *rq = | |
730 | container_of(napi, struct receive_queue, napi); | |
faadb05f | 731 | unsigned int r, received; |
2ffa7598 | 732 | |
faadb05f | 733 | received = virtnet_receive(rq, budget); |
2ffa7598 | 734 | |
8329d98e RR |
735 | /* Out of packets? */ |
736 | if (received < budget) { | |
cbdadbbf | 737 | r = virtqueue_enable_cb_prepare(rq->vq); |
0fbd050a | 738 | napi_complete_done(napi, received); |
cbdadbbf | 739 | if (unlikely(virtqueue_poll(rq->vq, r)) && |
8e95a202 | 740 | napi_schedule_prep(napi)) { |
e9d7417b | 741 | virtqueue_disable_cb(rq->vq); |
288379f0 | 742 | __napi_schedule(napi); |
4265f161 | 743 | } |
296f96fc RR |
744 | } |
745 | ||
746 | return received; | |
747 | } | |
748 | ||
91815639 JW |
749 | #ifdef CONFIG_NET_RX_BUSY_POLL |
750 | /* must be called with local_bh_disable()d */ | |
751 | static int virtnet_busy_poll(struct napi_struct *napi) | |
752 | { | |
753 | struct receive_queue *rq = | |
754 | container_of(napi, struct receive_queue, napi); | |
755 | struct virtnet_info *vi = rq->vq->vdev->priv; | |
756 | int r, received = 0, budget = 4; | |
757 | ||
758 | if (!(vi->status & VIRTIO_NET_S_LINK_UP)) | |
759 | return LL_FLUSH_FAILED; | |
760 | ||
761 | if (!napi_schedule_prep(napi)) | |
762 | return LL_FLUSH_BUSY; | |
763 | ||
764 | virtqueue_disable_cb(rq->vq); | |
765 | ||
766 | again: | |
767 | received += virtnet_receive(rq, budget); | |
768 | ||
769 | r = virtqueue_enable_cb_prepare(rq->vq); | |
770 | clear_bit(NAPI_STATE_SCHED, &napi->state); | |
771 | if (unlikely(virtqueue_poll(rq->vq, r)) && | |
772 | napi_schedule_prep(napi)) { | |
773 | virtqueue_disable_cb(rq->vq); | |
774 | if (received < budget) { | |
775 | budget -= received; | |
776 | goto again; | |
777 | } else { | |
778 | __napi_schedule(napi); | |
779 | } | |
780 | } | |
781 | ||
782 | return received; | |
783 | } | |
784 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | |
785 | ||
986a4f4d JW |
786 | static int virtnet_open(struct net_device *dev) |
787 | { | |
788 | struct virtnet_info *vi = netdev_priv(dev); | |
789 | int i; | |
790 | ||
e4166625 JW |
791 | for (i = 0; i < vi->max_queue_pairs; i++) { |
792 | if (i < vi->curr_queue_pairs) | |
793 | /* Make sure we have some buffers: if oom use wq. */ | |
946fa564 | 794 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
e4166625 | 795 | schedule_delayed_work(&vi->refill, 0); |
986a4f4d JW |
796 | virtnet_napi_enable(&vi->rq[i]); |
797 | } | |
798 | ||
799 | return 0; | |
800 | } | |
801 | ||
b7dfde95 | 802 | static void free_old_xmit_skbs(struct send_queue *sq) |
296f96fc RR |
803 | { |
804 | struct sk_buff *skb; | |
6ee57bcc | 805 | unsigned int len; |
e9d7417b | 806 | struct virtnet_info *vi = sq->vq->vdev->priv; |
58472a76 | 807 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
296f96fc | 808 | |
e9d7417b | 809 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
296f96fc | 810 | pr_debug("Sent skb %p\n", skb); |
3fa2a1df | 811 | |
83a27052 | 812 | u64_stats_update_begin(&stats->tx_syncp); |
3fa2a1df | 813 | stats->tx_bytes += skb->len; |
814 | stats->tx_packets++; | |
83a27052 | 815 | u64_stats_update_end(&stats->tx_syncp); |
3fa2a1df | 816 | |
ed79bab8 | 817 | dev_kfree_skb_any(skb); |
296f96fc RR |
818 | } |
819 | } | |
820 | ||
e9d7417b | 821 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
296f96fc | 822 | { |
012873d0 | 823 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
296f96fc | 824 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
e9d7417b | 825 | struct virtnet_info *vi = sq->vq->vdev->priv; |
7bedc7dc | 826 | unsigned num_sg; |
012873d0 | 827 | unsigned hdr_len = vi->hdr_len; |
e7428e95 | 828 | bool can_push; |
296f96fc | 829 | |
e174961c | 830 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
e7428e95 MT |
831 | |
832 | can_push = vi->any_header_sg && | |
833 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && | |
834 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; | |
835 | /* Even if we can, don't push here yet as this would skew | |
836 | * csum_start offset below. */ | |
837 | if (can_push) | |
012873d0 | 838 | hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); |
e7428e95 MT |
839 | else |
840 | hdr = skb_vnet_hdr(skb); | |
296f96fc | 841 | |
e858fae2 MR |
842 | if (virtio_net_hdr_from_skb(skb, &hdr->hdr, |
843 | virtio_is_little_endian(vi->vdev))) | |
844 | BUG(); | |
296f96fc | 845 | |
3f2c31d9 | 846 | if (vi->mergeable_rx_bufs) |
012873d0 | 847 | hdr->num_buffers = 0; |
3f2c31d9 | 848 | |
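	/* Lay out the sg list: when the header can be pushed into the skb's
	 * headroom it is contiguous with the packet data and a single
	 * skb_to_sgvec() covers everything; otherwise the header gets its own
	 * sg entry in front of the data, hence the extra slot below.
	 */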
547c890c | 849 | sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); |
e7428e95 MT |
850 | if (can_push) { |
851 | __skb_push(skb, hdr_len); | |
852 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); | |
853 | /* Pull header back to avoid skew in tx bytes calculations. */ | |
854 | __skb_pull(skb, hdr_len); | |
855 | } else { | |
856 | sg_set_buf(sq->sg, hdr, hdr_len); | |
857 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; | |
858 | } | |
9dc7b9e4 | 859 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
11a3a154 RR |
860 | } |
861 | ||
424efe9c | 862 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
99ffc696 RR |
863 | { |
864 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d JW |
865 | int qnum = skb_get_queue_mapping(skb); |
866 | struct send_queue *sq = &vi->sq[qnum]; | |
9ed4cb07 | 867 | int err; |
4b7fd2e6 MT |
868 | struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); |
869 | bool kick = !skb->xmit_more; | |
2cb9c6ba | 870 | |
2cb9c6ba | 871 | /* Free up any pending old buffers before queueing new ones. */ |
e9d7417b | 872 | free_old_xmit_skbs(sq); |
99ffc696 | 873 | |
074c3582 JK |
874 | /* timestamp packet in software */ |
875 | skb_tx_timestamp(skb); | |
876 | ||
03f191ba | 877 | /* Try to transmit */ |
b7dfde95 | 878 | err = xmit_skb(sq, skb); |
48925e37 | 879 | |
9ed4cb07 | 880 | /* This should not happen! */ |
681daee2 | 881 | if (unlikely(err)) { |
9ed4cb07 RR |
882 | dev->stats.tx_fifo_errors++; |
883 | if (net_ratelimit()) | |
884 | dev_warn(&dev->dev, | |
b7dfde95 | 885 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
58eba97d | 886 | dev->stats.tx_dropped++; |
85e94525 | 887 | dev_kfree_skb_any(skb); |
58eba97d | 888 | return NETDEV_TX_OK; |
296f96fc | 889 | } |
03f191ba | 890 | |
48925e37 RR |
891 | /* Don't wait up for transmitted skbs to be freed. */ |
892 | skb_orphan(skb); | |
893 | nf_reset(skb); | |
894 | ||
60302ff6 MT |
895 | /* If running out of space, stop queue to avoid getting packets that we |
896 | * are then unable to transmit. | |
897 | * An alternative would be to force queuing layer to requeue the skb by | |
898 | * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be | |
899 | * returned in a normal path of operation: it means that driver is not | |
900 | * maintaining the TX queue stop/start state properly, and causes | |
901 | * the stack to do a non-trivial amount of useless work. | |
902 | * Since most packets only take 1 or 2 ring slots, stopping the queue | |
903 | * early means 16 slots are typically wasted. | |
d631b94e | 904 | */ |
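	/* 2+MAX_SKB_FRAGS is the worst case for one skb: a descriptor for the
	 * virtio header, one for the linear part and one per page fragment.
	 */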
b7dfde95 | 905 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
986a4f4d | 906 | netif_stop_subqueue(dev, qnum); |
e9d7417b | 907 | if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
48925e37 | 908 | /* More just got used, free them then recheck. */ |
b7dfde95 LT |
909 | free_old_xmit_skbs(sq); |
910 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { | |
986a4f4d | 911 | netif_start_subqueue(dev, qnum); |
e9d7417b | 912 | virtqueue_disable_cb(sq->vq); |
48925e37 RR |
913 | } |
914 | } | |
99ffc696 | 915 | } |
48925e37 | 916 | |
4b7fd2e6 | 917 | if (kick || netif_xmit_stopped(txq)) |
0b725a2c | 918 | virtqueue_kick(sq->vq); |
296f96fc | 919 | |
0b725a2c | 920 | return NETDEV_TX_OK; |
c223a078 DM |
921 | } |
922 | ||
40cbfc37 AK |
923 | /* |
924 | * Send command via the control virtqueue and check status. Commands | |
925 | * supported by the hypervisor, as indicated by feature bits, should | |
788a8b6d | 926 | * never fail unless improperly formatted. |
40cbfc37 AK |
927 | */ |
928 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | |
d24bae32 | 929 | struct scatterlist *out) |
40cbfc37 | 930 | { |
f7bc9594 | 931 | struct scatterlist *sgs[4], hdr, stat; |
d24bae32 | 932 | unsigned out_num = 0, tmp; |
40cbfc37 AK |
933 | |
934 | /* Caller should know better */ | |
f7bc9594 | 935 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
40cbfc37 | 936 | |
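	/* The command goes out as up to three sg entries: the class/cmd
	 * header, an optional command-specific payload supplied by the
	 * caller, and one in-buffer that the device fills with
	 * VIRTIO_NET_OK or VIRTIO_NET_ERR.
	 */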
2ac46030 MT |
937 | vi->ctrl_status = ~0; |
938 | vi->ctrl_hdr.class = class; | |
939 | vi->ctrl_hdr.cmd = cmd; | |
f7bc9594 | 940 | /* Add header */ |
2ac46030 | 941 | sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); |
f7bc9594 | 942 | sgs[out_num++] = &hdr; |
40cbfc37 | 943 | |
f7bc9594 RR |
944 | if (out) |
945 | sgs[out_num++] = out; | |
40cbfc37 | 946 | |
f7bc9594 | 947 | /* Add return status. */ |
2ac46030 | 948 | sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); |
d24bae32 | 949 | sgs[out_num] = &stat; |
40cbfc37 | 950 | |
d24bae32 | 951 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
a7c58146 | 952 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
40cbfc37 | 953 | |
67975901 | 954 | if (unlikely(!virtqueue_kick(vi->cvq))) |
2ac46030 | 955 | return vi->ctrl_status == VIRTIO_NET_OK; |
40cbfc37 AK |
956 | |
957 | /* Spin for a response; the kick causes an ioport write, trapping
958 | * into the hypervisor, so the request should be handled immediately. | |
959 | */ | |
047b9b94 HG |
960 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
961 | !virtqueue_is_broken(vi->cvq)) | |
40cbfc37 AK |
962 | cpu_relax(); |
963 | ||
2ac46030 | 964 | return vi->ctrl_status == VIRTIO_NET_OK; |
40cbfc37 AK |
965 | } |
966 | ||
9c46f6d4 AW |
967 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
968 | { | |
969 | struct virtnet_info *vi = netdev_priv(dev); | |
970 | struct virtio_device *vdev = vi->vdev; | |
f2f2c8b4 | 971 | int ret; |
7e58d5ae AK |
972 | struct sockaddr *addr = p; |
973 | struct scatterlist sg; | |
9c46f6d4 | 974 | |
7e58d5ae | 975 | ret = eth_prepare_mac_addr_change(dev, p); |
f2f2c8b4 JP |
976 | if (ret) |
977 | return ret; | |
9c46f6d4 | 978 | |
7e58d5ae AK |
979 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
980 | sg_init_one(&sg, addr->sa_data, dev->addr_len); | |
981 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 982 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
7e58d5ae AK |
983 | dev_warn(&vdev->dev, |
984 | "Failed to set mac address by vq command.\n"); | |
985 | return -EINVAL; | |
986 | } | |
7e93a02f MT |
987 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && |
988 | !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
855e0c52 RR |
989 | unsigned int i; |
990 | ||
991 | /* Naturally, this has an atomicity problem. */ | |
992 | for (i = 0; i < dev->addr_len; i++) | |
993 | virtio_cwrite8(vdev, | |
994 | offsetof(struct virtio_net_config, mac) + | |
995 | i, addr->sa_data[i]); | |
7e58d5ae AK |
996 | } |
997 | ||
998 | eth_commit_mac_addr_change(dev, p); | |
9c46f6d4 AW |
999 | |
1000 | return 0; | |
1001 | } | |
1002 | ||
3fa2a1df | 1003 | static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, |
1004 | struct rtnl_link_stats64 *tot) | |
1005 | { | |
1006 | struct virtnet_info *vi = netdev_priv(dev); | |
1007 | int cpu; | |
1008 | unsigned int start; | |
1009 | ||
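	/* Each CPU keeps its own counters; the fetch_begin/retry pairs below
	 * are seqcount read sections, so the 64-bit byte/packet pairs are
	 * read consistently even on 32-bit machines (on 64-bit they compile
	 * away).
	 */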
1010 | for_each_possible_cpu(cpu) { | |
58472a76 | 1011 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); |
3fa2a1df | 1012 | u64 tpackets, tbytes, rpackets, rbytes; |
1013 | ||
1014 | do { | |
57a7744e | 1015 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); |
3fa2a1df | 1016 | tpackets = stats->tx_packets; |
1017 | tbytes = stats->tx_bytes; | |
57a7744e | 1018 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); |
83a27052 ED |
1019 | |
1020 | do { | |
57a7744e | 1021 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); |
3fa2a1df | 1022 | rpackets = stats->rx_packets; |
1023 | rbytes = stats->rx_bytes; | |
57a7744e | 1024 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); |
3fa2a1df | 1025 | |
1026 | tot->rx_packets += rpackets; | |
1027 | tot->tx_packets += tpackets; | |
1028 | tot->rx_bytes += rbytes; | |
1029 | tot->tx_bytes += tbytes; | |
1030 | } | |
1031 | ||
1032 | tot->tx_dropped = dev->stats.tx_dropped; | |
021ac8d3 | 1033 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
3fa2a1df | 1034 | tot->rx_dropped = dev->stats.rx_dropped; |
1035 | tot->rx_length_errors = dev->stats.rx_length_errors; | |
1036 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | |
1037 | ||
1038 | return tot; | |
1039 | } | |
1040 | ||
da74e89d AS |
1041 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1042 | static void virtnet_netpoll(struct net_device *dev) | |
1043 | { | |
1044 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 1045 | int i; |
da74e89d | 1046 | |
986a4f4d JW |
1047 | for (i = 0; i < vi->curr_queue_pairs; i++) |
1048 | napi_schedule(&vi->rq[i].napi); | |
da74e89d AS |
1049 | } |
1050 | #endif | |
1051 | ||
586d17c5 JW |
1052 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
1053 | { | |
1054 | rtnl_lock(); | |
1055 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, | |
d24bae32 | 1056 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
586d17c5 JW |
1057 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
1058 | rtnl_unlock(); | |
1059 | } | |
1060 | ||
986a4f4d JW |
1061 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
1062 | { | |
1063 | struct scatterlist sg; | |
986a4f4d JW |
1064 | struct net_device *dev = vi->dev; |
1065 | ||
1066 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | |
1067 | return 0; | |
1068 | ||
a725ee3e AL |
1069 | vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1070 | sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); | |
986a4f4d JW |
1071 | |
1072 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | |
d24bae32 | 1073 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
986a4f4d JW |
1074 | dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", |
1075 | queue_pairs); | |
1076 | return -EINVAL; | |
55257d72 | 1077 | } else { |
986a4f4d | 1078 | vi->curr_queue_pairs = queue_pairs; |
35ed159b JW |
1079 | /* virtnet_open() will refill when device is going to up. */ |
1080 | if (dev->flags & IFF_UP) | |
1081 | schedule_delayed_work(&vi->refill, 0); | |
55257d72 | 1082 | } |
986a4f4d JW |
1083 | |
1084 | return 0; | |
1085 | } | |
1086 | ||
296f96fc RR |
1087 | static int virtnet_close(struct net_device *dev) |
1088 | { | |
1089 | struct virtnet_info *vi = netdev_priv(dev); | |
986a4f4d | 1090 | int i; |
296f96fc | 1091 | |
b2baed69 RR |
1092 | /* Make sure refill_work doesn't re-enable napi! */ |
1093 | cancel_delayed_work_sync(&vi->refill); | |
986a4f4d JW |
1094 | |
1095 | for (i = 0; i < vi->max_queue_pairs; i++) | |
1096 | napi_disable(&vi->rq[i].napi); | |
296f96fc | 1097 | |
296f96fc RR |
1098 | return 0; |
1099 | } | |
1100 | ||
2af7698e AW |
1101 | static void virtnet_set_rx_mode(struct net_device *dev) |
1102 | { | |
1103 | struct virtnet_info *vi = netdev_priv(dev); | |
f565a7c2 | 1104 | struct scatterlist sg[2]; |
f565a7c2 | 1105 | struct virtio_net_ctrl_mac *mac_data; |
ccffad25 | 1106 | struct netdev_hw_addr *ha; |
32e7bfc4 | 1107 | int uc_count; |
4cd24eaf | 1108 | int mc_count; |
f565a7c2 AW |
1109 | void *buf; |
1110 | int i; | |
2af7698e | 1111 | |
788a8b6d | 1112 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
2af7698e AW |
1113 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1114 | return; | |
1115 | ||
2ac46030 MT |
1116 | vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); |
1117 | vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | |
2af7698e | 1118 | |
2ac46030 | 1119 | sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); |
2af7698e AW |
1120 | |
1121 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1122 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
2af7698e | 1123 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
2ac46030 | 1124 | vi->ctrl_promisc ? "en" : "dis"); |
2af7698e | 1125 | |
2ac46030 | 1126 | sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); |
2af7698e AW |
1127 | |
1128 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | |
d24bae32 | 1129 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
2af7698e | 1130 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
2ac46030 | 1131 | vi->ctrl_allmulti ? "en" : "dis"); |
f565a7c2 | 1132 | |
32e7bfc4 | 1133 | uc_count = netdev_uc_count(dev); |
4cd24eaf | 1134 | mc_count = netdev_mc_count(dev); |
f565a7c2 | 1135 | /* MAC filter - use one buffer for both lists */ |
4cd24eaf JP |
1136 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
1137 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); | |
1138 | mac_data = buf; | |
e68ed8f0 | 1139 | if (!buf) |
f565a7c2 | 1140 | return; |
f565a7c2 | 1141 | |
23e258e1 AW |
1142 | sg_init_table(sg, 2); |
1143 | ||
f565a7c2 | 1144 | /* Store the unicast list and count in the front of the buffer */ |
fdd819b2 | 1145 | mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); |
ccffad25 | 1146 | i = 0; |
32e7bfc4 | 1147 | netdev_for_each_uc_addr(ha, dev) |
ccffad25 | 1148 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c2 AW |
1149 | |
1150 | sg_set_buf(&sg[0], mac_data, | |
32e7bfc4 | 1151 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
f565a7c2 AW |
1152 | |
1153 | /* multicast list and count fill the end */ | |
32e7bfc4 | 1154 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
f565a7c2 | 1155 | |
fdd819b2 | 1156 | mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); |
567ec874 | 1157 | i = 0; |
22bedad3 JP |
1158 | netdev_for_each_mc_addr(ha, dev) |
1159 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | |
f565a7c2 AW |
1160 | |
1161 | sg_set_buf(&sg[1], mac_data, | |
4cd24eaf | 1162 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
f565a7c2 AW |
1163 | |
1164 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | |
d24bae32 | 1165 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
99e872ae | 1166 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
f565a7c2 AW |
1167 | |
1168 | kfree(buf); | |
2af7698e AW |
1169 | } |
1170 | ||
80d5c368 PM |
1171 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
1172 | __be16 proto, u16 vid) | |
0bde9569 AW |
1173 | { |
1174 | struct virtnet_info *vi = netdev_priv(dev); | |
1175 | struct scatterlist sg; | |
1176 | ||
a725ee3e AL |
1177 | vi->ctrl_vid = vid; |
1178 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | |
0bde9569 AW |
1179 | |
1180 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1181 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
0bde9569 | 1182 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
8e586137 | 1183 | return 0; |
0bde9569 AW |
1184 | } |
1185 | ||
80d5c368 PM |
1186 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
1187 | __be16 proto, u16 vid) | |
0bde9569 AW |
1188 | { |
1189 | struct virtnet_info *vi = netdev_priv(dev); | |
1190 | struct scatterlist sg; | |
1191 | ||
a725ee3e AL |
1192 | vi->ctrl_vid = vid; |
1193 | sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); | |
0bde9569 AW |
1194 | |
1195 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | |
d24bae32 | 1196 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
0bde9569 | 1197 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
8e586137 | 1198 | return 0; |
0bde9569 AW |
1199 | } |
1200 | ||
8898c21c | 1201 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
986a4f4d JW |
1202 | { |
1203 | int i; | |
1204 | ||
8898c21c WG |
1205 | if (vi->affinity_hint_set) { |
1206 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
47be2479 WG |
1207 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
1208 | virtqueue_set_affinity(vi->sq[i].vq, -1); | |
1209 | } | |
1210 | ||
8898c21c WG |
1211 | vi->affinity_hint_set = false; |
1212 | } | |
8898c21c | 1213 | } |
47be2479 | 1214 | |
8898c21c WG |
1215 | static void virtnet_set_affinity(struct virtnet_info *vi) |
1216 | { | |
1217 | int i; | |
1218 | int cpu; | |
986a4f4d JW |
1219 | |
1220 | /* In multiqueue mode, when the number of cpus is equal to the number of
1221 | * queue pairs, we let each queue pair be private to one cpu by
1222 | * setting the affinity hint to eliminate the contention.
1223 | */ | |
8898c21c WG |
1224 | if (vi->curr_queue_pairs == 1 || |
1225 | vi->max_queue_pairs != num_online_cpus()) { | |
1226 | virtnet_clean_affinity(vi, -1); | |
1227 | return; | |
986a4f4d JW |
1228 | } |
1229 | ||
8898c21c WG |
1230 | i = 0; |
1231 | for_each_online_cpu(cpu) { | |
986a4f4d JW |
1232 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
1233 | virtqueue_set_affinity(vi->sq[i].vq, cpu); | |
9bb8ca86 | 1234 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
8898c21c | 1235 | i++; |
986a4f4d JW |
1236 | } |
1237 | ||
8898c21c | 1238 | vi->affinity_hint_set = true; |
986a4f4d JW |
1239 | } |
1240 | ||
8017c279 | 1241 | static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node) |
8de4b2f3 | 1242 | { |
8017c279 SAS |
1243 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, |
1244 | node); | |
1245 | virtnet_set_affinity(vi); | |
1246 | return 0; | |
1247 | } | |
8de4b2f3 | 1248 | |
8017c279 SAS |
1249 | static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node) |
1250 | { | |
1251 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1252 | node_dead); | |
1253 | virtnet_set_affinity(vi); | |
1254 | return 0; | |
1255 | } | |
3ab098df | 1256 | |
8017c279 SAS |
1257 | static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node) |
1258 | { | |
1259 | struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, | |
1260 | node); | |
1261 | ||
1262 | virtnet_clean_affinity(vi, cpu); | |
1263 | return 0; | |
1264 | } | |
1265 | ||
1266 | static enum cpuhp_state virtionet_online; | |
1267 | ||
1268 | static int virtnet_cpu_notif_add(struct virtnet_info *vi) | |
1269 | { | |
1270 | int ret; | |
1271 | ||
1272 | ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); | |
1273 | if (ret) | |
1274 | return ret; | |
1275 | ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1276 | &vi->node_dead); | |
1277 | if (!ret) | |
1278 | return ret; | |
1279 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1280 | return ret; | |
1281 | } | |
1282 | ||
1283 | static void virtnet_cpu_notif_remove(struct virtnet_info *vi) | |
1284 | { | |
1285 | cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); | |
1286 | cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD, | |
1287 | &vi->node_dead); | |
986a4f4d JW |
1288 | } |
1289 | ||
8f9f4668 RJ |
1290 | static void virtnet_get_ringparam(struct net_device *dev, |
1291 | struct ethtool_ringparam *ring) | |
1292 | { | |
1293 | struct virtnet_info *vi = netdev_priv(dev); | |
1294 | ||
986a4f4d JW |
1295 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
1296 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); | |
8f9f4668 RJ |
1297 | ring->rx_pending = ring->rx_max_pending; |
1298 | ring->tx_pending = ring->tx_max_pending; | |
8f9f4668 RJ |
1299 | } |
1300 | ||
66846048 RJ |
1301 | |
1302 | static void virtnet_get_drvinfo(struct net_device *dev, | |
1303 | struct ethtool_drvinfo *info) | |
1304 | { | |
1305 | struct virtnet_info *vi = netdev_priv(dev); | |
1306 | struct virtio_device *vdev = vi->vdev; | |
1307 | ||
1308 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | |
1309 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); | |
1310 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); | |
1311 | ||
1312 | } | |
1313 | ||
d73bcd2c JW |
1314 | /* TODO: Eliminate OOO packets during switching */ |
1315 | static int virtnet_set_channels(struct net_device *dev, | |
1316 | struct ethtool_channels *channels) | |
1317 | { | |
1318 | struct virtnet_info *vi = netdev_priv(dev); | |
1319 | u16 queue_pairs = channels->combined_count; | |
1320 | int err; | |
1321 | ||
1322 | /* We don't support separate rx/tx channels. | |
1323 | * We don't allow setting 'other' channels. | |
1324 | */ | |
1325 | if (channels->rx_count || channels->tx_count || channels->other_count) | |
1326 | return -EINVAL; | |
1327 | ||
c18e9cd6 | 1328 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
d73bcd2c JW |
1329 | return -EINVAL; |
1330 | ||
47be2479 | 1331 | get_online_cpus(); |
d73bcd2c JW |
1332 | err = virtnet_set_queues(vi, queue_pairs); |
1333 | if (!err) { | |
1334 | netif_set_real_num_tx_queues(dev, queue_pairs); | |
1335 | netif_set_real_num_rx_queues(dev, queue_pairs); | |
1336 | ||
8898c21c | 1337 | virtnet_set_affinity(vi); |
d73bcd2c | 1338 | } |
47be2479 | 1339 | put_online_cpus(); |
d73bcd2c JW |
1340 | |
1341 | return err; | |
1342 | } | |
1343 | ||
1344 | static void virtnet_get_channels(struct net_device *dev, | |
1345 | struct ethtool_channels *channels) | |
1346 | { | |
1347 | struct virtnet_info *vi = netdev_priv(dev); | |
1348 | ||
1349 | channels->combined_count = vi->curr_queue_pairs; | |
1350 | channels->max_combined = vi->max_queue_pairs; | |
1351 | channels->max_other = 0; | |
1352 | channels->rx_count = 0; | |
1353 | channels->tx_count = 0; | |
1354 | channels->other_count = 0; | |
1355 | } | |
1356 | ||
16032be5 NA |
1357 | /* Check if the user is trying to change anything besides speed/duplex */ |
1358 | static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) | |
1359 | { | |
1360 | struct ethtool_cmd diff1 = *cmd; | |
1361 | struct ethtool_cmd diff2 = {}; | |
1362 | ||
0cf3ace9 NA |
1363 | /* cmd is always set, so we need to clear it; also validate the port type,
1364 | * and since there is no autonegotiation we can ignore advertising
1365 | */ | |
16032be5 | 1366 | ethtool_cmd_speed_set(&diff1, 0); |
0cf3ace9 | 1367 | diff2.port = PORT_OTHER; |
16032be5 NA |
1368 | diff1.advertising = 0; |
1369 | diff1.duplex = 0; | |
16032be5 NA |
1370 | diff1.cmd = 0; |
1371 | ||
1372 | return !memcmp(&diff1, &diff2, sizeof(diff1)); | |
1373 | } | |
1374 | ||
1375 | static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1376 | { | |
1377 | struct virtnet_info *vi = netdev_priv(dev); | |
1378 | u32 speed; | |
1379 | ||
1380 | speed = ethtool_cmd_speed(cmd); | |
1381 | /* don't allow custom speed and duplex */ | |
1382 | if (!ethtool_validate_speed(speed) || | |
1383 | !ethtool_validate_duplex(cmd->duplex) || | |
1384 | !virtnet_validate_ethtool_cmd(cmd)) | |
1385 | return -EINVAL; | |
1386 | vi->speed = speed; | |
1387 | vi->duplex = cmd->duplex; | |
1388 | ||
1389 | return 0; | |
1390 | } | |
1391 | ||
1392 | static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1393 | { | |
1394 | struct virtnet_info *vi = netdev_priv(dev); | |
1395 | ||
1396 | ethtool_cmd_speed_set(cmd, vi->speed); | |
1397 | cmd->duplex = vi->duplex; | |
1398 | cmd->port = PORT_OTHER; | |
1399 | ||
1400 | return 0; | |
1401 | } | |
1402 | ||
1403 | static void virtnet_init_settings(struct net_device *dev) | |
1404 | { | |
1405 | struct virtnet_info *vi = netdev_priv(dev); | |
1406 | ||
1407 | vi->speed = SPEED_UNKNOWN; | |
1408 | vi->duplex = DUPLEX_UNKNOWN; | |
1409 | } | |
1410 | ||
0fc0b732 | 1411 | static const struct ethtool_ops virtnet_ethtool_ops = { |
66846048 | 1412 | .get_drvinfo = virtnet_get_drvinfo, |
9f4d26d0 | 1413 | .get_link = ethtool_op_get_link, |
8f9f4668 | 1414 | .get_ringparam = virtnet_get_ringparam, |
d73bcd2c JW |
1415 | .set_channels = virtnet_set_channels, |
1416 | .get_channels = virtnet_get_channels, | |
074c3582 | 1417 | .get_ts_info = ethtool_op_get_ts_info, |
16032be5 NA |
1418 | .get_settings = virtnet_get_settings, |
1419 | .set_settings = virtnet_set_settings, | |
a9ea3fc6 HX |
1420 | }; |
1421 | ||
76288b4e SH |
1422 | static const struct net_device_ops virtnet_netdev = { |
1423 | .ndo_open = virtnet_open, | |
1424 | .ndo_stop = virtnet_close, | |
1425 | .ndo_start_xmit = start_xmit, | |
1426 | .ndo_validate_addr = eth_validate_addr, | |
9c46f6d4 | 1427 | .ndo_set_mac_address = virtnet_set_mac_address, |
2af7698e | 1428 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
3fa2a1df | 1429 | .ndo_get_stats64 = virtnet_stats, |
1824a989 AW |
1430 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
1431 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | |
76288b4e SH |
1432 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1433 | .ndo_poll_controller = virtnet_netpoll, | |
1434 | #endif | |
91815639 JW |
1435 | #ifdef CONFIG_NET_RX_BUSY_POLL |
1436 | .ndo_busy_poll = virtnet_busy_poll, | |
1437 | #endif | |
76288b4e SH |
1438 | }; |
1439 | ||
586d17c5 | 1440 | static void virtnet_config_changed_work(struct work_struct *work) |
9f4d26d0 | 1441 | { |
586d17c5 JW |
1442 | struct virtnet_info *vi = |
1443 | container_of(work, struct virtnet_info, config_work); | |
9f4d26d0 MM |
1444 | u16 v; |
1445 | ||
855e0c52 RR |
1446 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
1447 | struct virtio_net_config, status, &v) < 0) | |
507613bf | 1448 | return; |
586d17c5 JW |
1449 | |
1450 | if (v & VIRTIO_NET_S_ANNOUNCE) { | |
ee89bab1 | 1451 | netdev_notify_peers(vi->dev); |
586d17c5 JW |
1452 | virtnet_ack_link_announce(vi); |
1453 | } | |
9f4d26d0 MM |
1454 | |
1455 | /* Ignore unknown (future) status bits */ | |
1456 | v &= VIRTIO_NET_S_LINK_UP; | |
1457 | ||
1458 | if (vi->status == v) | |
507613bf | 1459 | return; |
9f4d26d0 MM |
1460 | |
1461 | vi->status = v; | |
1462 | ||
1463 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | |
1464 | netif_carrier_on(vi->dev); | |
986a4f4d | 1465 | netif_tx_wake_all_queues(vi->dev); |
9f4d26d0 MM |
1466 | } else { |
1467 | netif_carrier_off(vi->dev); | |
986a4f4d | 1468 | netif_tx_stop_all_queues(vi->dev); |
9f4d26d0 MM |
1469 | } |
1470 | } | |
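/* A VIRTIO_NET_S_ANNOUNCE request is how the host asks the guest to advertise
 * itself after a migration: netdev_notify_peers() has the stack send
 * gratuitous ARP/ND, and virtnet_ack_link_announce() (defined earlier in this
 * file) acknowledges the request over the control virtqueue.
 */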
1471 | ||
1472 | static void virtnet_config_changed(struct virtio_device *vdev) | |
1473 | { | |
1474 | struct virtnet_info *vi = vdev->priv; | |
1475 | ||
3b07e9ca | 1476 | schedule_work(&vi->config_work); |
9f4d26d0 MM |
1477 | } |
1478 | ||
986a4f4d JW |
1479 | static void virtnet_free_queues(struct virtnet_info *vi) |
1480 | { | |
d4fb84ee AV |
1481 | int i; |
1482 | ||
ab3971b1 JW |
1483 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1484 | napi_hash_del(&vi->rq[i].napi); | |
d4fb84ee | 1485 | netif_napi_del(&vi->rq[i].napi); |
ab3971b1 | 1486 | } |
d4fb84ee | 1487 | |
986a4f4d JW |
1488 | kfree(vi->rq); |
1489 | kfree(vi->sq); | |
1490 | } | |
1491 | ||
1492 | static void free_receive_bufs(struct virtnet_info *vi) | |
1493 | { | |
1494 | int i; | |
1495 | ||
1496 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1497 | while (vi->rq[i].pages) | |
1498 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); | |
1499 | } | |
1500 | } | |
1501 | ||
fb51879d MD |
1502 | static void free_receive_page_frags(struct virtnet_info *vi) |
1503 | { | |
1504 | int i; | |
1505 | for (i = 0; i < vi->max_queue_pairs; i++) | |
1506 | if (vi->rq[i].alloc_frag.page) | |
1507 | put_page(vi->rq[i].alloc_frag.page); | |
1508 | } | |
1509 | ||
986a4f4d JW |
1510 | static void free_unused_bufs(struct virtnet_info *vi) |
1511 | { | |
1512 | void *buf; | |
1513 | int i; | |
1514 | ||
1515 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1516 | struct virtqueue *vq = vi->sq[i].vq; | |
1517 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) | |
1518 | dev_kfree_skb(buf); | |
1519 | } | |
1520 | ||
1521 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1522 | struct virtqueue *vq = vi->rq[i].vq; | |
1523 | ||
1524 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | |
ab7db917 MD |
1525 | if (vi->mergeable_rx_bufs) { |
1526 | unsigned long ctx = (unsigned long)buf; | |
1527 | void *base = mergeable_ctx_to_buf_address(ctx); | |
1528 | put_page(virt_to_head_page(base)); | |
1529 | } else if (vi->big_packets) { | |
fa9fac17 | 1530 | give_pages(&vi->rq[i], buf); |
ab7db917 | 1531 | } else { |
986a4f4d | 1532 | dev_kfree_skb(buf); |
ab7db917 | 1533 | } |
986a4f4d | 1534 | } |
986a4f4d JW |
1535 | } |
1536 | } | |
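/* The three branches above mirror the three receive-buffer layouts this driver
 * can use: mergeable buffers are stored as an opaque context value wrapping a
 * page-fragment address, big-packet mode chains whole pages that are returned
 * to the queue's page list via give_pages(), and the default small-buffer mode
 * simply queues skbs.
 */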
1537 | ||
e9d7417b JW |
1538 | static void virtnet_del_vqs(struct virtnet_info *vi) |
1539 | { | |
1540 | struct virtio_device *vdev = vi->vdev; | |
1541 | ||
8898c21c | 1542 | virtnet_clean_affinity(vi, -1); |
986a4f4d | 1543 | |
e9d7417b | 1544 | vdev->config->del_vqs(vdev); |
986a4f4d JW |
1545 | |
1546 | virtnet_free_queues(vi); | |
e9d7417b JW |
1547 | } |
1548 | ||
986a4f4d | 1549 | static int virtnet_find_vqs(struct virtnet_info *vi) |
3f9c10b0 | 1550 | { |
986a4f4d JW |
1551 | vq_callback_t **callbacks; |
1552 | struct virtqueue **vqs; | |
1553 | int ret = -ENOMEM; | |
1554 | int i, total_vqs; | |
1555 | const char **names; | |
1556 | ||
1557 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by | |
1558 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by | |
1559 | * possible control vq. | |
1560 | */ | |
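/* With the usual rxq2vq(i) == 2 * i and txq2vq(i) == 2 * i + 1 helpers defined
 * earlier in this file, a device with max_queue_pairs == 4 and a control vq
 * therefore gets total_vqs == 9, laid out as
 *   vqs[0]=rx0, vqs[1]=tx0, vqs[2]=rx1, vqs[3]=tx1, ... vqs[7]=tx3, vqs[8]=ctrl
 */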
1561 | total_vqs = vi->max_queue_pairs * 2 + | |
1562 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); | |
1563 | ||
1564 | /* Allocate space for find_vqs parameters */ | |
1565 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); | |
1566 | if (!vqs) | |
1567 | goto err_vq; | |
1568 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); | |
1569 | if (!callbacks) | |
1570 | goto err_callback; | |
1571 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); | |
1572 | if (!names) | |
1573 | goto err_names; | |
1574 | ||
1575 | /* Parameters for control virtqueue, if any */ | |
1576 | if (vi->has_cvq) { | |
1577 | callbacks[total_vqs - 1] = NULL; | |
1578 | names[total_vqs - 1] = "control"; | |
1579 | } | |
3f9c10b0 | 1580 | |
986a4f4d JW |
1581 | /* Allocate/initialize parameters for send/receive virtqueues */ |
1582 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1583 | callbacks[rxq2vq(i)] = skb_recv_done; | |
1584 | callbacks[txq2vq(i)] = skb_xmit_done; | |
1585 | sprintf(vi->rq[i].name, "input.%d", i); | |
1586 | sprintf(vi->sq[i].name, "output.%d", i); | |
1587 | names[rxq2vq(i)] = vi->rq[i].name; | |
1588 | names[txq2vq(i)] = vi->sq[i].name; | |
1589 | } | |
3f9c10b0 | 1590 | |
986a4f4d JW |
1591 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
1592 | names); | |
1593 | if (ret) | |
1594 | goto err_find; | |
3f9c10b0 | 1595 | |
986a4f4d JW |
1596 | if (vi->has_cvq) { |
1597 | vi->cvq = vqs[total_vqs - 1]; | |
3f9c10b0 | 1598 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
f646968f | 1599 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
3f9c10b0 | 1600 | } |
986a4f4d JW |
1601 | |
1602 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1603 | vi->rq[i].vq = vqs[rxq2vq(i)]; | |
1604 | vi->sq[i].vq = vqs[txq2vq(i)]; | |
1605 | } | |
1606 | ||
1607 | kfree(names); | |
1608 | kfree(callbacks); | |
1609 | kfree(vqs); | |
1610 | ||
3f9c10b0 | 1611 | return 0; |
986a4f4d JW |
1612 | |
1613 | err_find: | |
1614 | kfree(names); | |
1615 | err_names: | |
1616 | kfree(callbacks); | |
1617 | err_callback: | |
1618 | kfree(vqs); | |
1619 | err_vq: | |
1620 | return ret; | |
1621 | } | |
1622 | ||
1623 | static int virtnet_alloc_queues(struct virtnet_info *vi) | |
1624 | { | |
1625 | int i; | |
1626 | ||
1627 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); | |
1628 | if (!vi->sq) | |
1629 | goto err_sq; | |
1630 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); | |
008d4278 | 1631 | if (!vi->rq) |
986a4f4d JW |
1632 | goto err_rq; |
1633 | ||
1634 | INIT_DELAYED_WORK(&vi->refill, refill_work); | |
1635 | for (i = 0; i < vi->max_queue_pairs; i++) { | |
1636 | vi->rq[i].pages = NULL; | |
1637 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, | |
1638 | napi_weight); | |
1639 | ||
1640 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); | |
5377d758 | 1641 | ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); |
986a4f4d JW |
1642 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
1643 | } | |
1644 | ||
1645 | return 0; | |
1646 | ||
1647 | err_rq: | |
1648 | kfree(vi->sq); | |
1649 | err_sq: | |
1650 | return -ENOMEM; | |
1651 | } | |
1652 | ||
1653 | static int init_vqs(struct virtnet_info *vi) | |
1654 | { | |
1655 | int ret; | |
1656 | ||
1657 | /* Allocate send & receive queues */ | |
1658 | ret = virtnet_alloc_queues(vi); | |
1659 | if (ret) | |
1660 | goto err; | |
1661 | ||
1662 | ret = virtnet_find_vqs(vi); | |
1663 | if (ret) | |
1664 | goto err_free; | |
1665 | ||
47be2479 | 1666 | get_online_cpus(); |
8898c21c | 1667 | virtnet_set_affinity(vi); |
47be2479 WG |
1668 | put_online_cpus(); |
1669 | ||
986a4f4d JW |
1670 | return 0; |
1671 | ||
1672 | err_free: | |
1673 | virtnet_free_queues(vi); | |
1674 | err: | |
1675 | return ret; | |
3f9c10b0 AS |
1676 | } |
1677 | ||
fbf28d78 MD |
1678 | #ifdef CONFIG_SYSFS |
1679 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |
1680 | struct rx_queue_attribute *attribute, char *buf) | |
1681 | { | |
1682 | struct virtnet_info *vi = netdev_priv(queue->dev); | |
1683 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | |
5377d758 | 1684 | struct ewma_pkt_len *avg; |
fbf28d78 MD |
1685 | |
1686 | BUG_ON(queue_index >= vi->max_queue_pairs); | |
1687 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | |
1688 | return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); | |
1689 | } | |
1690 | ||
1691 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | |
1692 | __ATTR_RO(mergeable_rx_buffer_size); | |
1693 | ||
1694 | static struct attribute *virtio_net_mrg_rx_attrs[] = { | |
1695 | &mergeable_rx_buffer_size_attribute.attr, | |
1696 | NULL | |
1697 | }; | |
1698 | ||
1699 | static const struct attribute_group virtio_net_mrg_rx_group = { | |
1700 | .name = "virtio_net", | |
1701 | .attrs = virtio_net_mrg_rx_attrs | |
1702 | }; | |
1703 | #endif | |
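/* When mergeable RX buffers are in use this attribute group is hooked up in
 * virtnet_probe(), so the current EWMA-derived buffer size shows up roughly as
 * /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size.
 */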
1704 | ||
892d6eb1 JW |
1705 | static bool virtnet_fail_on_feature(struct virtio_device *vdev, |
1706 | unsigned int fbit, | |
1707 | const char *fname, const char *dname) | |
1708 | { | |
1709 | if (!virtio_has_feature(vdev, fbit)) | |
1710 | return false; | |
1711 | ||
1712 | dev_err(&vdev->dev, "device advertises feature %s but not %s", | |
1713 | fname, dname); | |
1714 | ||
1715 | return true; | |
1716 | } | |
1717 | ||
1718 | #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ | |
1719 | virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) | |
1720 | ||
1721 | static bool virtnet_validate_features(struct virtio_device *vdev) | |
1722 | { | |
1723 | if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && | |
1724 | (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, | |
1725 | "VIRTIO_NET_F_CTRL_VQ") || | |
1726 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, | |
1727 | "VIRTIO_NET_F_CTRL_VQ") || | |
1728 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, | |
1729 | "VIRTIO_NET_F_CTRL_VQ") || | |
1730 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || | |
1731 | VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, | |
1732 | "VIRTIO_NET_F_CTRL_VQ"))) { | |
1733 | return false; | |
1734 | } | |
1735 | ||
1736 | return true; | |
1737 | } | |
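/* All of the feature bits checked above (RX filtering, VLAN filtering, guest
 * announce, multiqueue and MAC address control) are driven through the control
 * virtqueue, so a device that offers any of them without VIRTIO_NET_F_CTRL_VQ
 * is inconsistent and the probe is refused.
 */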
1738 | ||
d0c2c997 JW |
1739 | #define MIN_MTU ETH_MIN_MTU |
1740 | #define MAX_MTU ETH_MAX_MTU | |
1741 | ||
296f96fc RR |
1742 | static int virtnet_probe(struct virtio_device *vdev) |
1743 | { | |
986a4f4d | 1744 | int i, err; |
296f96fc RR |
1745 | struct net_device *dev; |
1746 | struct virtnet_info *vi; | |
986a4f4d | 1747 | u16 max_queue_pairs; |
14de9d11 | 1748 | int mtu; |
986a4f4d | 1749 | |
6ba42248 MT |
1750 | if (!vdev->config->get) { |
1751 | dev_err(&vdev->dev, "%s failure: config access disabled\n", | |
1752 | __func__); | |
1753 | return -EINVAL; | |
1754 | } | |
1755 | ||
892d6eb1 JW |
1756 | if (!virtnet_validate_features(vdev)) |
1757 | return -EINVAL; | |
1758 | ||
986a4f4d | 1759 | /* Find out whether the host supports a multiqueue virtio_net device */
855e0c52 RR |
1760 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
1761 | struct virtio_net_config, | |
1762 | max_virtqueue_pairs, &max_queue_pairs); | |
986a4f4d JW |
1763 | |
1764 | /* We need at least 2 queues */
1765 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || | |
1766 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || | |
1767 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | |
1768 | max_queue_pairs = 1; | |
296f96fc RR |
1769 | |
1770 | /* Allocate ourselves a network device with room for our info */ | |
986a4f4d | 1771 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
296f96fc RR |
1772 | if (!dev) |
1773 | return -ENOMEM; | |
1774 | ||
1775 | /* Set up network device as normal. */ | |
f2f2c8b4 | 1776 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; |
76288b4e | 1777 | dev->netdev_ops = &virtnet_netdev; |
296f96fc | 1778 | dev->features = NETIF_F_HIGHDMA; |
3fa2a1df | 1779 | |
7ad24ea4 | 1780 | dev->ethtool_ops = &virtnet_ethtool_ops; |
296f96fc RR |
1781 | SET_NETDEV_DEV(dev, &vdev->dev); |
1782 | ||
1783 | /* Do we support "hardware" checksums? */ | |
98e778c9 | 1784 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
296f96fc | 1785 | /* This opens up the world of extra features. */ |
48900cb6 | 1786 | dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 | 1787 | if (csum) |
48900cb6 | 1788 | dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; |
98e778c9 MM |
1789 | |
1790 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { | |
e3e3c423 | 1791 | dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
34a48579 RR |
1792 | | NETIF_F_TSO_ECN | NETIF_F_TSO6; |
1793 | } | |
5539ae96 | 1794 | /* Individual feature bits: what can the host handle? */
98e778c9 MM |
1795 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) |
1796 | dev->hw_features |= NETIF_F_TSO; | |
1797 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) | |
1798 | dev->hw_features |= NETIF_F_TSO6; | |
1799 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) | |
1800 | dev->hw_features |= NETIF_F_TSO_ECN; | |
e3e3c423 VY |
1801 | if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) |
1802 | dev->hw_features |= NETIF_F_UFO; | |
98e778c9 | 1803 | |
41f2f127 JW |
1804 | dev->features |= NETIF_F_GSO_ROBUST; |
1805 | ||
98e778c9 | 1806 | if (gso) |
e3e3c423 | 1807 | dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); |
98e778c9 | 1808 | /* (!csum && gso) case will be fixed by register_netdev() */ |
296f96fc | 1809 | } |
4f49129b TH |
1810 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) |
1811 | dev->features |= NETIF_F_RXCSUM; | |
296f96fc | 1812 | |
4fda8302 JW |
1813 | dev->vlan_features = dev->features; |
1814 | ||
d0c2c997 JW |
1815 | /* MTU range: 68 - 65535 */ |
1816 | dev->min_mtu = MIN_MTU; | |
1817 | dev->max_mtu = MAX_MTU; | |
1818 | ||
296f96fc | 1819 | /* Configuration may specify what MAC to use. Otherwise random. */ |
855e0c52 RR |
1820 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) |
1821 | virtio_cread_bytes(vdev, | |
1822 | offsetof(struct virtio_net_config, mac), | |
1823 | dev->dev_addr, dev->addr_len); | |
1824 | else | |
f2cedb63 | 1825 | eth_hw_addr_random(dev); |
296f96fc RR |
1826 | |
1827 | /* Set up our device-specific information */ | |
1828 | vi = netdev_priv(dev); | |
296f96fc RR |
1829 | vi->dev = dev; |
1830 | vi->vdev = vdev; | |
d9d5dcc8 | 1831 | vdev->priv = vi; |
3fa2a1df | 1832 | vi->stats = alloc_percpu(struct virtnet_stats); |
1833 | err = -ENOMEM; | |
1834 | if (vi->stats == NULL) | |
1835 | goto free; | |
1836 | ||
827da44c JS |
1837 | for_each_possible_cpu(i) { |
1838 | struct virtnet_stats *virtnet_stats; | |
1839 | virtnet_stats = per_cpu_ptr(vi->stats, i); | |
1840 | u64_stats_init(&virtnet_stats->tx_syncp); | |
1841 | u64_stats_init(&virtnet_stats->rx_syncp); | |
1842 | } | |
1843 | ||
586d17c5 | 1844 | INIT_WORK(&vi->config_work, virtnet_config_changed_work); |
296f96fc | 1845 | |
97402b96 | 1846 | /* If we can receive ANY GSO packets, we must allocate large ones. */ |
8e95a202 JP |
1847 | if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || |
1848 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || | |
e3e3c423 VY |
1849 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || |
1850 | virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) | |
97402b96 HX |
1851 | vi->big_packets = true; |
1852 | ||
3f2c31d9 MM |
1853 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) |
1854 | vi->mergeable_rx_bufs = true; | |
1855 | ||
d04302b3 MT |
1856 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || |
1857 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
012873d0 MT |
1858 | vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
1859 | else | |
1860 | vi->hdr_len = sizeof(struct virtio_net_hdr); | |
1861 | ||
75993300 MT |
1862 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
1863 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | |
e7428e95 MT |
1864 | vi->any_header_sg = true; |
1865 | ||
986a4f4d JW |
1866 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
1867 | vi->has_cvq = true; | |
1868 | ||
14de9d11 AC |
1869 | if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { |
1870 | mtu = virtio_cread16(vdev, | |
1871 | offsetof(struct virtio_net_config, | |
1872 | mtu)); | |
d0c2c997 | 1873 | if (mtu < dev->min_mtu || mtu > dev->max_mtu) |
14de9d11 | 1874 | __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); |
d0c2c997 JW |
1875 | else |
1876 | dev->mtu = mtu; | |
14de9d11 AC |
1877 | } |
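/* If the device reports an MTU outside the [min_mtu, max_mtu] window, the
 * feature bit is simply cleared and the hint ignored rather than failing the
 * probe, so the device keeps working with the default MTU.
 */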
1878 | ||
012873d0 MT |
1879 | if (vi->any_header_sg) |
1880 | dev->needed_headroom = vi->hdr_len; | |
6ebbc1a6 | 1881 | |
986a4f4d JW |
1882 | /* Use single tx/rx queue pair as default */ |
1883 | vi->curr_queue_pairs = 1; | |
1884 | vi->max_queue_pairs = max_queue_pairs; | |
1885 | ||
1886 | /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ | |
3f9c10b0 | 1887 | err = init_vqs(vi); |
d2a7ddda | 1888 | if (err) |
9bb8ca86 | 1889 | goto free_stats; |
296f96fc | 1890 | |
fbf28d78 MD |
1891 | #ifdef CONFIG_SYSFS |
1892 | if (vi->mergeable_rx_bufs) | |
1893 | dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; | |
1894 | #endif | |
0f13b66b ZYW |
1895 | netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); |
1896 | netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); | |
986a4f4d | 1897 | |
16032be5 NA |
1898 | virtnet_init_settings(dev); |
1899 | ||
296f96fc RR |
1900 | err = register_netdev(dev); |
1901 | if (err) { | |
1902 | pr_debug("virtio_net: registering device failed\n"); | |
d2a7ddda | 1903 | goto free_vqs; |
296f96fc | 1904 | } |
b3369c1f | 1905 | |
4baf1e33 MT |
1906 | virtio_device_ready(vdev); |
1907 | ||
8017c279 | 1908 | err = virtnet_cpu_notif_add(vi); |
8de4b2f3 WG |
1909 | if (err) { |
1910 | pr_debug("virtio_net: registering cpu notifier failed\n"); | |
f00e35e2 | 1911 | goto free_unregister_netdev; |
8de4b2f3 WG |
1912 | } |
1913 | ||
167c25e4 JW |
1914 | /* Assume link up if device can't report link status, |
1915 |  * otherwise get link status from config. */
1916 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { | |
1917 | netif_carrier_off(dev); | |
3b07e9ca | 1918 | schedule_work(&vi->config_work); |
167c25e4 JW |
1919 | } else { |
1920 | vi->status = VIRTIO_NET_S_LINK_UP; | |
1921 | netif_carrier_on(dev); | |
1922 | } | |
9f4d26d0 | 1923 | |
986a4f4d JW |
1924 | pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", |
1925 | dev->name, max_queue_pairs); | |
1926 | ||
296f96fc RR |
1927 | return 0; |
1928 | ||
f00e35e2 | 1929 | free_unregister_netdev: |
02465555 MT |
1930 | vi->vdev->config->reset(vdev); |
1931 | ||
b3369c1f | 1932 | unregister_netdev(dev); |
d2a7ddda | 1933 | free_vqs: |
986a4f4d | 1934 | cancel_delayed_work_sync(&vi->refill); |
fb51879d | 1935 | free_receive_page_frags(vi); |
e9d7417b | 1936 | virtnet_del_vqs(vi); |
3fa2a1df | 1937 | free_stats: |
1938 | free_percpu(vi->stats); | |
296f96fc RR |
1939 | free: |
1940 | free_netdev(dev); | |
1941 | return err; | |
1942 | } | |
1943 | ||
04486ed0 | 1944 | static void remove_vq_common(struct virtnet_info *vi) |
296f96fc | 1945 | { |
04486ed0 | 1946 | vi->vdev->config->reset(vi->vdev); |
830a8a97 SM |
1947 | |
1948 | /* Free unused buffers in both send and recv, if any. */ | |
9ab86bbc | 1949 | free_unused_bufs(vi); |
fb6813f4 | 1950 | |
986a4f4d | 1951 | free_receive_bufs(vi); |
d2a7ddda | 1952 | |
fb51879d MD |
1953 | free_receive_page_frags(vi); |
1954 | ||
986a4f4d | 1955 | virtnet_del_vqs(vi); |
04486ed0 AS |
1956 | } |
1957 | ||
8cc085d6 | 1958 | static void virtnet_remove(struct virtio_device *vdev) |
04486ed0 AS |
1959 | { |
1960 | struct virtnet_info *vi = vdev->priv; | |
1961 | ||
8017c279 | 1962 | virtnet_cpu_notif_remove(vi); |
8de4b2f3 | 1963 | |
102a2786 MT |
1964 | /* Make sure no work handler is accessing the device. */ |
1965 | flush_work(&vi->config_work); | |
586d17c5 | 1966 | |
04486ed0 AS |
1967 | unregister_netdev(vi->dev); |
1968 | ||
1969 | remove_vq_common(vi); | |
fb6813f4 | 1970 | |
2e66f55b | 1971 | free_percpu(vi->stats); |
74b2553f | 1972 | free_netdev(vi->dev); |
296f96fc RR |
1973 | } |
1974 | ||
89107000 | 1975 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
1976 | static int virtnet_freeze(struct virtio_device *vdev) |
1977 | { | |
1978 | struct virtnet_info *vi = vdev->priv; | |
986a4f4d | 1979 | int i; |
0741bcb5 | 1980 | |
8017c279 | 1981 | virtnet_cpu_notif_remove(vi); |
ec9debbd | 1982 | |
102a2786 MT |
1983 | /* Make sure no work handler is accessing the device */ |
1984 | flush_work(&vi->config_work); | |
586d17c5 | 1985 | |
0741bcb5 AS |
1986 | netif_device_detach(vi->dev); |
1987 | cancel_delayed_work_sync(&vi->refill); | |
1988 | ||
91815639 | 1989 | if (netif_running(vi->dev)) { |
ab3971b1 | 1990 | for (i = 0; i < vi->max_queue_pairs; i++) |
986a4f4d | 1991 | napi_disable(&vi->rq[i].napi); |
91815639 | 1992 | } |
0741bcb5 AS |
1993 | |
1994 | remove_vq_common(vi); | |
1995 | ||
1996 | return 0; | |
1997 | } | |
1998 | ||
1999 | static int virtnet_restore(struct virtio_device *vdev) | |
2000 | { | |
2001 | struct virtnet_info *vi = vdev->priv; | |
986a4f4d | 2002 | int err, i; |
0741bcb5 AS |
2003 | |
2004 | err = init_vqs(vi); | |
2005 | if (err) | |
2006 | return err; | |
2007 | ||
e53fbd11 MT |
2008 | virtio_device_ready(vdev); |
2009 | ||
6cd4ce00 JW |
2010 | if (netif_running(vi->dev)) { |
2011 | for (i = 0; i < vi->curr_queue_pairs; i++) | |
946fa564 | 2012 | if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) |
6cd4ce00 JW |
2013 | schedule_delayed_work(&vi->refill, 0); |
2014 | ||
986a4f4d JW |
2015 | for (i = 0; i < vi->max_queue_pairs; i++) |
2016 | virtnet_napi_enable(&vi->rq[i]); | |
6cd4ce00 | 2017 | } |
0741bcb5 AS |
2018 | |
2019 | netif_device_attach(vi->dev); | |
2020 | ||
35ed159b | 2021 | rtnl_lock(); |
986a4f4d | 2022 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
35ed159b | 2023 | rtnl_unlock(); |
986a4f4d | 2024 | |
8017c279 | 2025 | err = virtnet_cpu_notif_add(vi); |
ec9debbd JW |
2026 | if (err) |
2027 | return err; | |
2028 | ||
0741bcb5 AS |
2029 | return 0; |
2030 | } | |
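/* Freeze tears the virtqueues down completely via remove_vq_common(), so
 * restore has to rebuild them from scratch with init_vqs(), refill the receive
 * rings and re-enable NAPI before the interface is reattached; the queue count
 * and the CPU notifier are then restored under the appropriate locks.
 */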
2031 | #endif | |
2032 | ||
296f96fc RR |
2033 | static struct virtio_device_id id_table[] = { |
2034 | { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, | |
2035 | { 0 }, | |
2036 | }; | |
2037 | ||
c45a6816 | 2038 | static unsigned int features[] = { |
5e4fe5c4 MM |
2039 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, |
2040 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | |
e3e3c423 | 2041 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
97402b96 | 2042 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
e3e3c423 | 2043 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
2a41f71d | 2044 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
0bde9569 | 2045 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
986a4f4d | 2046 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, |
7e58d5ae | 2047 | VIRTIO_NET_F_CTRL_MAC_ADDR, |
e7428e95 | 2048 | VIRTIO_F_ANY_LAYOUT, |
14de9d11 | 2049 | VIRTIO_NET_F_MTU, |
c45a6816 RR |
2050 | }; |
2051 | ||
22402529 | 2052 | static struct virtio_driver virtio_net_driver = { |
c45a6816 RR |
2053 | .feature_table = features, |
2054 | .feature_table_size = ARRAY_SIZE(features), | |
296f96fc RR |
2055 | .driver.name = KBUILD_MODNAME, |
2056 | .driver.owner = THIS_MODULE, | |
2057 | .id_table = id_table, | |
2058 | .probe = virtnet_probe, | |
8cc085d6 | 2059 | .remove = virtnet_remove, |
9f4d26d0 | 2060 | .config_changed = virtnet_config_changed, |
89107000 | 2061 | #ifdef CONFIG_PM_SLEEP |
0741bcb5 AS |
2062 | .freeze = virtnet_freeze, |
2063 | .restore = virtnet_restore, | |
2064 | #endif | |
296f96fc RR |
2065 | }; |
2066 | ||
8017c279 SAS |
2067 | static __init int virtio_net_driver_init(void) |
2068 | { | |
2069 | int ret; | |
2070 | ||
2071 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "AP_VIRT_NET_ONLINE", | |
2072 | virtnet_cpu_online, | |
2073 | virtnet_cpu_down_prep); | |
2074 | if (ret < 0) | |
2075 | goto out; | |
2076 | virtionet_online = ret; | |
2077 | ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "VIRT_NET_DEAD", | |
2078 | NULL, virtnet_cpu_dead); | |
2079 | if (ret) | |
2080 | goto err_dead; | |
2081 | ||
2082 | ret = register_virtio_driver(&virtio_net_driver); | |
2083 | if (ret) | |
2084 | goto err_virtio; | |
2085 | return 0; | |
2086 | err_virtio: | |
2087 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); | |
2088 | err_dead: | |
2089 | cpuhp_remove_multi_state(virtionet_online); | |
2090 | out: | |
2091 | return ret; | |
2092 | } | |
2093 | module_init(virtio_net_driver_init); | |
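/* Module init registers two CPU hotplug callbacks before the virtio driver
 * itself: a dynamically allocated online state (its id is kept in
 * virtionet_online, declared earlier in this file) that lets the driver
 * refresh queue affinity as CPUs come and go, and a CPUHP_VIRT_NET_DEAD state
 * for cleanup once a CPU is fully offline.  Both are torn down in reverse
 * order on error here and again in virtio_net_driver_exit().
 */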
2094 | ||
2095 | static __exit void virtio_net_driver_exit(void) | |
2096 | { | |
2097 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); | |
2098 | cpuhp_remove_multi_state(virtionet_online); | |
2099 | unregister_virtio_driver(&virtio_net_driver); | |
2100 | } | |
2101 | module_exit(virtio_net_driver_exit); | |
296f96fc RR |
2102 | |
2103 | MODULE_DEVICE_TABLE(virtio, id_table); | |
2104 | MODULE_DESCRIPTION("Virtio network driver"); | |
2105 | MODULE_LICENSE("GPL"); |