/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: pv-drivers@vmware.com
 *
 */

#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
	{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
	{0}
};

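/*
 * Exporting the ID table creates the PCI modalias that lets userspace
 * (udev/modprobe) autoload this module when a matching VMware virtual
 * NIC is discovered.
 */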
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static int enable_mq = 1;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 * Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}

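/*
 * Note: each interrupt vector has its own mask register, laid out at an
 * 8-byte stride from VMXNET3_REG_IMR; writing 0 unmasks the vector and
 * writing 1 masks it, which is all the two helpers above do.
 */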

/*
 * Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
	int i;

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = false;
	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
	u32 ret;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	adapter->link_speed = ret >> 16;
	if (ret & 1) { /* Link is up. */
		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
			    adapter->link_speed);
		netif_carrier_on(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_start(&adapter->tx_queue[i],
						 adapter);
		}
	} else {
		netdev_info(adapter->netdev, "NIC Link is Down\n");
		netif_carrier_off(adapter->netdev);

		if (affectTxQueue) {
			for (i = 0; i < adapter->num_tx_queues; i++)
				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
		}
	}
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
	int i;
	unsigned long flags;
	u32 events = le32_to_cpu(adapter->shared->ecr);
	if (!events)
		return;

	vmxnet3_ack_events(adapter, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		vmxnet3_check_link(adapter, true);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		spin_lock_irqsave(&adapter->cmd_lock, flags);
		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);
		spin_unlock_irqrestore(&adapter->cmd_lock, flags);

		for (i = 0; i < adapter->num_tx_queues; i++)
			if (adapter->tqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: tq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->tqd_start[i].status.error));
		for (i = 0; i < adapter->num_rx_queues; i++)
			if (adapter->rqd_start[i].status.stopped)
				dev_err(&adapter->netdev->dev,
					"%s: rq[%d] error 0x%x\n",
					adapter->netdev->name, i, le32_to_cpu(
					adapter->rqd_start[i].status.error));

		schedule_work(&adapter->work);
	}
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to read from and write to the device ABI correctly.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into the bitfields, cpu_to_le32 is used to
 * translate the double words into the required format.
 * In order to avoid touching bits in the shared structure more than once,
 * temporary descriptors are used. These are passed as srcDesc to the
 * following functions.
 */
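
/*
 * Illustrative sketch (not additional device ABI): a tx descriptor is
 * edited through a CPU-order temporary and converted one double word at
 * a time on the way out, roughly:
 *
 *	union Vmxnet3_GenericDesc tmp = *sop_txd;    (CPU bitfield order)
 *	tmp.txd.gen = tq->tx_ring.gen;               (edit bitfields freely)
 *	vmxnet3_TxDescToLe(&tmp.txd, &sop_txd->txd); (store as little endian)
 */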
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)
{
	u32 *src = (u32 *)srcDesc + 2;
	u32 *dst = (u32 *)dstDesc + 2;
	dstDesc->addr = le64_to_cpu(srcDesc->addr);
	*dst = le32_to_cpu(*src);
	dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)
{
	int i;
	u32 *src = (u32 *)(srcDesc + 1);
	u32 *dst = (u32 *)(dstDesc + 1);

	/* Working backwards so that the gen bit is set at the end. */
	for (i = 2; i > 0; i--) {
		src--;
		dst--;
		*dst = cpu_to_le32(*src);
	}
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
				struct Vmxnet3_RxCompDesc *dstDesc)
{
	int i = 0;
	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
	for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
		*dst = le32_to_cpu(*src);
		src++;
		dst++;
	}
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
	u32 temp = le32_to_cpu(*bitfield);
	u32 mask = ((1 << size) - 1) << pos;
	temp &= mask;
	temp >>= pos;
	return temp;
}
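
/*
 * For example, get_bitfield32(dw, VMXNET3_TXD_GEN_SHIFT,
 * VMXNET3_TXD_GEN_SIZE) masks out everything but the single gen bit of
 * the dword and shifts it down to bit 0; the accessor macros below are
 * thin wrappers around exactly this call.
 */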


#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#  define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#  define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
#  define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else

#  define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#  define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#  define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#  define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#  define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#  define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
		     struct pci_dev *pdev)
{
	if (tbi->map_type == VMXNET3_MAP_SINGLE)
		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
				 PCI_DMA_TODEVICE);
	else if (tbi->map_type == VMXNET3_MAP_PAGE)
		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
			       PCI_DMA_TODEVICE);
	else
		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

	tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* no out of order completion */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	BUG_ON(skb == NULL);
	tq->buf_info[eop_idx].skb = NULL;

	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);

		/* update next2comp w/o tx_lock. Since we are marking more,
		 * instead of less, tx ring entries avail, the worst case is
		 * that the tx routine incorrectly re-queues a pkt due to
		 * insufficient tx ring entries.
		 */
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);
	return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
		       struct vmxnet3_adapter *adapter)
{
	int completed = 0;
	union Vmxnet3_GenericDesc *gdesc;

	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
					       &gdesc->tcd), tq, adapter->pdev,
					       adapter);

		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
		gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
	}

	if (completed) {
		spin_lock(&tq->tx_lock);
		if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
			     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
			     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
			     netif_carrier_ok(adapter->netdev))) {
			vmxnet3_tq_wake(tq, adapter);
		}
		spin_unlock(&tq->tx_lock);
	}
	return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	int i;

	while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
		struct vmxnet3_tx_buf_info *tbi;

		tbi = tq->buf_info + tq->tx_ring.next2comp;

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		if (tbi->skb) {
			dev_kfree_skb_any(tbi->skb);
			tbi->skb = NULL;
		}
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	}

	/* sanity check, verify all buffers are indeed unmapped and freed */
	for (i = 0; i < tq->tx_ring.size; i++) {
		BUG_ON(tq->buf_info[i].skb != NULL ||
		       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
	}

	tq->tx_ring.gen = VMXNET3_INIT_GEN;
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

	tq->comp_ring.gen = VMXNET3_INIT_GEN;
	tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
		   struct vmxnet3_adapter *adapter)
{
	if (tq->tx_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
				  sizeof(struct Vmxnet3_TxDesc),
				  tq->tx_ring.base, tq->tx_ring.basePA);
		tq->tx_ring.base = NULL;
	}
	if (tq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->data_ring.size * tq->txdata_desc_size,
				  tq->data_ring.base, tq->data_ring.basePA);
		tq->data_ring.base = NULL;
	}
	if (tq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
				  sizeof(struct Vmxnet3_TxCompDesc),
				  tq->comp_ring.base, tq->comp_ring.basePA);
		tq->comp_ring.base = NULL;
	}
	if (tq->buf_info) {
		dma_free_coherent(&adapter->pdev->dev,
				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
				  tq->buf_info, tq->buf_info_pa);
		tq->buf_info = NULL;
	}
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* reset the tx ring contents to 0 and reset the tx ring states */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* reset the tx comp ring contents to 0 and reset comp ring states */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping data */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

	/* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
		goto err;
	}

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
		goto err;
	}

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
		goto err;
	}

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
					   &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);
	return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 * Starting from ring->next2fill, allocate rx buffers for the given ring
 * of the rx queue and update the rx desc. Stop after @num_to_alloc buffers
 * are allocated or allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
			int num_to_alloc, struct vmxnet3_adapter *adapter)
{
	int num_allocated = 0;
	struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
	struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
	u32 val;

	while (num_allocated <= num_to_alloc) {
		struct vmxnet3_rx_buf_info *rbi;
		union Vmxnet3_GenericDesc *gd;

		rbi = rbi_base + ring->next2fill;
		gd = ring->base + ring->next2fill;

		if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
			if (rbi->skb == NULL) {
				rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
								       rbi->len,
								       GFP_KERNEL);
				if (unlikely(rbi->skb == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}

				rbi->dma_addr = dma_map_single(
						&adapter->pdev->dev,
						rbi->skb->data, rbi->len,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					dev_kfree_skb_any(rbi->skb);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffer skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
		} else {
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
			       rbi->len != PAGE_SIZE);

			if (rbi->page == NULL) {
				rbi->page = alloc_page(GFP_ATOMIC);
				if (unlikely(rbi->page == NULL)) {
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
				rbi->dma_addr = dma_map_page(
						&adapter->pdev->dev,
						rbi->page, 0, PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      rbi->dma_addr)) {
					put_page(rbi->page);
					rq->stats.rx_buf_alloc_failure++;
					break;
				}
			} else {
				/* rx buffers skipped by the device */
			}
			val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
		}

		gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
		gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
					   | val | rbi->len);

		/* Fill the last buffer but don't mark it ready, or else the
		 * device will think that the queue is full */
		if (num_allocated == num_to_alloc)
			break;

		gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
		num_allocated++;
		vmxnet3_cmd_ring_adv_next2fill(ring);
	}

	netdev_dbg(adapter->netdev,
		   "alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
		   num_allocated, ring->next2fill, ring->next2comp);

	/* so that the device can distinguish a full ring and an empty ring */
	BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

	return num_allocated;
}
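
/*
 * Note on the BUG_ON above: one descriptor is always left unpublished, so
 * next2fill == next2comp can only mean "ring empty"; a completely full
 * ring would otherwise be indistinguishable from an empty one.
 */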


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
		    struct vmxnet3_rx_buf_info *rbi)
{
	struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
		skb_shinfo(skb)->nr_frags;

	BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

	__skb_frag_set_page(frag, rbi->page);
	frag->page_offset = 0;
	skb_frag_size_set(frag, rcd->len);
	skb->data_len += rcd->len;
	skb->truesize += PAGE_SIZE;
	skb_shinfo(skb)->nr_frags++;
}


static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
		struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
		struct vmxnet3_adapter *adapter)
{
	u32 dw2, len;
	unsigned long buf_offset;
	int i;
	union Vmxnet3_GenericDesc *gdesc;
	struct vmxnet3_tx_buf_info *tbi = NULL;

	BUG_ON(ctx->copy_size > skb_headlen(skb));

	/* use the previous gen bit for the SOP desc */
	dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

	ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
	gdesc = ctx->sop_txd; /* both loops below can be skipped */

	/* no need to map the buffer if headers are copied */
	if (ctx->copy_size) {
		ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
					tq->tx_ring.next2fill *
					tq->txdata_desc_size);
		ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
		ctx->sop_txd->dword[3] = 0;

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_NONE;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill,
			   le64_to_cpu(ctx->sop_txd->txd.addr),
			   ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

		/* use the right gen for non-SOP desc */
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
	}

	/* linear part can use multiple tx desc if it's big */
	len = skb_headlen(skb) - ctx->copy_size;
	buf_offset = ctx->copy_size;
	while (len) {
		u32 buf_size;

		if (len < VMXNET3_MAX_TX_BUF_SIZE) {
			buf_size = len;
			dw2 |= len;
		} else {
			buf_size = VMXNET3_MAX_TX_BUF_SIZE;
			/* spec says that for TxDesc.len, 0 == 2^14 */
		}

		tbi = tq->buf_info + tq->tx_ring.next2fill;
		tbi->map_type = VMXNET3_MAP_SINGLE;
		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
				skb->data + buf_offset, buf_size,
				PCI_DMA_TODEVICE);
		if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
			return -EFAULT;

		tbi->len = buf_size;

		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
		gdesc->dword[2] = cpu_to_le32(dw2);
		gdesc->dword[3] = 0;

		netdev_dbg(adapter->netdev,
			   "txd[%u]: 0x%Lx 0x%x 0x%x\n",
			   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
			   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

		len -= buf_size;
		buf_offset += buf_size;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 buf_size;

		buf_offset = 0;
		len = skb_frag_size(frag);
		while (len) {
			tbi = tq->buf_info + tq->tx_ring.next2fill;
			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
				buf_size = len;
				dw2 |= len;
			} else {
				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
				/* spec says that for TxDesc.len, 0 == 2^14 */
			}
			tbi->map_type = VMXNET3_MAP_PAGE;
			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
							 buf_offset, buf_size,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
				return -EFAULT;

			tbi->len = buf_size;

			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
			gdesc->dword[2] = cpu_to_le32(dw2);
			gdesc->dword[3] = 0;

			netdev_dbg(adapter->netdev,
				   "txd[%u]: 0x%llx %u %u\n",
				   tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
				   le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

			len -= buf_size;
			buf_offset += buf_size;
		}
	}

	ctx->eop_txd = gdesc;

	/* set the last buf_info for the pkt */
	tbi->skb = skb;
	tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;

	return 0;
}
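
/*
 * Ordering note: vmxnet3_map_pkt() writes the SOP descriptor with the
 * inverted gen bit, so the device ignores the whole chain until the
 * caller (vmxnet3_tq_xmit below) flips the SOP gen bit as its last step.
 */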


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 * Parse relevant protocol headers:
 * For a TSO pkt, the relevant headers are L2/3/4, including options.
 * For a pkt requesting csum offloading, they are L2/3 and may include L4
 * if it's a TCP/UDP pkt.
 *
 * Returns:
 *    -1:  an error occurred during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion to be copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		  struct vmxnet3_tx_ctx *ctx,
		  struct vmxnet3_adapter *adapter)
{
	u8 protocol = 0;

	if (ctx->mss) {	/* TSO */
		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
		ctx->l4_hdr_size = tcp_hdrlen(skb);
		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

			if (ctx->ipv4) {
				const struct iphdr *iph = ip_hdr(skb);

				protocol = iph->protocol;
			} else if (ctx->ipv6) {
				const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

				protocol = ipv6h->nexthdr;
			}

			switch (protocol) {
			case IPPROTO_TCP:
				ctx->l4_hdr_size = tcp_hdrlen(skb);
				break;
			case IPPROTO_UDP:
				ctx->l4_hdr_size = sizeof(struct udphdr);
				break;
			default:
				ctx->l4_hdr_size = 0;
				break;
			}

			ctx->copy_size = min(ctx->eth_ip_hdr_size +
					     ctx->l4_hdr_size, skb->len);
		} else {
			ctx->eth_ip_hdr_size = 0;
			ctx->l4_hdr_size = 0;
			/* copy as much as allowed */
			ctx->copy_size = min_t(unsigned int,
					       tq->txdata_desc_size,
					       skb_headlen(skb));
		}

		if (skb->len <= VMXNET3_HDR_COPY_SIZE)
			ctx->copy_size = skb->len;

		/* make sure headers are accessible directly */
		if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
			goto err;
	}

	if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
		tq->stats.oversized_hdr++;
		ctx->copy_size = 0;
		return 0;
	}

	return 1;
err:
	return -1;
}

/*
 * Copy relevant protocol headers to the transmit ring:
 * For a TSO pkt, the relevant headers are L2/3/4, including options.
 * For a pkt requesting csum offloading, they are L2/3 and may include L4
 * if it's a TCP/UDP pkt.
 *
 *
 * Note that this requires that vmxnet3_parse_hdr be called first to set the
 * appropriate bits in ctx.
 */
static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		 struct vmxnet3_tx_ctx *ctx,
		 struct vmxnet3_adapter *adapter)
{
	struct Vmxnet3_TxDataDesc *tdd;

	tdd = tq->data_ring.base + tq->tx_ring.next2fill;

	memcpy(tdd->data, skb->data, ctx->copy_size);
	netdev_dbg(adapter->netdev,
		   "copy %u bytes to dataRing[%u]\n",
		   ctx->copy_size, tq->tx_ring.next2fill);
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
{
	struct tcphdr *tcph = tcp_hdr(skb);

	if (ctx->ipv4) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
						 IPPROTO_TCP, 0);
	} else if (ctx->ipv6) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}
}

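/*
 * The seeding above stores the pseudo-header checksum (with a length field
 * of 0) in tcph->check, so during TSO the device only needs to add the
 * per-segment payload sum instead of recomputing the pseudo header for
 * every segment.
 */
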
static int txd_estimate(const struct sk_buff *skb)
{
	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
	}
	return count;
}

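/*
 * Back-of-the-envelope example (assuming VMXNET3_TXD_NEEDED() rounds up
 * in VMXNET3_MAX_TX_BUF_SIZE (16 KB) chunks): a 20000-byte linear area
 * needs two descriptors, plus the extra one reserved here for the headers
 * copied into the data ring.
 */
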
/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
		struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
	int ret;
	u32 count;
	unsigned long flags;
	struct vmxnet3_tx_ctx ctx;
	union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
	/* Use temporary descriptor to avoid touching bits multiple times */
	union Vmxnet3_GenericDesc tempTxDesc;
#endif

	count = txd_estimate(skb);

	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
	ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));

	ctx.mss = skb_shinfo(skb)->gso_size;
	if (ctx.mss) {
		if (skb_header_cloned(skb)) {
			if (unlikely(pskb_expand_head(skb, 0, 0,
						      GFP_ATOMIC) != 0)) {
				tq->stats.drop_tso++;
				goto drop_pkt;
			}
			tq->stats.copy_skb_header++;
		}
		vmxnet3_prepare_tso(skb, &ctx);
	} else {
		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

			/* non-tso pkts must not use more than
			 * VMXNET3_MAX_TXD_PER_PKT entries
			 */
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
				goto drop_pkt;
			}
			tq->stats.linearized++;

			/* recalculate the # of descriptors to use */
			count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
		}
	}

	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
	if (ret >= 0) {
		BUG_ON(ret <= 0 && ctx.copy_size != 0);
		/* hdrs parsed, check against other limits */
		if (ctx.mss) {
			if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
				     VMXNET3_MAX_TX_BUF_SIZE)) {
				tq->stats.drop_oversized_hdr++;
				goto drop_pkt;
			}
		} else {
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (unlikely(ctx.eth_ip_hdr_size +
					     skb->csum_offset >
					     VMXNET3_MAX_CSUM_OFFSET)) {
					tq->stats.drop_oversized_hdr++;
					goto drop_pkt;
				}
			}
		}
	} else {
		tq->stats.drop_hdr_inspect_err++;
		goto drop_pkt;
	}

	spin_lock_irqsave(&tq->tx_lock, flags);

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
		netdev_dbg(adapter->netdev,
			   "tx queue stopped on %s, next2comp %u"
			   " next2fill %u\n", adapter->netdev->name,
			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);

		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}


	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);

	/* fill tx descs related to addr & len */
	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
		goto unlock_drop_pkt;

	/* setup the EOP desc */
	ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

	/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;
	gdesc->dword[2] = ctx.sop_txd->dword[2];
	gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
	gdesc = ctx.sop_txd;
#endif
	if (ctx.mss) {
		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
		gdesc->txd.om = VMXNET3_OM_TSO;
		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
	} else {
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
					    skb->csum_offset;
		} else {
			gdesc->txd.om = 0;
			gdesc->txd.msscof = 0;
		}
		le32_add_cpu(&tq->shared->txNumDeferred, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		gdesc->txd.ti = 1;
		gdesc->txd.tci = skb_vlan_tag_get(skb);
	}

	/* finally flips the GEN bit of the SOP desc. */
	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
						  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
	/* Finished updating in bitfields of Tx Desc, so write them in original
	 * place.
	 */
	vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
			   (struct Vmxnet3_TxDesc *)ctx.sop_txd);
	gdesc = ctx.sop_txd;
#endif
	netdev_dbg(adapter->netdev,
		   "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
		   (u32)(ctx.sop_txd -
		   tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
		   le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

	spin_unlock_irqrestore(&tq->tx_lock, flags);

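	/* Ring the doorbell (TXPROD register) only after enough sends have
	 * been deferred; batching the register writes keeps device register
	 * traffic (and thus VM exits) down.
	 */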
	if (le32_to_cpu(tq->shared->txNumDeferred) >=
					le32_to_cpu(tq->shared->txThreshold)) {
		tq->shared->txNumDeferred = 0;
		VMXNET3_WRITE_BAR0_REG(adapter,
				       VMXNET3_REG_TXPROD + tq->qid * 8,
				       tq->tx_ring.next2fill);
	}

	return NETDEV_TX_OK;

unlock_drop_pkt:
	spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
	tq->stats.drop_total++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct vmxnet3_adapter *adapter = netdev_priv(netdev);

	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
		struct sk_buff *skb,
		union Vmxnet3_GenericDesc *gdesc)
{
	if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
		if (gdesc->rcd.v4 &&
		    (le32_to_cpu(gdesc->dword[3]) &
		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
			BUG_ON(gdesc->rcd.frg);
		} else {
			if (gdesc->rcd.csum) {
				skb->csum = htons(gdesc->rcd.csum);
				skb->ip_summed = CHECKSUM_PARTIAL;
			} else {
				skb_checksum_none_assert(skb);
			}
		}
	} else {
		skb_checksum_none_assert(skb);
	}
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
		 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
	rq->stats.drop_err++;
	if (!rcd->fcs)
		rq->stats.drop_fcs++;

	rq->stats.drop_total++;

	/*
	 * We do not unmap and chain the rx buffer to the skb.
	 * We basically pretend this buffer is not used and will be recycled
	 * by vmxnet3_rq_alloc_rx_buf()
	 */

	/*
	 * ctx->skb may be NULL if this is the first and the only one
	 * desc for the pkt
	 */
	if (ctx->skb)
		dev_kfree_skb_irq(ctx->skb);

	ctx->skb = NULL;
}


static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
		    union Vmxnet3_GenericDesc *gdesc)
{
	u32 hlen, maplen;
	union {
		void *ptr;
		struct ethhdr *eth;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		struct tcphdr *tcp;
	} hdr;
	BUG_ON(gdesc->rcd.tcp == 0);

	maplen = skb_headlen(skb);
	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
		return 0;

	hdr.eth = eth_hdr(skb);
	if (gdesc->rcd.v4) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
		hdr.ptr += sizeof(struct ethhdr);
		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
		hlen = hdr.ipv4->ihl << 2;
		hdr.ptr += hdr.ipv4->ihl << 2;
	} else if (gdesc->rcd.v6) {
		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
		hdr.ptr += sizeof(struct ethhdr);
		/* Use an estimated value, since we also need to handle
		 * TSO case.
		 */
		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
			return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		hlen = sizeof(struct ipv6hdr);
		hdr.ptr += sizeof(struct ipv6hdr);
	} else {
		/* Non-IP pkt, don't estimate header length */
		return 0;
	}

	if (hlen + sizeof(struct tcphdr) > maplen)
		return 0;

	return (hlen + (hdr.tcp->doff << 2));
}

static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
		       struct vmxnet3_adapter *adapter, int quota)
{
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	u32 num_pkts = 0;
	bool skip_page_frags = false;
	struct Vmxnet3_RxCompDesc *rcd;
	struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
	u16 segCnt = 0, mss = 0;
#ifdef __BIG_ENDIAN_BITFIELD
	struct Vmxnet3_RxDesc rxCmdDesc;
	struct Vmxnet3_RxCompDesc rxComp;
#endif
	vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
			  &rxComp);
	while (rcd->gen == rq->comp_ring.gen) {
		struct vmxnet3_rx_buf_info *rbi;
		struct sk_buff *skb, *new_skb = NULL;
		struct page *new_page = NULL;
		dma_addr_t new_dma_addr;
		int num_to_alloc;
		struct Vmxnet3_RxDesc *rxd;
		u32 idx, ring_idx;
		struct vmxnet3_cmd_ring	*ring = NULL;
		if (num_pkts >= quota) {
			/* we may stop even before we see the EOP desc of
			 * the current pkt
			 */
			break;
		}
		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
		       rcd->rqID != rq->dataRingQid);
		idx = rcd->rxdIdx;
		ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
		ring = rq->rx_ring + ring_idx;
		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
				  &rxCmdDesc);
		rbi = rq->buf_info[ring_idx] + idx;

		BUG_ON(rxd->addr != rbi->dma_addr ||
		       rxd->len != rbi->len);

		if (unlikely(rcd->eop && rcd->err)) {
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
			goto rcd_done;
		}

		if (rcd->sop) { /* first buf of the pkt */
			bool rxDataRingUsed;
			u16 len;

			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
			       (rcd->rqID != rq->qid &&
				rcd->rqID != rq->dataRingQid));

			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
			BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

			if (unlikely(rcd->len == 0)) {
				/* Pretend the rx buffer is skipped. */
				BUG_ON(!(rcd->sop && rcd->eop));
				netdev_dbg(adapter->netdev,
					   "rxRing[%u][%u] 0 length\n",
					   ring_idx, idx);
				goto rcd_done;
			}

			skip_page_frags = false;
			ctx->skb = rbi->skb;

			rxDataRingUsed =
				VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
			len = rxDataRingUsed ? rcd->len : rbi->len;
			new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
							    len);
			if (new_skb == NULL) {
				/* Skb allocation failed, do not handover this
				 * skb to stack. Reuse it. Drop the existing pkt
				 */
				rq->stats.rx_buf_alloc_failure++;
				ctx->skb = NULL;
				rq->stats.drop_total++;
				skip_page_frags = true;
				goto rcd_done;
			}

			if (rxDataRingUsed) {
				size_t sz;

				BUG_ON(rcd->len > rq->data_ring.desc_size);

				ctx->skb = new_skb;
				sz = rcd->rxdIdx * rq->data_ring.desc_size;
				memcpy(new_skb->data,
				       &rq->data_ring.base[sz], rcd->len);
			} else {
				ctx->skb = rbi->skb;

				new_dma_addr =
					dma_map_single(&adapter->pdev->dev,
						       new_skb->data, rbi->len,
						       PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					dev_kfree_skb(new_skb);
					/* Skb allocation failed, do not
					 * handover this skb to stack. Reuse
					 * it. Drop the existing pkt.
					 */
					rq->stats.rx_buf_alloc_failure++;
					ctx->skb = NULL;
					rq->stats.drop_total++;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_single(&adapter->pdev->dev,
						 rbi->dma_addr,
						 rbi->len,
						 PCI_DMA_FROMDEVICE);

				/* Immediate refill */
				rbi->skb = new_skb;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}

#ifdef VMXNET3_RSS
			if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
			    (adapter->netdev->features & NETIF_F_RXHASH))
				skb_set_hash(ctx->skb,
					     le32_to_cpu(rcd->rssHash),
					     PKT_HASH_TYPE_L3);
#endif
			skb_put(ctx->skb, rcd->len);

			if (VMXNET3_VERSION_GE_2(adapter) &&
			    rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
				struct Vmxnet3_RxCompDescExt *rcdlro;
				rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;

				segCnt = rcdlro->segCnt;
				WARN_ON_ONCE(segCnt == 0);
				mss = rcdlro->mss;
				if (unlikely(segCnt <= 1))
					segCnt = 0;
			} else {
				segCnt = 0;
			}
		} else {
			BUG_ON(ctx->skb == NULL && !skip_page_frags);

			/* non SOP buffer must be type 1 in most cases */
			BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
			BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

			/* If an sop buffer was dropped, skip all
			 * following non-sop fragments. They will be reused.
			 */
			if (skip_page_frags)
				goto rcd_done;

			if (rcd->len) {
				new_page = alloc_page(GFP_ATOMIC);
				/* Replacement page frag could not be allocated.
				 * Reuse this page. Drop the pkt and free the
				 * skb which contained this page as a frag. Skip
				 * processing all the following non-sop frags.
				 */
				if (unlikely(!new_page)) {
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}
				new_dma_addr = dma_map_page(&adapter->pdev->dev,
							    new_page,
							    0, PAGE_SIZE,
							    PCI_DMA_FROMDEVICE);
				if (dma_mapping_error(&adapter->pdev->dev,
						      new_dma_addr)) {
					put_page(new_page);
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					ctx->skb = NULL;
					skip_page_frags = true;
					goto rcd_done;
				}

				dma_unmap_page(&adapter->pdev->dev,
					       rbi->dma_addr, rbi->len,
					       PCI_DMA_FROMDEVICE);

				vmxnet3_append_frag(ctx->skb, rcd, rbi);

				/* Immediate refill */
				rbi->page = new_page;
				rbi->dma_addr = new_dma_addr;
				rxd->addr = cpu_to_le64(rbi->dma_addr);
				rxd->len = rbi->len;
			}
		}


		skb = ctx->skb;
		if (rcd->eop) {
			u32 mtu = adapter->netdev->mtu;
			skb->len += skb->data_len;

			vmxnet3_rx_csum(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
			skb->protocol = eth_type_trans(skb, adapter->netdev);
			if (!rcd->tcp || !adapter->lro)
				goto not_lro;

			if (segCnt != 0 && mss != 0) {
				skb_shinfo(skb)->gso_type = rcd->v4 ?
					SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				skb_shinfo(skb)->gso_size = mss;
				skb_shinfo(skb)->gso_segs = segCnt;
			} else if (segCnt != 0 || skb->len > mtu) {
				u32 hlen;

				hlen = vmxnet3_get_hdr_len(adapter, skb,
					(union Vmxnet3_GenericDesc *)rcd);
				if (hlen == 0)
					goto not_lro;

				skb_shinfo(skb)->gso_type =
					rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
				if (segCnt != 0) {
					skb_shinfo(skb)->gso_segs = segCnt;
					skb_shinfo(skb)->gso_size =
						DIV_ROUND_UP(skb->len -
							hlen, segCnt);
				} else {
					skb_shinfo(skb)->gso_size = mtu - hlen;
				}
			}
not_lro:
			if (unlikely(rcd->ts))
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);

			if (adapter->netdev->features & NETIF_F_LRO)
				netif_receive_skb(skb);
			else
				napi_gro_receive(&rq->napi, skb);

			ctx->skb = NULL;
			num_pkts++;
		}

rcd_done:
		/* device may have skipped some rx descs */
		ring->next2comp = idx;
		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
					  &rxCmdDesc);
			BUG_ON(!rxd->addr);

			/* Recv desc is ready to be used by the device */
			rxd->gen = ring->gen;
			vmxnet3_cmd_ring_adv_next2fill(ring);
			num_to_alloc--;
		}

		/* if needed, update the register */
		if (unlikely(rq->shared->updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(adapter,
					       rxprod_reg[ring_idx] + rq->qid * 8,
					       ring->next2fill);
		}

		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
		vmxnet3_getRxComp(rcd,
				  &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
	}

	return num_pkts;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
		   struct vmxnet3_adapter *adapter)
{
	u32 i, ring_idx;
	struct Vmxnet3_RxDesc *rxd;

	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
			struct Vmxnet3_RxDesc rxDesc;
#endif
			vmxnet3_getRxDesc(rxd,
				&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);

			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
			    rq->buf_info[ring_idx][i].skb) {
				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
						 rxd->len, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				rq->buf_info[ring_idx][i].skb = NULL;
			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
				   rq->buf_info[ring_idx][i].page) {
				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
					       rxd->len, PCI_DMA_FROMDEVICE);
				put_page(rq->buf_info[ring_idx][i].page);
				rq->buf_info[ring_idx][i].page = NULL;
			}
		}

		rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
		rq->rx_ring[ring_idx].next2fill =
					rq->rx_ring[ring_idx].next2comp = 0;
	}

	rq->comp_ring.gen = VMXNET3_INIT_GEN;
	rq->comp_ring.next2proc = 0;
}


static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
}


static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
			       struct vmxnet3_adapter *adapter)
{
	int i;
	int j;

	/* all rx buffers must have already been freed */
	for (i = 0; i < 2; i++) {
		if (rq->buf_info[i]) {
			for (j = 0; j < rq->rx_ring[i].size; j++)
				BUG_ON(rq->buf_info[i][j].page != NULL);
		}
	}


	for (i = 0; i < 2; i++) {
		if (rq->rx_ring[i].base) {
			dma_free_coherent(&adapter->pdev->dev,
					  rq->rx_ring[i].size
					  * sizeof(struct Vmxnet3_RxDesc),
					  rq->rx_ring[i].base,
					  rq->rx_ring[i].basePA);
			rq->rx_ring[i].base = NULL;
		}
	}

	if (rq->data_ring.base) {
		dma_free_coherent(&adapter->pdev->dev,
				  rq->rx_ring[0].size * rq->data_ring.desc_size,
				  rq->data_ring.base, rq->data_ring.basePA);
		rq->data_ring.base = NULL;
	}

	if (rq->comp_ring.base) {
		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
				  * sizeof(struct Vmxnet3_RxCompDesc),
				  rq->comp_ring.base, rq->comp_ring.basePA);
		rq->comp_ring.base = NULL;
	}

	if (rq->buf_info[0]) {
		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
			(rq->rx_ring[0].size + rq->rx_ring[1].size);
		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
				  rq->buf_info_pa);
		rq->buf_info[0] = rq->buf_info[1] = NULL;
	}
}

void
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];

		if (rq->data_ring.base) {
			dma_free_coherent(&adapter->pdev->dev,
					  (rq->rx_ring[0].size *
					  rq->data_ring.desc_size),
					  rq->data_ring.base,
					  rq->data_ring.basePA);
			rq->data_ring.base = NULL;
			rq->data_ring.desc_size = 0;
		}
	}
}

static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
		struct vmxnet3_adapter *adapter)
{
	int i;

	/* initialize buf_info */
	for (i = 0; i < rq->rx_ring[0].size; i++) {

		/* 1st buf for a pkt is skbuff */
		if (i % adapter->rx_buf_per_pkt == 0) {
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
			rq->buf_info[0][i].len = adapter->skb_buf_size;
		} else { /* subsequent bufs for a pkt are frags */
			rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
			rq->buf_info[0][i].len = PAGE_SIZE;
		}
	}
	for (i = 0; i < rq->rx_ring[1].size; i++) {
		rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
		rq->buf_info[1][i].len = PAGE_SIZE;
	}

	/* reset internal state and allocate buffers for both rings */
	for (i = 0; i < 2; i++) {
		rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;

		memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
		       sizeof(struct Vmxnet3_RxDesc));
		rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
	}
	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
		/* need at least 1 rx buffer for the 1st ring */
		return -ENOMEM;
	}
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

	/* reset the comp ring */
	rq->comp_ring.next2proc = 0;
	memset(rq->comp_ring.base, 0, rq->comp_ring.size *
	       sizeof(struct Vmxnet3_RxCompDesc));
	rq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset rxctx */
	rq->rx_ctx.skb = NULL;

	/* stats are not reset */
	return 0;
}


static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev, "%s: failed to "
				"initialize rx queue%i\n",
				adapter->netdev->name, i);
			break;
		}
	}
	return err;

}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
	int i;
	size_t sz;
	struct vmxnet3_rx_buf_info *bi;

	for (i = 0; i < 2; i++) {

		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
		rq->rx_ring[i].base = dma_alloc_coherent(
						&adapter->pdev->dev, sz,
						&rq->rx_ring[i].basePA,
						GFP_KERNEL);
		if (!rq->rx_ring[i].base) {
			netdev_err(adapter->netdev,
				   "failed to allocate rx ring %d\n", i);
			goto err;
		}
	}

	if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
		sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
		rq->data_ring.base =
			dma_alloc_coherent(&adapter->pdev->dev, sz,
					   &rq->data_ring.basePA,
					   GFP_KERNEL);
		if (!rq->data_ring.base) {
			netdev_err(adapter->netdev,
				   "rx data ring will be disabled\n");
			adapter->rxdataring_enabled = false;
		}
	} else {
		rq->data_ring.base = NULL;
		rq->data_ring.desc_size = 0;
	}

	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
						&rq->comp_ring.basePA,
						GFP_KERNEL);
	if (!rq->comp_ring.base) {
		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
		goto err;
	}

	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
						   rq->rx_ring[1].size);
	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
				 GFP_KERNEL);
	if (!bi)
		goto err;

	rq->buf_info[0] = bi;
	rq->buf_info[1] = bi + rq->rx_ring[0].size;

	return 0;

err:
	vmxnet3_rq_destroy(rq, adapter);
	return -ENOMEM;
}


static int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
	int i, err = 0;

	adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
		if (unlikely(err)) {
			dev_err(&adapter->netdev->dev,
				"%s: failed to create rx queue%i\n",
				adapter->netdev->name, i);
			goto err_out;
		}
	}

	if (!adapter->rxdataring_enabled)
		vmxnet3_rq_destroy_all_rxdataring(adapter);

	return err;
err_out:
	vmxnet3_rq_destroy_all(adapter);
	return err;

}

/* Multiple queue aware polling function for tx and rx */

static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
	int rcd_done = 0, i;
	if (unlikely(adapter->shared->ecr))
		vmxnet3_process_events(adapter);
	for (i = 0; i < adapter->num_tx_queues; i++)
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);

	for (i = 0; i < adapter->num_rx_queues; i++)
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
						   adapter, budget);
	return rcd_done;
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
					  struct vmxnet3_rx_queue, napi);
	int rxd_done;

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_all_intrs(rx_queue->adapter);
	}
	return rxd_done;
}
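
/* Per the NAPI contract, returning less than the budget signals that the
 * rings are drained, so vmxnet3_poll() stops polling and re-enables the
 * device interrupts above.
 */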

/*
 * NAPI polling function for MSI-X mode with multiple Rx queues
 * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
 */

static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
	struct vmxnet3_rx_queue *rq = container_of(napi,
						struct vmxnet3_rx_queue, napi);
	struct vmxnet3_adapter *adapter = rq->adapter;
	int rxd_done;

	/* When sharing interrupt with corresponding tx queue, process
	 * tx completions in that queue as well
	 */
	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
		struct vmxnet3_tx_queue *tq =
				&adapter->tx_queue[rq - adapter->rx_queue];
		vmxnet3_tq_tx_complete(tq, adapter);
	}

	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);

	if (rxd_done < budget) {
		napi_complete(napi);
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
	}
	return rxd_done;
}


#ifdef CONFIG_PCI_MSI

/*
 * Handle completion interrupts on tx queues
 * Returns whether or not the intr is handled
 */

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	/* Handle the case where only one irq is allocated for all tx queues */
	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
		int i;
		for (i = 0; i < adapter->num_tx_queues; i++) {
			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
			vmxnet3_tq_tx_complete(txq, adapter);
		}
	} else {
		vmxnet3_tq_tx_complete(tq, adapter);
	}
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

	return IRQ_HANDLED;
}


/*
 * Handle completion interrupts on rx queues. Returns whether or not the
 * intr is handled
 */

static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
	struct vmxnet3_rx_queue *rq = data;
	struct vmxnet3_adapter *adapter = rq->adapter;

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

	return IRQ_HANDLED;
}

/*
 *----------------------------------------------------------------------------
 *
 * vmxnet3_msix_event --
 *
 *    vmxnet3 msix event intr handler
 *
 * Result:
 *    whether or not the intr is handled
 *
 *----------------------------------------------------------------------------
 */

static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
	struct net_device *dev = data;
	struct vmxnet3_adapter *adapter = netdev_priv(dev);

	/* disable intr if needed */
	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);

	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);

	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);

	return IRQ_HANDLED;
}

#endif /* CONFIG_PCI_MSI */
1973
1974
1975 /* Interrupt handler for vmxnet3 */
1976 static irqreturn_t
1977 vmxnet3_intr(int irq, void *dev_id)
1978 {
1979 struct net_device *dev = dev_id;
1980 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1981
1982 if (adapter->intr.type == VMXNET3_IT_INTX) {
1983 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1984 if (unlikely(icr == 0))
1985 /* not ours */
1986 return IRQ_NONE;
1987 }
1988
1989
1990 /* disable intr if needed */
1991 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1992 vmxnet3_disable_all_intrs(adapter);
1993
1994 napi_schedule(&adapter->rx_queue[0].napi);
1995
1996 return IRQ_HANDLED;
1997 }
1998
1999 #ifdef CONFIG_NET_POLL_CONTROLLER
2000
2001 /* netpoll callback. */
2002 static void
2003 vmxnet3_netpoll(struct net_device *netdev)
2004 {
2005 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2006
2007 switch (adapter->intr.type) {
2008 #ifdef CONFIG_PCI_MSI
2009 case VMXNET3_IT_MSIX: {
2010 int i;
2011 for (i = 0; i < adapter->num_rx_queues; i++)
2012 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2013 break;
2014 }
2015 #endif
2016 case VMXNET3_IT_MSI:
2017 default:
2018 vmxnet3_intr(0, adapter->netdev);
2019 break;
2020 }
2021
2022 }
2023 #endif /* CONFIG_NET_POLL_CONTROLLER */
2024
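/*
 * Request one irq per allocated vector and record each queue's intr_idx.
 * The assignment follows share_intr: with TXSHARE all tx queues share
 * vector 0, with BUDDYSHARE each rx queue reuses its buddy tx queue's
 * vector, and the last vector is always reserved for device events.
 */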
2025 static int
2026 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2027 {
2028 struct vmxnet3_intr *intr = &adapter->intr;
2029 int err = 0, i;
2030 int vector = 0;
2031
2032 #ifdef CONFIG_PCI_MSI
2033 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2034 for (i = 0; i < adapter->num_tx_queues; i++) {
2035 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2036 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2037 adapter->netdev->name, vector);
2038 err = request_irq(
2039 intr->msix_entries[vector].vector,
2040 vmxnet3_msix_tx, 0,
2041 adapter->tx_queue[i].name,
2042 &adapter->tx_queue[i]);
2043 } else {
2044 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2045 adapter->netdev->name, vector);
2046 }
2047 if (err) {
2048 dev_err(&adapter->netdev->dev,
2049 "Failed to request irq for MSIX, %s, "
2050 "error %d\n",
2051 adapter->tx_queue[i].name, err);
2052 return err;
2053 }
2054
2055 /* Handle the case where only 1 MSIx was allocated for
2056 * all tx queues */
2057 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2058 for (; i < adapter->num_tx_queues; i++)
2059 adapter->tx_queue[i].comp_ring.intr_idx
2060 = vector;
2061 vector++;
2062 break;
2063 } else {
2064 adapter->tx_queue[i].comp_ring.intr_idx
2065 = vector++;
2066 }
2067 }
2068 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2069 vector = 0;
2070
2071 for (i = 0; i < adapter->num_rx_queues; i++) {
2072 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2073 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2074 adapter->netdev->name, vector);
2075 else
2076 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2077 adapter->netdev->name, vector);
2078 err = request_irq(intr->msix_entries[vector].vector,
2079 vmxnet3_msix_rx, 0,
2080 adapter->rx_queue[i].name,
2081 &(adapter->rx_queue[i]));
2082 if (err) {
2083 netdev_err(adapter->netdev,
2084 "Failed to request irq for MSIX, "
2085 "%s, error %d\n",
2086 adapter->rx_queue[i].name, err);
2087 return err;
2088 }
2089
2090 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2091 }
2092
2093 sprintf(intr->event_msi_vector_name, "%s-event-%d",
2094 adapter->netdev->name, vector);
2095 err = request_irq(intr->msix_entries[vector].vector,
2096 vmxnet3_msix_event, 0,
2097 intr->event_msi_vector_name, adapter->netdev);
2098 intr->event_intr_idx = vector;
2099
2100 } else if (intr->type == VMXNET3_IT_MSI) {
2101 adapter->num_rx_queues = 1;
2102 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2103 adapter->netdev->name, adapter->netdev);
2104 } else {
2105 #endif
2106 adapter->num_rx_queues = 1;
2107 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2108 IRQF_SHARED, adapter->netdev->name,
2109 adapter->netdev);
2110 #ifdef CONFIG_PCI_MSI
2111 }
2112 #endif
2113 intr->num_intrs = vector + 1;
2114 if (err) {
2115 netdev_err(adapter->netdev,
2116 "Failed to request irq (intr type:%d), error %d\n",
2117 intr->type, err);
2118 } else {
2119 /* Number of rx queues will not change after this */
2120 for (i = 0; i < adapter->num_rx_queues; i++) {
2121 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2122 rq->qid = i;
2123 rq->qid2 = i + adapter->num_rx_queues;
2124 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2125 }
2126
2127 /* init our intr settings */
2128 for (i = 0; i < intr->num_intrs; i++)
2129 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
2130 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2131 adapter->intr.event_intr_idx = 0;
2132 for (i = 0; i < adapter->num_tx_queues; i++)
2133 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2134 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2135 }
2136
2137 netdev_info(adapter->netdev,
2138 "intr type %u, mode %u, %u vectors allocated\n",
2139 intr->type, intr->mask_mode, intr->num_intrs);
2140 }
2141
2142 return err;
2143 }
2144
2145
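/*
 * Release the irqs in the same order that vmxnet3_request_irqs()
 * assigned them, finishing with the event vector.
 */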
2146 static void
2147 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2148 {
2149 struct vmxnet3_intr *intr = &adapter->intr;
2150 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
2151
2152 switch (intr->type) {
2153 #ifdef CONFIG_PCI_MSI
2154 case VMXNET3_IT_MSIX:
2155 {
2156 int i, vector = 0;
2157
2158 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2159 for (i = 0; i < adapter->num_tx_queues; i++) {
2160 free_irq(intr->msix_entries[vector++].vector,
2161 &(adapter->tx_queue[i]));
2162 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2163 break;
2164 }
2165 }
2166
2167 for (i = 0; i < adapter->num_rx_queues; i++) {
2168 free_irq(intr->msix_entries[vector++].vector,
2169 &(adapter->rx_queue[i]));
2170 }
2171
2172 free_irq(intr->msix_entries[vector].vector,
2173 adapter->netdev);
2174 BUG_ON(vector >= intr->num_intrs);
2175 break;
2176 }
2177 #endif
2178 case VMXNET3_IT_MSI:
2179 free_irq(adapter->pdev->irq, adapter->netdev);
2180 break;
2181 case VMXNET3_IT_INTX:
2182 free_irq(adapter->pdev->irq, adapter->netdev);
2183 break;
2184 default:
2185 BUG();
2186 }
2187 }
2188
2189
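/*
 * Rewrite the device's VLAN filter table from the driver's active_vlans
 * bitmap; entry 0 is always set so untagged frames keep passing. Each
 * table entry is one bit per VLAN id, in effect
 * vfTable[vid >> 5] |= 1 << (vid & 31) (assuming the usual layout behind
 * VMXNET3_SET_VFTABLE_ENTRY).
 */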
2190 static void
2191 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2192 {
2193 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2194 u16 vid;
2195
2196 /* allow untagged pkts */
2197 VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
2198
2199 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2200 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2201 }
2202
2203
2204 static int
2205 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
2206 {
2207 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2208
2209 if (!(netdev->flags & IFF_PROMISC)) {
2210 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2211 unsigned long flags;
2212
2213 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
2214 spin_lock_irqsave(&adapter->cmd_lock, flags);
2215 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2216 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2217 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2218 }
2219
2220 set_bit(vid, adapter->active_vlans);
2221
2222 return 0;
2223 }
2224
2225
2226 static int
2227 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
2228 {
2229 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2230
2231 if (!(netdev->flags & IFF_PROMISC)) {
2232 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2233 unsigned long flags;
2234
2235 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
2236 spin_lock_irqsave(&adapter->cmd_lock, flags);
2237 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2238 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2239 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2240 }
2241
2242 clear_bit(vid, adapter->active_vlans);
2243
2244 return 0;
2245 }
2246
2247
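/*
 * Flatten the netdev multicast list into a freshly allocated array of
 * ETH_ALEN-byte entries for the device. Returns NULL if the list would
 * overflow the device's u16 table-length field or the allocation fails.
 */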
2248 static u8 *
2249 vmxnet3_copy_mc(struct net_device *netdev)
2250 {
2251 u8 *buf = NULL;
2252 u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
2253
2254 /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
2255 if (sz <= 0xffff) {
2256 /* We may be called with BH disabled */
2257 buf = kmalloc(sz, GFP_ATOMIC);
2258 if (buf) {
2259 struct netdev_hw_addr *ha;
2260 int i = 0;
2261
2262 netdev_for_each_mc_addr(ha, netdev)
2263 memcpy(buf + i++ * ETH_ALEN, ha->addr,
2264 ETH_ALEN);
2265 }
2266 }
2267 return buf;
2268 }
2269
2270
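/*
 * Recompute the device rx mode (unicast/broadcast/multicast/promiscuous)
 * from netdev->flags, hand the device a DMA-mapped copy of the multicast
 * list when one is needed (falling back to ALL_MULTI if the copy fails),
 * and push the updated filters down with UPDATE_RX_MODE.
 */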
2271 static void
2272 vmxnet3_set_mc(struct net_device *netdev)
2273 {
2274 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2275 unsigned long flags;
2276 struct Vmxnet3_RxFilterConf *rxConf =
2277 &adapter->shared->devRead.rxFilterConf;
2278 u8 *new_table = NULL;
2279 dma_addr_t new_table_pa = 0;
2280 u32 new_mode = VMXNET3_RXM_UCAST;
2281
2282 if (netdev->flags & IFF_PROMISC) {
2283 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2284 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2285
2286 new_mode |= VMXNET3_RXM_PROMISC;
2287 } else {
2288 vmxnet3_restore_vlan(adapter);
2289 }
2290
2291 if (netdev->flags & IFF_BROADCAST)
2292 new_mode |= VMXNET3_RXM_BCAST;
2293
2294 if (netdev->flags & IFF_ALLMULTI)
2295 new_mode |= VMXNET3_RXM_ALL_MULTI;
2296 else
2297 if (!netdev_mc_empty(netdev)) {
2298 new_table = vmxnet3_copy_mc(netdev);
2299 if (new_table) {
2300 size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
2301
2302 rxConf->mfTableLen = cpu_to_le16(sz);
2303 new_table_pa = dma_map_single(
2304 &adapter->pdev->dev,
2305 new_table,
2306 sz,
2307 PCI_DMA_TODEVICE);
2308 }
2309
2310 if (!dma_mapping_error(&adapter->pdev->dev,
2311 new_table_pa)) {
2312 new_mode |= VMXNET3_RXM_MCAST;
2313 rxConf->mfTablePA = cpu_to_le64(new_table_pa);
2314 } else {
2315 netdev_info(netdev,
2316 "failed to copy mcast list, setting ALL_MULTI\n");
2317 new_mode |= VMXNET3_RXM_ALL_MULTI;
2318 }
2319 }
2320
2321 if (!(new_mode & VMXNET3_RXM_MCAST)) {
2322 rxConf->mfTableLen = 0;
2323 rxConf->mfTablePA = 0;
2324 }
2325
2326 spin_lock_irqsave(&adapter->cmd_lock, flags);
2327 if (new_mode != rxConf->rxMode) {
2328 rxConf->rxMode = cpu_to_le32(new_mode);
2329 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2330 VMXNET3_CMD_UPDATE_RX_MODE);
2331 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2332 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2333 }
2334
2335 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2336 VMXNET3_CMD_UPDATE_MAC_FILTERS);
2337 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2338
2339 if (new_table_pa)
2340 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2341 rxConf->mfTableLen, PCI_DMA_TODEVICE);
2342 kfree(new_table);
2343 }
2344
2345 void
2346 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2347 {
2348 int i;
2349
2350 for (i = 0; i < adapter->num_rx_queues; i++)
2351 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2352 }
2353
2354
2355 /*
2356 * Set up driver_shared based on settings in adapter.
2357 */
2358
2359 static void
2360 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2361 {
2362 struct Vmxnet3_DriverShared *shared = adapter->shared;
2363 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2364 struct Vmxnet3_TxQueueConf *tqc;
2365 struct Vmxnet3_RxQueueConf *rqc;
2366 int i;
2367
2368 memset(shared, 0, sizeof(*shared));
2369
2370 /* driver settings */
2371 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2372 devRead->misc.driverInfo.version = cpu_to_le32(
2373 VMXNET3_DRIVER_VERSION_NUM);
2374 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2375 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2376 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
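/* the gos bitfields share one 32-bit word; byte-swap it to LE as a unit */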
2377 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2378 *((u32 *)&devRead->misc.driverInfo.gos));
2379 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2380 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2381
2382 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2383 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2384
2385 /* set up feature flags */
2386 if (adapter->netdev->features & NETIF_F_RXCSUM)
2387 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2388
2389 if (adapter->netdev->features & NETIF_F_LRO) {
2390 devRead->misc.uptFeatures |= UPT1_F_LRO;
2391 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2392 }
2393 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2394 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2395
2396 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2397 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2398 devRead->misc.queueDescLen = cpu_to_le32(
2399 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2400 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2401
2402 /* tx queue settings */
2403 devRead->misc.numTxQueues = adapter->num_tx_queues;
2404 for (i = 0; i < adapter->num_tx_queues; i++) {
2405 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2406 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2407 tqc = &adapter->tqd_start[i].conf;
2408 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2409 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2410 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2411 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2412 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2413 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2414 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2415 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2416 tqc->ddLen = cpu_to_le32(
2417 sizeof(struct vmxnet3_tx_buf_info) *
2418 tqc->txRingSize);
2419 tqc->intrIdx = tq->comp_ring.intr_idx;
2420 }
2421
2422 /* rx queue settings */
2423 devRead->misc.numRxQueues = adapter->num_rx_queues;
2424 for (i = 0; i < adapter->num_rx_queues; i++) {
2425 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2426 rqc = &adapter->rqd_start[i].conf;
2427 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2428 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2429 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
2430 rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
2431 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
2432 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
2433 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
2434 rqc->ddLen = cpu_to_le32(
2435 sizeof(struct vmxnet3_rx_buf_info) *
2436 (rqc->rxRingSize[0] +
2437 rqc->rxRingSize[1]));
2438 rqc->intrIdx = rq->comp_ring.intr_idx;
2439 if (VMXNET3_VERSION_GE_3(adapter)) {
2440 rqc->rxDataRingBasePA =
2441 cpu_to_le64(rq->data_ring.basePA);
2442 rqc->rxDataRingDescSize =
2443 cpu_to_le16(rq->data_ring.desc_size);
2444 }
2445 }
2446
2447 #ifdef VMXNET3_RSS
2448 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2449
2450 if (adapter->rss) {
2451 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2452
2453 devRead->misc.uptFeatures |= UPT1_F_RSS;
2454 devRead->misc.numRxQueues = adapter->num_rx_queues;
2455 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2456 UPT1_RSS_HASH_TYPE_IPV4 |
2457 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2458 UPT1_RSS_HASH_TYPE_IPV6;
2459 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2460 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2461 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2462 netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
2463
2464 for (i = 0; i < rssConf->indTableSize; i++)
2465 rssConf->indTable[i] = ethtool_rxfh_indir_default(
2466 i, adapter->num_rx_queues);
2467
2468 devRead->rssConfDesc.confVer = 1;
2469 devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
2470 devRead->rssConfDesc.confPA =
2471 cpu_to_le64(adapter->rss_conf_pa);
2472 }
2473
2474 #endif /* VMXNET3_RSS */
2475
2476 /* intr settings */
2477 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2478 VMXNET3_IMM_AUTO;
2479 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2480 for (i = 0; i < adapter->intr.num_intrs; i++)
2481 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2482
2483 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2484 devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2485
2486 /* rx filter settings */
2487 devRead->rxFilterConf.rxMode = 0;
2488 vmxnet3_restore_vlan(adapter);
2489 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2490
2491 /* the rest are already zeroed */
2492 }
2493
2494 static void
2495 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2496 {
2497 struct Vmxnet3_DriverShared *shared = adapter->shared;
2498 union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
2499 unsigned long flags;
2500
2501 if (!VMXNET3_VERSION_GE_3(adapter))
2502 return;
2503
2504 spin_lock_irqsave(&adapter->cmd_lock, flags);
2505 cmdInfo->varConf.confVer = 1;
2506 cmdInfo->varConf.confLen =
2507 cpu_to_le32(sizeof(*adapter->coal_conf));
2508 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2509
2510 if (adapter->default_coal_mode) {
2511 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2512 VMXNET3_CMD_GET_COALESCE);
2513 } else {
2514 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2515 VMXNET3_CMD_SET_COALESCE);
2516 }
2517
2518 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2519 }
2520
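/*
 * Bring the device up: init the rings, request irqs, publish
 * driver_shared, issue ACTIVATE_DEV, prime the rx producer registers,
 * then enable napi and interrupts. The queues must already be created.
 */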
2521 int
2522 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2523 {
2524 int err, i;
2525 u32 ret;
2526 unsigned long flags;
2527
2528 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2529 " ring sizes %u %u %u\n", adapter->netdev->name,
2530 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2531 adapter->tx_queue[0].tx_ring.size,
2532 adapter->rx_queue[0].rx_ring[0].size,
2533 adapter->rx_queue[0].rx_ring[1].size);
2534
2535 vmxnet3_tq_init_all(adapter);
2536 err = vmxnet3_rq_init_all(adapter);
2537 if (err) {
2538 netdev_err(adapter->netdev,
2539 "Failed to init rx queue error %d\n", err);
2540 goto rq_err;
2541 }
2542
2543 err = vmxnet3_request_irqs(adapter);
2544 if (err) {
2545 netdev_err(adapter->netdev,
2546 "Failed to setup irq for error %d\n", err);
2547 goto irq_err;
2548 }
2549
2550 vmxnet3_setup_driver_shared(adapter);
2551
2552 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2553 adapter->shared_pa));
2554 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2555 adapter->shared_pa));
2556 spin_lock_irqsave(&adapter->cmd_lock, flags);
2557 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2558 VMXNET3_CMD_ACTIVATE_DEV);
2559 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2560 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2561
2562 if (ret != 0) {
2563 netdev_err(adapter->netdev,
2564 "Failed to activate dev: error %u\n", ret);
2565 err = -EINVAL;
2566 goto activate_err;
2567 }
2568
2569 vmxnet3_init_coalesce(adapter);
2570
2571 for (i = 0; i < adapter->num_rx_queues; i++) {
2572 VMXNET3_WRITE_BAR0_REG(adapter,
2573 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2574 adapter->rx_queue[i].rx_ring[0].next2fill);
2575 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2576 (i * VMXNET3_REG_ALIGN)),
2577 adapter->rx_queue[i].rx_ring[1].next2fill);
2578 }
2579
2580 /* Apply the rx filter settings last. */
2581 vmxnet3_set_mc(adapter->netdev);
2582
2583 /*
2584 * Check link state when first activating device. It will start the
2585 * tx queue if the link is up.
2586 */
2587 vmxnet3_check_link(adapter, true);
2588 for (i = 0; i < adapter->num_rx_queues; i++)
2589 napi_enable(&adapter->rx_queue[i].napi);
2590 vmxnet3_enable_all_intrs(adapter);
2591 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2592 return 0;
2593
2594 activate_err:
2595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2596 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2597 vmxnet3_free_irqs(adapter);
2598 irq_err:
2599 rq_err:
2600 /* free up buffers we allocated */
2601 vmxnet3_rq_cleanup_all(adapter);
2602 return err;
2603 }
2604
2605
2606 void
2607 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2608 {
2609 unsigned long flags;
2610 spin_lock_irqsave(&adapter->cmd_lock, flags);
2611 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2612 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2613 }
2614
2615
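/*
 * Counterpart of vmxnet3_activate_dev(). Idempotent: the QUIESCED state
 * bit makes repeated calls harmless.
 */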
2616 int
2617 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2618 {
2619 int i;
2620 unsigned long flags;
2621 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2622 return 0;
2623
2624
2625 spin_lock_irqsave(&adapter->cmd_lock, flags);
2626 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2627 VMXNET3_CMD_QUIESCE_DEV);
2628 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2629 vmxnet3_disable_all_intrs(adapter);
2630
2631 for (i = 0; i < adapter->num_rx_queues; i++)
2632 napi_disable(&adapter->rx_queue[i].napi);
2633 netif_tx_disable(adapter->netdev);
2634 adapter->link_speed = 0;
2635 netif_carrier_off(adapter->netdev);
2636
2637 vmxnet3_tq_cleanup_all(adapter);
2638 vmxnet3_rq_cleanup_all(adapter);
2639 vmxnet3_free_irqs(adapter);
2640 return 0;
2641 }
2642
2643
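/* Program the MAC into the device: MACL takes bytes 0-3, MACH bytes 4-5. */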
2644 static void
2645 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2646 {
2647 u32 tmp;
2648
2649 tmp = *(u32 *)mac;
2650 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2651
2652 tmp = (mac[5] << 8) | mac[4];
2653 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2654 }
2655
2656
2657 static int
2658 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2659 {
2660 struct sockaddr *addr = p;
2661 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2662
2663 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2664 vmxnet3_write_mac_addr(adapter, addr->sa_data);
2665
2666 return 0;
2667 }
2668
2669
2670 /* ==================== initialization and cleanup routines ============ */
2671
2672 static int
2673 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2674 {
2675 int err;
2676 unsigned long mmio_start, mmio_len;
2677 struct pci_dev *pdev = adapter->pdev;
2678
2679 err = pci_enable_device(pdev);
2680 if (err) {
2681 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
2682 return err;
2683 }
2684
2685 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2686 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2687 dev_err(&pdev->dev,
2688 "pci_set_consistent_dma_mask failed\n");
2689 err = -EIO;
2690 goto err_set_mask;
2691 }
2692 *dma64 = true;
2693 } else {
2694 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2695 dev_err(&pdev->dev,
2696 "pci_set_dma_mask failed\n");
2697 err = -EIO;
2698 goto err_set_mask;
2699 }
2700 *dma64 = false;
2701 }
2702
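/* (1 << 2) - 1 == 0x3: request only BAR 0 and BAR 1 */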
2703 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2704 vmxnet3_driver_name);
2705 if (err) {
2706 dev_err(&pdev->dev,
2707 "Failed to request region for adapter: error %d\n", err);
2708 goto err_set_mask;
2709 }
2710
2711 pci_set_master(pdev);
2712
2713 mmio_start = pci_resource_start(pdev, 0);
2714 mmio_len = pci_resource_len(pdev, 0);
2715 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2716 if (!adapter->hw_addr0) {
2717 dev_err(&pdev->dev, "Failed to map bar0\n");
2718 err = -EIO;
2719 goto err_ioremap;
2720 }
2721
2722 mmio_start = pci_resource_start(pdev, 1);
2723 mmio_len = pci_resource_len(pdev, 1);
2724 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2725 if (!adapter->hw_addr1) {
2726 dev_err(&pdev->dev, "Failed to map bar1\n");
2727 err = -EIO;
2728 goto err_bar1;
2729 }
2730 return 0;
2731
2732 err_bar1:
2733 iounmap(adapter->hw_addr0);
2734 err_ioremap:
2735 pci_release_selected_regions(pdev, (1 << 2) - 1);
2736 err_set_mask:
2737 pci_disable_device(pdev);
2738 return err;
2739 }
2740
2741
2742 static void
2743 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2744 {
2745 BUG_ON(!adapter->pdev);
2746
2747 iounmap(adapter->hw_addr0);
2748 iounmap(adapter->hw_addr1);
2749 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2750 pci_disable_device(adapter->pdev);
2751 }
2752
2753
2754 static void
2755 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2756 {
2757 size_t sz, i, ring0_size, ring1_size, comp_size;
2758 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2759
2760
2761 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2762 VMXNET3_MAX_ETH_HDR_SIZE) {
2763 adapter->skb_buf_size = adapter->netdev->mtu +
2764 VMXNET3_MAX_ETH_HDR_SIZE;
2765 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2766 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2767
2768 adapter->rx_buf_per_pkt = 1;
2769 } else {
2770 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2771 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2772 VMXNET3_MAX_ETH_HDR_SIZE;
2773 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2774 }
2775
2776 /*
2777 * for simplicity, force the ring0 size to be a multiple of
2778 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2779 */
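/*
 * e.g. with rx_buf_per_pkt = 3 and VMXNET3_RING_SIZE_ALIGN assumed to be
 * 32, sz = 96: a requested ring0 size of 1024 rounds up to 1056 and is
 * then capped at the largest multiple of 96 not above
 * VMXNET3_RX_RING_MAX_SIZE.
 */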
2780 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2781 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2782 ring0_size = (ring0_size + sz - 1) / sz * sz;
2783 ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2784 sz * sz);
2785 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2786 ring1_size = (ring1_size + sz - 1) / sz * sz;
2787 ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
2788 sz * sz);
2789 comp_size = ring0_size + ring1_size;
2790
2791 for (i = 0; i < adapter->num_rx_queues; i++) {
2792 rq = &adapter->rx_queue[i];
2793 rq->rx_ring[0].size = ring0_size;
2794 rq->rx_ring[1].size = ring1_size;
2795 rq->comp_ring.size = comp_size;
2796 }
2797 }
2798
2799
2800 int
2801 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2802 u32 rx_ring_size, u32 rx_ring2_size,
2803 u16 txdata_desc_size, u16 rxdata_desc_size)
2804 {
2805 int err = 0, i;
2806
2807 for (i = 0; i < adapter->num_tx_queues; i++) {
2808 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2809 tq->tx_ring.size = tx_ring_size;
2810 tq->data_ring.size = tx_ring_size;
2811 tq->comp_ring.size = tx_ring_size;
2812 tq->txdata_desc_size = txdata_desc_size;
2813 tq->shared = &adapter->tqd_start[i].ctrl;
2814 tq->stopped = true;
2815 tq->adapter = adapter;
2816 tq->qid = i;
2817 err = vmxnet3_tq_create(tq, adapter);
2818 /*
2819 * Too late to change num_tx_queues. We cannot make do with
2820 * fewer queues than we asked for
2821 */
2822 if (err)
2823 goto queue_err;
2824 }
2825
2826 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2827 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2828 vmxnet3_adjust_rx_ring_size(adapter);
2829
2830 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2831 for (i = 0; i < adapter->num_rx_queues; i++) {
2832 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2833 /* qid and qid2 for rx queues will be assigned later when num
2834 * of rx queues is finalized after allocating intrs */
2835 rq->shared = &adapter->rqd_start[i].ctrl;
2836 rq->adapter = adapter;
2837 rq->data_ring.desc_size = rxdata_desc_size;
2838 err = vmxnet3_rq_create(rq, adapter);
2839 if (err) {
2840 if (i == 0) {
2841 netdev_err(adapter->netdev,
2842 "Could not allocate any rx queues. "
2843 "Aborting.\n");
2844 goto queue_err;
2845 } else {
2846 netdev_info(adapter->netdev,
2847 "Number of rx queues changed "
2848 "to : %d.\n", i);
2849 adapter->num_rx_queues = i;
2850 err = 0;
2851 break;
2852 }
2853 }
2854 }
2855
2856 if (!adapter->rxdataring_enabled)
2857 vmxnet3_rq_destroy_all_rxdataring(adapter);
2858
2859 return err;
2860 queue_err:
2861 vmxnet3_tq_destroy_all(adapter);
2862 return err;
2863 }
2864
2865 static int
2866 vmxnet3_open(struct net_device *netdev)
2867 {
2868 struct vmxnet3_adapter *adapter;
2869 int err, i;
2870
2871 adapter = netdev_priv(netdev);
2872
2873 for (i = 0; i < adapter->num_tx_queues; i++)
2874 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2875
2876 if (VMXNET3_VERSION_GE_3(adapter)) {
2877 unsigned long flags;
2878 u16 txdata_desc_size;
2879
2880 spin_lock_irqsave(&adapter->cmd_lock, flags);
2881 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2882 VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
2883 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
2884 VMXNET3_REG_CMD);
2885 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2886
2887 if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
2888 (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
2889 (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
2890 adapter->txdata_desc_size =
2891 sizeof(struct Vmxnet3_TxDataDesc);
2892 } else {
2893 adapter->txdata_desc_size = txdata_desc_size;
2894 }
2895 } else {
2896 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
2897 }
2898
2899 err = vmxnet3_create_queues(adapter,
2900 adapter->tx_ring_size,
2901 adapter->rx_ring_size,
2902 adapter->rx_ring2_size,
2903 adapter->txdata_desc_size,
2904 adapter->rxdata_desc_size);
2905 if (err)
2906 goto queue_err;
2907
2908 err = vmxnet3_activate_dev(adapter);
2909 if (err)
2910 goto activate_err;
2911
2912 return 0;
2913
2914 activate_err:
2915 vmxnet3_rq_destroy_all(adapter);
2916 vmxnet3_tq_destroy_all(adapter);
2917 queue_err:
2918 return err;
2919 }
2920
2921
2922 static int
2923 vmxnet3_close(struct net_device *netdev)
2924 {
2925 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2926
2927 /*
2928 * Reset_work may be in the middle of resetting the device, wait for its
2929 * completion.
2930 */
2931 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2932 msleep(1);
2933
2934 vmxnet3_quiesce_dev(adapter);
2935
2936 vmxnet3_rq_destroy_all(adapter);
2937 vmxnet3_tq_destroy_all(adapter);
2938
2939 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2940
2941
2942 return 0;
2943 }
2944
2945
2946 void
2947 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2948 {
2949 int i;
2950
2951 /*
2952 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2953 * vmxnet3_close() will deadlock.
2954 */
2955 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2956
2957 /* we need to enable NAPI, otherwise dev_close will deadlock */
2958 for (i = 0; i < adapter->num_rx_queues; i++)
2959 napi_enable(&adapter->rx_queue[i].napi);
2960 dev_close(adapter->netdev);
2961 }
2962
2963
2964 static int
2965 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2966 {
2967 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2968 int err = 0;
2969
2970 if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2971 return -EINVAL;
2972
2973 netdev->mtu = new_mtu;
2974
2975 /*
2976 * Reset_work may be in the middle of resetting the device, wait for its
2977 * completion.
2978 */
2979 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2980 msleep(1);
2981
2982 if (netif_running(netdev)) {
2983 vmxnet3_quiesce_dev(adapter);
2984 vmxnet3_reset_dev(adapter);
2985
2986 /* we need to re-create the rx queue based on the new mtu */
2987 vmxnet3_rq_destroy_all(adapter);
2988 vmxnet3_adjust_rx_ring_size(adapter);
2989 err = vmxnet3_rq_create_all(adapter);
2990 if (err) {
2991 netdev_err(netdev,
2992 "failed to re-create rx queues, "
2993 " error %d. Closing it.\n", err);
2994 goto out;
2995 }
2996
2997 err = vmxnet3_activate_dev(adapter);
2998 if (err) {
2999 netdev_err(netdev,
3000 "failed to re-activate, error %d. "
3001 "Closing it\n", err);
3002 goto out;
3003 }
3004 }
3005
3006 out:
3007 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3008 if (err)
3009 vmxnet3_force_close(adapter);
3010
3011 return err;
3012 }
3013
3014
3015 static void
3016 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
3017 {
3018 struct net_device *netdev = adapter->netdev;
3019
3020 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3021 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3022 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
3023 NETIF_F_LRO;
3024 if (dma64)
3025 netdev->hw_features |= NETIF_F_HIGHDMA;
3026 netdev->vlan_features = netdev->hw_features &
3027 ~(NETIF_F_HW_VLAN_CTAG_TX |
3028 NETIF_F_HW_VLAN_CTAG_RX);
3029 netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3030 }
3031
3032
3033 static void
3034 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3035 {
3036 u32 tmp;
3037
3038 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3039 *(u32 *)mac = tmp;
3040
3041 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3042 mac[4] = tmp & 0xff;
3043 mac[5] = (tmp >> 8) & 0xff;
3044 }
3045
3046 #ifdef CONFIG_PCI_MSI
3047
3048 /*
3049 * Enable MSIx vectors.
3050 * Returns :
3051 * VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum required number
3052 * of vectors were enabled,
3053 * the number of vectors enabled otherwise (greater than
3054 * VMXNET3_LINUX_MIN_MSIX_VECT), or a negative errno on failure.
3055 */
3056
3057 static int
3058 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3059 {
3060 int ret = pci_enable_msix_range(adapter->pdev,
3061 adapter->intr.msix_entries, nvec, nvec);
3062
3063 if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
3064 dev_err(&adapter->netdev->dev,
3065 "Failed to enable %d MSI-X, trying %d\n",
3066 nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
3067
3068 ret = pci_enable_msix_range(adapter->pdev,
3069 adapter->intr.msix_entries,
3070 VMXNET3_LINUX_MIN_MSIX_VECT,
3071 VMXNET3_LINUX_MIN_MSIX_VECT);
3072 }
3073
3074 if (ret < 0) {
3075 dev_err(&adapter->netdev->dev,
3076 "Failed to enable MSI-X, error: %d\n", ret);
3077 }
3078
3079 return ret;
3080 }
3081
3082
3083 #endif /* CONFIG_PCI_MSI */
3084
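/*
 * Pick the interrupt type advertised by the device, falling back from
 * MSI-X to MSI to INTx. Anything short of a full per-queue MSI-X
 * allocation limits the driver to a single rx queue.
 */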
3085 static void
3086 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3087 {
3088 u32 cfg;
3089 unsigned long flags;
3090
3091 /* intr settings */
3092 spin_lock_irqsave(&adapter->cmd_lock, flags);
3093 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3094 VMXNET3_CMD_GET_CONF_INTR);
3095 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3096 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3097 adapter->intr.type = cfg & 0x3;
3098 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3099
3100 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3101 adapter->intr.type = VMXNET3_IT_MSIX;
3102 }
3103
3104 #ifdef CONFIG_PCI_MSI
3105 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3106 int i, nvec;
3107
3108 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3109 1 : adapter->num_tx_queues;
3110 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3111 0 : adapter->num_rx_queues;
3112 nvec += 1; /* for link event */
3113 nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
3114 nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
3115
3116 for (i = 0; i < nvec; i++)
3117 adapter->intr.msix_entries[i].entry = i;
3118
3119 nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
3120 if (nvec < 0)
3121 goto msix_err;
3122
3123 /* If we cannot allocate one MSIx vector per queue
3124 * then limit the number of rx queues to 1
3125 */
3126 if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
3127 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3128 || adapter->num_rx_queues != 1) {
3129 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3130 netdev_err(adapter->netdev,
3131 "Number of rx queues : 1\n");
3132 adapter->num_rx_queues = 1;
3133 }
3134 }
3135
3136 adapter->intr.num_intrs = nvec;
3137 return;
3138
3139 msix_err:
3140 /* If we cannot allocate MSIx vectors use only one rx queue */
3141 dev_info(&adapter->pdev->dev,
3142 "Failed to enable MSI-X, error %d. "
3143 "Limiting #rx queues to 1, try MSI.\n", nvec);
3144
3145 adapter->intr.type = VMXNET3_IT_MSI;
3146 }
3147
3148 if (adapter->intr.type == VMXNET3_IT_MSI) {
3149 if (!pci_enable_msi(adapter->pdev)) {
3150 adapter->num_rx_queues = 1;
3151 adapter->intr.num_intrs = 1;
3152 return;
3153 }
3154 }
3155 #endif /* CONFIG_PCI_MSI */
3156
3157 adapter->num_rx_queues = 1;
3158 dev_info(&adapter->netdev->dev,
3159 "Using INTx interrupt, #Rx queues: 1.\n");
3160 adapter->intr.type = VMXNET3_IT_INTX;
3161
3162 /* INT-X related setting */
3163 adapter->intr.num_intrs = 1;
3164 }
3165
3166
3167 static void
3168 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3169 {
3170 if (adapter->intr.type == VMXNET3_IT_MSIX)
3171 pci_disable_msix(adapter->pdev);
3172 else if (adapter->intr.type == VMXNET3_IT_MSI)
3173 pci_disable_msi(adapter->pdev);
3174 else
3175 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3176 }
3177
3178
3179 static void
3180 vmxnet3_tx_timeout(struct net_device *netdev)
3181 {
3182 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3183 adapter->tx_timeout_count++;
3184
3185 netdev_err(adapter->netdev, "tx hang\n");
3186 schedule_work(&adapter->work);
3187 netif_wake_queue(adapter->netdev);
3188 }
3189
3190
3191 static void
3192 vmxnet3_reset_work(struct work_struct *data)
3193 {
3194 struct vmxnet3_adapter *adapter;
3195
3196 adapter = container_of(data, struct vmxnet3_adapter, work);
3197
3198 /* if another thread is resetting the device, no need to proceed */
3199 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3200 return;
3201
3202 /* if the device is closed, we must leave it alone */
3203 rtnl_lock();
3204 if (netif_running(adapter->netdev)) {
3205 netdev_notice(adapter->netdev, "resetting\n");
3206 vmxnet3_quiesce_dev(adapter);
3207 vmxnet3_reset_dev(adapter);
3208 vmxnet3_activate_dev(adapter);
3209 } else {
3210 netdev_info(adapter->netdev, "already closed\n");
3211 }
3212 rtnl_unlock();
3213
3214 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3215 }
3216
3217
3218 static int
3219 vmxnet3_probe_device(struct pci_dev *pdev,
3220 const struct pci_device_id *id)
3221 {
3222 static const struct net_device_ops vmxnet3_netdev_ops = {
3223 .ndo_open = vmxnet3_open,
3224 .ndo_stop = vmxnet3_close,
3225 .ndo_start_xmit = vmxnet3_xmit_frame,
3226 .ndo_set_mac_address = vmxnet3_set_mac_addr,
3227 .ndo_change_mtu = vmxnet3_change_mtu,
3228 .ndo_set_features = vmxnet3_set_features,
3229 .ndo_get_stats64 = vmxnet3_get_stats64,
3230 .ndo_tx_timeout = vmxnet3_tx_timeout,
3231 .ndo_set_rx_mode = vmxnet3_set_mc,
3232 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
3233 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
3234 #ifdef CONFIG_NET_POLL_CONTROLLER
3235 .ndo_poll_controller = vmxnet3_netpoll,
3236 #endif
3237 };
3238 int err;
3239 bool dma64 = false; /* initialized only to silence gcc */
3240 u32 ver;
3241 struct net_device *netdev;
3242 struct vmxnet3_adapter *adapter;
3243 u8 mac[ETH_ALEN];
3244 int size;
3245 int num_tx_queues;
3246 int num_rx_queues;
3247
3248 if (!pci_msi_enabled())
3249 enable_mq = 0;
3250
3251 #ifdef VMXNET3_RSS
3252 if (enable_mq)
3253 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3254 (int)num_online_cpus());
3255 else
3256 #endif
3257 num_rx_queues = 1;
3258 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3259
3260 if (enable_mq)
3261 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
3262 (int)num_online_cpus());
3263 else
3264 num_tx_queues = 1;
3265
3266 num_tx_queues = rounddown_pow_of_two(num_tx_queues);
3267 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
3268 max(num_tx_queues, num_rx_queues));
3269 dev_info(&pdev->dev,
3270 "# of Tx queues : %d, # of Rx queues : %d\n",
3271 num_tx_queues, num_rx_queues);
3272
3273 if (!netdev)
3274 return -ENOMEM;
3275
3276 pci_set_drvdata(pdev, netdev);
3277 adapter = netdev_priv(netdev);
3278 adapter->netdev = netdev;
3279 adapter->pdev = pdev;
3280
3281 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3282 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3283 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3284
3285 spin_lock_init(&adapter->cmd_lock);
3286 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3287 sizeof(struct vmxnet3_adapter),
3288 PCI_DMA_TODEVICE);
3289 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3290 dev_err(&pdev->dev, "Failed to map dma\n");
3291 err = -EFAULT;
3292 goto err_dma_map;
3293 }
3294 adapter->shared = dma_alloc_coherent(
3295 &adapter->pdev->dev,
3296 sizeof(struct Vmxnet3_DriverShared),
3297 &adapter->shared_pa, GFP_KERNEL);
3298 if (!adapter->shared) {
3299 dev_err(&pdev->dev, "Failed to allocate memory\n");
3300 err = -ENOMEM;
3301 goto err_alloc_shared;
3302 }
3303
3304 adapter->num_rx_queues = num_rx_queues;
3305 adapter->num_tx_queues = num_tx_queues;
3306 adapter->rx_buf_per_pkt = 1;
3307
3308 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3309 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3310 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3311 &adapter->queue_desc_pa,
3312 GFP_KERNEL);
3313
3314 if (!adapter->tqd_start) {
3315 dev_err(&pdev->dev, "Failed to allocate memory\n");
3316 err = -ENOMEM;
3317 goto err_alloc_queue_desc;
3318 }
3319 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3320 adapter->num_tx_queues);
3321
3322 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3323 sizeof(struct Vmxnet3_PMConf),
3324 &adapter->pm_conf_pa,
3325 GFP_KERNEL);
3326 if (adapter->pm_conf == NULL) {
3327 err = -ENOMEM;
3328 goto err_alloc_pm;
3329 }
3330
3331 #ifdef VMXNET3_RSS
3332
3333 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
3334 sizeof(struct UPT1_RSSConf),
3335 &adapter->rss_conf_pa,
3336 GFP_KERNEL);
3337 if (adapter->rss_conf == NULL) {
3338 err = -ENOMEM;
3339 goto err_alloc_rss;
3340 }
3341 #endif /* VMXNET3_RSS */
3342
3343 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
3344 if (err < 0)
3345 goto err_alloc_pci;
3346
3347 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3348 if (ver & (1 << VMXNET3_REV_3)) {
3349 VMXNET3_WRITE_BAR1_REG(adapter,
3350 VMXNET3_REG_VRRS,
3351 1 << VMXNET3_REV_3);
3352 adapter->version = VMXNET3_REV_3 + 1;
3353 } else if (ver & (1 << VMXNET3_REV_2)) {
3354 VMXNET3_WRITE_BAR1_REG(adapter,
3355 VMXNET3_REG_VRRS,
3356 1 << VMXNET3_REV_2);
3357 adapter->version = VMXNET3_REV_2 + 1;
3358 } else if (ver & (1 << VMXNET3_REV_1)) {
3359 VMXNET3_WRITE_BAR1_REG(adapter,
3360 VMXNET3_REG_VRRS,
3361 1 << VMXNET3_REV_1);
3362 adapter->version = VMXNET3_REV_1 + 1;
3363 } else {
3364 dev_err(&pdev->dev,
3365 "Incompatible h/w version (0x%x) for adapter\n", ver);
3366 err = -EBUSY;
3367 goto err_ver;
3368 }
3369 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3370
3371 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3372 if (ver & 1) {
3373 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3374 } else {
3375 dev_err(&pdev->dev,
3376 "Incompatible upt version (0x%x) for adapter\n", ver);
3377 err = -EBUSY;
3378 goto err_ver;
3379 }
3380
3381 if (VMXNET3_VERSION_GE_3(adapter)) {
3382 adapter->coal_conf =
3383 dma_alloc_coherent(&adapter->pdev->dev,
3384 sizeof(struct Vmxnet3_CoalesceScheme),
3386 &adapter->coal_conf_pa,
3387 GFP_KERNEL);
3388 if (!adapter->coal_conf) {
3389 err = -ENOMEM;
3390 goto err_ver;
3391 }
3392 memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
3393 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
3394 adapter->default_coal_mode = true;
3395 }
3396
3397 SET_NETDEV_DEV(netdev, &pdev->dev);
3398 vmxnet3_declare_features(adapter, dma64);
3399
3400 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
3401 VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
3402
3403 if (adapter->num_tx_queues == adapter->num_rx_queues)
3404 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
3405 else
3406 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
3407
3408 vmxnet3_alloc_intr_resources(adapter);
3409
3410 #ifdef VMXNET3_RSS
3411 if (adapter->num_rx_queues > 1 &&
3412 adapter->intr.type == VMXNET3_IT_MSIX) {
3413 adapter->rss = true;
3414 netdev->hw_features |= NETIF_F_RXHASH;
3415 netdev->features |= NETIF_F_RXHASH;
3416 dev_dbg(&pdev->dev, "RSS is enabled.\n");
3417 } else {
3418 adapter->rss = false;
3419 }
3420 #endif
3421
3422 vmxnet3_read_mac_addr(adapter, mac);
3423 memcpy(netdev->dev_addr, mac, netdev->addr_len);
3424
3425 netdev->netdev_ops = &vmxnet3_netdev_ops;
3426 vmxnet3_set_ethtool_ops(netdev);
3427 netdev->watchdog_timeo = 5 * HZ;
3428
3429 INIT_WORK(&adapter->work, vmxnet3_reset_work);
3430 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3431
3432 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3433 int i;
3434 for (i = 0; i < adapter->num_rx_queues; i++) {
3435 netif_napi_add(adapter->netdev,
3436 &adapter->rx_queue[i].napi,
3437 vmxnet3_poll_rx_only, 64);
3438 }
3439 } else {
3440 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3441 vmxnet3_poll, 64);
3442 }
3443
3444 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3445 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3446
3447 netif_carrier_off(netdev);
3448 err = register_netdev(netdev);
3449
3450 if (err) {
3451 dev_err(&pdev->dev, "Failed to register adapter\n");
3452 goto err_register;
3453 }
3454
3455 vmxnet3_check_link(adapter, false);
3456 return 0;
3457
3458 err_register:
3459 if (VMXNET3_VERSION_GE_3(adapter)) {
3460 dma_free_coherent(&adapter->pdev->dev,
3461 sizeof(struct Vmxnet3_CoalesceScheme),
3462 adapter->coal_conf, adapter->coal_conf_pa);
3463 }
3464 vmxnet3_free_intr_resources(adapter);
3465 err_ver:
3466 vmxnet3_free_pci_resources(adapter);
3467 err_alloc_pci:
3468 #ifdef VMXNET3_RSS
3469 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3470 adapter->rss_conf, adapter->rss_conf_pa);
3471 err_alloc_rss:
3472 #endif
3473 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3474 adapter->pm_conf, adapter->pm_conf_pa);
3475 err_alloc_pm:
3476 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3477 adapter->queue_desc_pa);
3478 err_alloc_queue_desc:
3479 dma_free_coherent(&adapter->pdev->dev,
3480 sizeof(struct Vmxnet3_DriverShared),
3481 adapter->shared, adapter->shared_pa);
3482 err_alloc_shared:
3483 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3484 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3485 err_dma_map:
3486 free_netdev(netdev);
3487 return err;
3488 }
3489
3490
3491 static void
3492 vmxnet3_remove_device(struct pci_dev *pdev)
3493 {
3494 struct net_device *netdev = pci_get_drvdata(pdev);
3495 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3496 int size = 0;
3497 int num_rx_queues;
3498
3499 #ifdef VMXNET3_RSS
3500 if (enable_mq)
3501 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3502 (int)num_online_cpus());
3503 else
3504 #endif
3505 num_rx_queues = 1;
3506 num_rx_queues = rounddown_pow_of_two(num_rx_queues);
3507
3508 cancel_work_sync(&adapter->work);
3509
3510 unregister_netdev(netdev);
3511
3512 vmxnet3_free_intr_resources(adapter);
3513 vmxnet3_free_pci_resources(adapter);
3514 if (VMXNET3_VERSION_GE_3(adapter)) {
3515 dma_free_coherent(&adapter->pdev->dev,
3516 sizeof(struct Vmxnet3_CoalesceScheme),
3517 adapter->coal_conf, adapter->coal_conf_pa);
3518 }
3519 #ifdef VMXNET3_RSS
3520 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
3521 adapter->rss_conf, adapter->rss_conf_pa);
3522 #endif
3523 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
3524 adapter->pm_conf, adapter->pm_conf_pa);
3525
3526 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3527 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3528 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
3529 adapter->queue_desc_pa);
3530 dma_free_coherent(&adapter->pdev->dev,
3531 sizeof(struct Vmxnet3_DriverShared),
3532 adapter->shared, adapter->shared_pa);
3533 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3534 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3535 free_netdev(netdev);
3536 }
3537
3538 static void vmxnet3_shutdown_device(struct pci_dev *pdev)
3539 {
3540 struct net_device *netdev = pci_get_drvdata(pdev);
3541 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3542 unsigned long flags;
3543
3544 /* Reset_work may be in the middle of resetting the device, wait for its
3545 * completion.
3546 */
3547 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3548 msleep(1);
3549
3550 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
3551 &adapter->state)) {
3552 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3553 return;
3554 }
3555 spin_lock_irqsave(&adapter->cmd_lock, flags);
3556 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3557 VMXNET3_CMD_QUIESCE_DEV);
3558 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3559 vmxnet3_disable_all_intrs(adapter);
3560
3561 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3562 }
3563
3564
3565 #ifdef CONFIG_PM
3566
3567 static int
3568 vmxnet3_suspend(struct device *device)
3569 {
3570 struct pci_dev *pdev = to_pci_dev(device);
3571 struct net_device *netdev = pci_get_drvdata(pdev);
3572 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3573 struct Vmxnet3_PMConf *pmConf;
3574 struct ethhdr *ehdr;
3575 struct arphdr *ahdr;
3576 u8 *arpreq;
3577 struct in_device *in_dev;
3578 struct in_ifaddr *ifa;
3579 unsigned long flags;
3580 int i = 0;
3581
3582 if (!netif_running(netdev))
3583 return 0;
3584
3585 for (i = 0; i < adapter->num_rx_queues; i++)
3586 napi_disable(&adapter->rx_queue[i].napi);
3587
3588 vmxnet3_disable_all_intrs(adapter);
3589 vmxnet3_free_irqs(adapter);
3590 vmxnet3_free_intr_resources(adapter);
3591
3592 netif_device_detach(netdev);
3593 netif_tx_stop_all_queues(netdev);
3594
3595 /* Create wake-up filters. */
3596 pmConf = adapter->pm_conf;
3597 memset(pmConf, 0, sizeof(*pmConf));
3598
3599 if (adapter->wol & WAKE_UCAST) {
3600 pmConf->filters[i].patternSize = ETH_ALEN;
3601 pmConf->filters[i].maskSize = 1;
3602 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3603 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3604
3605 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3606 i++;
3607 }
3608
3609 if (adapter->wol & WAKE_ARP) {
3610 in_dev = in_dev_get(netdev);
3611 if (!in_dev)
3612 goto skip_arp;
3613
3614 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3615 if (!ifa)
3616 goto skip_arp;
3617
3618 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
3619 sizeof(struct arphdr) + /* ARP header */
3620 2 * ETH_ALEN + /* 2 Ethernet addresses*/
3621 2 * sizeof(u32); /*2 IPv4 addresses */
3622 pmConf->filters[i].maskSize =
3623 (pmConf->filters[i].patternSize - 1) / 8 + 1;
3624
3625 /* ETH_P_ARP in Ethernet header. */
3626 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3627 ehdr->h_proto = htons(ETH_P_ARP);
3628
3629 /* ARPOP_REQUEST in ARP header. */
3630 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3631 ahdr->ar_op = htons(ARPOP_REQUEST);
3632 arpreq = (u8 *)(ahdr + 1);
3633
3634 /* The Unicast IPv4 address in 'tip' field. */
3635 arpreq += 2 * ETH_ALEN + sizeof(u32);
3636 *(u32 *)arpreq = ifa->ifa_address;
3637
3638 /* The mask for the relevant bits. */
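/* Each mask bit selects one pattern byte: bit k of mask[n] covers
 * pattern byte n * 8 + k.
 */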
3639 pmConf->filters[i].mask[0] = 0x00;
3640 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3641 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3642 pmConf->filters[i].mask[3] = 0x00;
3643 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3644 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
3645 in_dev_put(in_dev);
3646
3647 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3648 i++;
3649 }
3650
3651 skip_arp:
3652 if (adapter->wol & WAKE_MAGIC)
3653 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3654
3655 pmConf->numFilters = i;
3656
3657 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3658 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3659 *pmConf));
3660 adapter->shared->devRead.pmConfDesc.confPA =
3661 cpu_to_le64(adapter->pm_conf_pa);
3662
3663 spin_lock_irqsave(&adapter->cmd_lock, flags);
3664 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3665 VMXNET3_CMD_UPDATE_PMCFG);
3666 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3667
3668 pci_save_state(pdev);
3669 pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3670 adapter->wol);
3671 pci_disable_device(pdev);
3672 pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3673
3674 return 0;
3675 }
3676
3677
3678 static int
3679 vmxnet3_resume(struct device *device)
3680 {
3681 int err;
3682 unsigned long flags;
3683 struct pci_dev *pdev = to_pci_dev(device);
3684 struct net_device *netdev = pci_get_drvdata(pdev);
3685 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3686
3687 if (!netif_running(netdev))
3688 return 0;
3689
3690 pci_set_power_state(pdev, PCI_D0);
3691 pci_restore_state(pdev);
3692 err = pci_enable_device_mem(pdev);
3693 if (err != 0)
3694 return err;
3695
3696 pci_enable_wake(pdev, PCI_D0, 0);
3697
3698 vmxnet3_alloc_intr_resources(adapter);
3699
3700 /* During hibernate and suspend, the device must be reinitialized as the
3701 * device state is not guaranteed to be preserved.
3702 */
3703
3704 /* Need not check adapter state as other reset tasks cannot run during
3705 * device resume.
3706 */
3707 spin_lock_irqsave(&adapter->cmd_lock, flags);
3708 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3709 VMXNET3_CMD_QUIESCE_DEV);
3710 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3711 vmxnet3_tq_cleanup_all(adapter);
3712 vmxnet3_rq_cleanup_all(adapter);
3713
3714 vmxnet3_reset_dev(adapter);
3715 err = vmxnet3_activate_dev(adapter);
3716 if (err != 0) {
3717 netdev_err(netdev,
3718 "failed to re-activate on resume, error: %d", err);
3719 vmxnet3_force_close(adapter);
3720 return err;
3721 }
3722 netif_device_attach(netdev);
3723
3724 return 0;
3725 }
3726
3727 static const struct dev_pm_ops vmxnet3_pm_ops = {
3728 .suspend = vmxnet3_suspend,
3729 .resume = vmxnet3_resume,
3730 .freeze = vmxnet3_suspend,
3731 .restore = vmxnet3_resume,
3732 };
3733 #endif
3734
3735 static struct pci_driver vmxnet3_driver = {
3736 .name = vmxnet3_driver_name,
3737 .id_table = vmxnet3_pciid_table,
3738 .probe = vmxnet3_probe_device,
3739 .remove = vmxnet3_remove_device,
3740 .shutdown = vmxnet3_shutdown_device,
3741 #ifdef CONFIG_PM
3742 .driver.pm = &vmxnet3_pm_ops,
3743 #endif
3744 };
3745
3746
3747 static int __init
3748 vmxnet3_init_module(void)
3749 {
3750 pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
3751 VMXNET3_DRIVER_VERSION_REPORT);
3752 return pci_register_driver(&vmxnet3_driver);
3753 }
3754
3755 module_init(vmxnet3_init_module);
3756
3757
3758 static void
3759 vmxnet3_exit_module(void)
3760 {
3761 pci_unregister_driver(&vmxnet3_driver);
3762 }
3763
3764 module_exit(vmxnet3_exit_module);
3765
3766 MODULE_AUTHOR("VMware, Inc.");
3767 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3768 MODULE_LICENSE("GPL v2");
3769 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);