/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"
/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
		 "default adapter ethtool message level bitmap, "
		 "deprecated parameter");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI. This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX 2
#define MSI_MSI 1
#define MSI_DEFAULT MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");

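/*
 * For example (hypothetical usage, not part of this source): the MSI-X
 * preference can be overridden at module load time with
 *
 *	modprobe cxgb4vf msi=1
 *
 * or persistently via a modprobe.d fragment:
 *
 *	options cxgb4vf msi=1
 */
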
/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,

	MIN_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are all in units of Egress Context Units (bytes), and free
	 * list entries are 64-bit PCI DMA addresses. And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused. See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};

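/*
 * A quick sanity check of the arithmetic above (a sketch; it assumes the
 * 64-byte egress context unit that SGE_EQ_IDXSIZE evaluates to on these
 * adapters):
 *
 *	EQ_UNIT        = 64 bytes
 *	FL_PER_EQ_UNIT = 64 / sizeof(__be64) = 64 / 8 = 8 entries
 *	MIN_FL_RESID   = 8 entries
 *
 * i.e. one Egress Unit's worth of 64-bit DMA addresses is always kept in
 * reserve so that Producer Index == Consumer Index can unambiguously mean
 * "empty".
 */
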
/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 40000:
			s = "40Gbps";
			break;

		case 10000:
			s = "10Gbps";
			break;

		case 1000:
			s = "1000Mbps";
			break;

		case 100:
			s = "100Mbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch (pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX | PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;

		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);

		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			break;
		}
		cpl = (void *)p;
		/*FALLTHROUGH*/
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message. We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID. None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
 * to use and initializes them. We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector. The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets. These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists. This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues. We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface). We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed. We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;

				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

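/*
 * A worked example of the mapping set up above (a sketch; it assumes the
 * usual cycling behaviour of t4vf_config_rss_range() when it is handed
 * fewer values than table slots): with pi->nqsets = 4 and pi->rss_size = 64,
 * RSS table slot i ends up pointing at rss[i % 4], so hashed ingress
 * traffic is spread evenly across the port's four Queue Sets.
 */
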
/*
 * Bring the adapter up. Called whenever we go from no "ports" open to having
 * one open. This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts. Must be called with the rtnl lock held. (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup. Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources. We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX | USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);
	return 0;
}

/*
 * Bring the adapter down. Called whenever the last "port" (Virtual
 * Interface) is closed. (Note that this routine is called "cxgb_down" in the
 * PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device. This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}

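/*
 * A worked example of the vector computed above (a sketch; it assumes
 * hash_mac_addr() reduces a MAC address to one of 64 buckets, which is what
 * the u64 vector implies): an address hashing to bucket 5 contributes bit
 * (1ULL << 5), and two addresses that collide in the same bucket simply set
 * the same bit before the vector is programmed via t4vf_set_addr_hash().
 */
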
static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to the hash addr list
	 * so at the end we can calculate the hash for the whole
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];

		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us. 0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
 * @adapter: the adapter
 * @rspq: the RX response queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an RX response queue's interrupt hold-off time and packet count.
 * At least one of the two needs to be enabled for the queue to generate
 * interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that. If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}

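/*
 * For example (a sketch of the policy above): a request of us = 5, cnt = 0
 * picks the configured holdoff timer closest to 5us and leaves the packet
 * counter disabled, while us = 0, cnt = 0 is quietly turned into cnt = 1,
 * i.e. "interrupt on every message".
 */
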
/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}

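/*
 * A minimal sketch of decoding the version number built above; the helper
 * below is hypothetical (illustration only, not part of the driver) and
 * just restates the bit layout:
 */
#if 0
static inline void decode_adap_vers(unsigned int vers,
				    unsigned int *chip_version,
				    unsigned int *chip_revision)
{
	*chip_version = vers & 0x3ff;		/* bits 0..9 */
	*chip_revision = (vers >> 10) & 0x3f;	/* bits 10..15 */
}
#endif
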
/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	/*
	 * The VF Driver doesn't have access to any of the other
	 * common Ethernet device ioctl()'s (like reading/writing
	 * PHY registers, etc.)
	 */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* accommodate SACK */
	if (new_mtu < 81)
		return -EINVAL;

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb4vf_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues. This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */

static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
					  unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI ||
		 type == FW_PORT_TYPE_SFP ||
		 type == FW_PORT_TYPE_QSFP_10G ||
		 type == FW_PORT_TYPE_QSA) {
		v |= SUPPORTED_FIBRE;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_BP40_BA ||
		   type == FW_PORT_TYPE_QSFP) {
		v |= SUPPORTED_40000baseSR4_Full;
		v |= SUPPORTED_FIBRE;
	}

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSA ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0; /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
					       p->link_cfg.supported);
	cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
						 p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values. Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device. Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device. Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN_F)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
	"GROPackets        ",
	"GROMerged         ",
};

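/*
 * The layout coupling above is easy to break when adding statistics; a
 * compile-time cross-check along the following lines could catch that (a
 * sketch, not present in the driver):
 */
#if 0
static inline void check_stats_layout(void)
{
	BUILD_BUG_ON(sizeof(struct t4vf_port_stats) / sizeof(u64) +
		     sizeof(struct queue_port_stats) / sizeof(u64) !=
		     ARRAY_SIZE(stats_strings));
}
#endif
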
/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);

	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
					    ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings = cxgb4vf_get_settings,
	.get_drvinfo = cxgb4vf_get_drvinfo,
	.get_msglevel = cxgb4vf_get_msglevel,
	.set_msglevel = cxgb4vf_set_msglevel,
	.get_ringparam = cxgb4vf_get_ringparam,
	.set_ringparam = cxgb4vf_set_ringparam,
	.get_coalesce = cxgb4vf_get_coalesce,
	.set_coalesce = cxgb4vf_set_coalesce,
	.get_pauseparam = cxgb4vf_get_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = cxgb4vf_get_strings,
	.set_phys_id = cxgb4vf_phys_id,
	.get_sset_count = cxgb4vf_get_sset_count,
	.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
	.get_regs_len = cxgb4vf_get_regs_len,
	.get_regs = cxgb4vf_get_regs,
	.get_wol = cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show Firmware Mailbox Command/Reply Log
 *
 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
 * it's possible that we can catch things during a log update and therefore
 * see partially corrupted log entries. But it's probably Good Enough(tm).
 * If we ever decide that we want to make sure that we're dumping a coherent
 * log, we'd need to perform locking in the mailbox logging and in
 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
 * like we do for the Firmware Device Log. But as stated above, meh ...
 */
static int mboxlog_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int entry_idx, i;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%10s %15s %5s %5s %s\n",
			   "Seq#", "Tstamp", "Atime", "Etime",
			   "Command/Reply");
		return 0;
	}

	entry_idx = log->cursor + ((uintptr_t)v - 2);
	if (entry_idx >= log->size)
		entry_idx -= log->size;
	entry = mbox_cmd_log_entry(log, entry_idx);

	/* skip over unused entries */
	if (entry->timestamp == 0)
		return 0;

	seq_printf(seq, "%10u %15llu %5d %5d",
		   entry->seqno, entry->timestamp,
		   entry->access, entry->execute);
	for (i = 0; i < MBOX_LEN / 8; i++) {
		u64 flit = entry->cmd[i];
		u32 hi = (u32)(flit >> 32);
		u32 lo = (u32)flit;

		seq_printf(seq, " %08x %08x", hi, lo);
	}
	seq_puts(seq, "\n");
	return 0;
}

static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;

	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
}

static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
}

static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return mboxlog_get_idx(seq, *pos);
}

static void mboxlog_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations mboxlog_seq_ops = {
	.start = mboxlog_start,
	.next = mboxlog_next,
	.stop = mboxlog_stop,
	.show = mboxlog_show
};

static int mboxlog_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &mboxlog_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations mboxlog_fops = {
	.owner = THIS_MODULE,
	.open = mboxlog_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

be839e39
CL
1801/*
1802 * Show SGE Queue Set information. We display QPL Queues Sets per line.
1803 */
1804#define QPL 4
1805
1806static int sge_qinfo_show(struct seq_file *seq, void *v)
1807{
1808 struct adapter *adapter = seq->private;
1809 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1810 int qs, r = (uintptr_t)v - 1;
1811
1812 if (r)
1813 seq_putc(seq, '\n');
1814
1815 #define S3(fmt_spec, s, v) \
1816 do {\
1817 seq_printf(seq, "%-12s", s); \
1818 for (qs = 0; qs < n; ++qs) \
1819 seq_printf(seq, " %16" fmt_spec, v); \
1820 seq_putc(seq, '\n'); \
1821 } while (0)
1822 #define S(s, v) S3("s", s, v)
1823 #define T(s, v) S3("u", s, txq[qs].v)
1824 #define R(s, v) S3("u", s, rxq[qs].v)
1825
1826 if (r < eth_entries) {
1827 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1828 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1829 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1830
1831 S("QType:", "Ethernet");
1832 S("Interface:",
1833 (rxq[qs].rspq.netdev
1834 ? rxq[qs].rspq.netdev->name
1835 : "N/A"));
1836 S3("d", "Port:",
1837 (rxq[qs].rspq.netdev
1838 ? ((struct port_info *)
1839 netdev_priv(rxq[qs].rspq.netdev))->port_id
1840 : -1));
1841 T("TxQ ID:", q.abs_id);
1842 T("TxQ size:", q.size);
1843 T("TxQ inuse:", q.in_use);
1844 T("TxQ PIdx:", q.pidx);
1845 T("TxQ CIdx:", q.cidx);
1846 R("RspQ ID:", rspq.abs_id);
1847 R("RspQ size:", rspq.size);
1848 R("RspQE size:", rspq.iqe_len);
1849 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1850 S3("u", "Intr pktcnt:",
1851 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1852 R("RspQ CIdx:", rspq.cidx);
1853 R("RspQ Gen:", rspq.gen);
1854 R("FL ID:", fl.abs_id);
1855 R("FL size:", fl.size - MIN_FL_RESID);
1856 R("FL avail:", fl.avail);
1857 R("FL PIdx:", fl.pidx);
1858 R("FL CIdx:", fl.cidx);
1859 return 0;
1860 }
1861
1862 r -= eth_entries;
1863 if (r == 0) {
1864 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1865
1866 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1867 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1868 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1869 qtimer_val(adapter, evtq));
1870 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1871 adapter->sge.counter_val[evtq->pktcnt_idx]);
1872 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
1873 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1874 } else if (r == 1) {
1875 const struct sge_rspq *intrq = &adapter->sge.intrq;
1876
1877 seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1878 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1879 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1880 qtimer_val(adapter, intrq));
1881 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1882 adapter->sge.counter_val[intrq->pktcnt_idx]);
1883 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
1884 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1885 }
1886
1887 #undef R
1888 #undef T
1889 #undef S
1890 #undef S3
1891
1892 return 0;
1893}
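/*
 * Editor's sketch: the S3()/S()/T()/R() helpers above print one
 * labelled row with one column per Queue Set in the group; for example
 * T("TxQ ID:", q.abs_id) expands (after macro substitution) to:
 *
 *	seq_printf(seq, "%-12s", "TxQ ID:");
 *	for (qs = 0; qs < n; ++qs)
 *		seq_printf(seq, " %16u", txq[qs].q.abs_id);
 *	seq_putc(seq, '\n');
 */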
1894
1895/*
1896 * Return the number of "entries" in our "file". We group the multi-Queue
1897 * sections with QPL Queue Sets per "entry". The sections of the output are:
1898 *
1899 * Ethernet RX/TX Queue Sets
1900 * Firmware Event Queue
1901 * Forwarded Interrupt Queue (if in MSI mode)
1902 */
1903static int sge_queue_entries(const struct adapter *adapter)
1904{
1905 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1906 ((adapter->flags & USING_MSI) != 0);
1907}
1908
1909static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1910{
1911 int entries = sge_queue_entries(seq->private);
1912
1913 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1914}
1915
1916static void sge_queue_stop(struct seq_file *seq, void *v)
1917{
1918}
1919
1920static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1921{
1922 int entries = sge_queue_entries(seq->private);
1923
1924 ++*pos;
1925 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1926}
1927
1928static const struct seq_operations sge_qinfo_seq_ops = {
1929 .start = sge_queue_start,
1930 .next = sge_queue_next,
1931 .stop = sge_queue_stop,
1932 .show = sge_qinfo_show
1933};
1934
1935static int sge_qinfo_open(struct inode *inode, struct file *file)
1936{
1937 int res = seq_open(file, &sge_qinfo_seq_ops);
1938
1939 if (!res) {
1940 struct seq_file *seq = file->private_data;
1941 seq->private = inode->i_private;
1942 }
1943 return res;
1944}
1945
1946static const struct file_operations sge_qinfo_debugfs_fops = {
1947 .owner = THIS_MODULE,
1948 .open = sge_qinfo_open,
1949 .read = seq_read,
1950 .llseek = seq_lseek,
1951 .release = seq_release,
1952};
1953
1954/*
1955 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
1956 */
1957#define QPL 4
1958
1959static int sge_qstats_show(struct seq_file *seq, void *v)
1960{
1961 struct adapter *adapter = seq->private;
1962 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1963 int qs, r = (uintptr_t)v - 1;
1964
1965 if (r)
1966 seq_putc(seq, '\n');
1967
1968 #define S3(fmt, s, v) \
1969 do { \
1970 seq_printf(seq, "%-16s", s); \
1971 for (qs = 0; qs < n; ++qs) \
1972 seq_printf(seq, " %8" fmt, v); \
1973 seq_putc(seq, '\n'); \
1974 } while (0)
1975 #define S(s, v) S3("s", s, v)
1976
1977 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1978 #define T(s, v) T3("lu", s, v)
1979
1980 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1981 #define R(s, v) R3("lu", s, v)
1982
1983 if (r < eth_entries) {
1984 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1985 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1986 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1987
1988 S("QType:", "Ethernet");
1989 S("Interface:",
1990 (rxq[qs].rspq.netdev
1991 ? rxq[qs].rspq.netdev->name
1992 : "N/A"));
1993 R3("u", "RspQNullInts:", rspq.unhandled_irqs);
1994 R("RxPackets:", stats.pkts);
1995 R("RxCSO:", stats.rx_cso);
1996 R("VLANxtract:", stats.vlan_ex);
1997 R("LROmerged:", stats.lro_merged);
1998 R("LROpackets:", stats.lro_pkts);
1999 R("RxDrops:", stats.rx_drops);
2000 T("TSO:", tso);
2001 T("TxCSO:", tx_cso);
2002 T("VLANins:", vlan_ins);
2003 T("TxQFull:", q.stops);
2004 T("TxQRestarts:", q.restarts);
2005 T("TxMapErr:", mapping_err);
2006 R("FLAllocErr:", fl.alloc_failed);
2007 R("FLLrgAlcErr:", fl.large_alloc_failed);
2008 R("FLStarving:", fl.starving);
2009 return 0;
2010 }
2011
2012 r -= eth_entries;
2013 if (r == 0) {
2014 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2015
2016 seq_printf(seq, "%-16s %8s\n", "QType:", "FW event queue");
2017 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2018 evtq->unhandled_irqs);
2019 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2020 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2021 } else if (r == 1) {
2022 const struct sge_rspq *intrq = &adapter->sge.intrq;
2023
2024 seq_printf(seq, "%-16s %8s\n", "QType:", "Interrupt Queue");
2025 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2026 intrq->unhandled_irqs);
2027 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2028 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2029 }
2030
2031 #undef R
2032 #undef T
2033 #undef S
2034 #undef R3
2035 #undef T3
2036 #undef S3
2037
2038 return 0;
2039}
2040
2041/*
2042 * Return the number of "entries" in our "file". We group the multi-Queue
2043 * sections with QPL Queue Sets per "entry". The sections of the output are:
2044 *
2045 * Ethernet RX/TX Queue Sets
2046 * Firmware Event Queue
2047 * Forwarded Interrupt Queue (if in MSI mode)
2048 */
2049static int sge_qstats_entries(const struct adapter *adapter)
2050{
2051 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2052 ((adapter->flags & USING_MSI) != 0);
2053}
2054
2055static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2056{
2057 int entries = sge_qstats_entries(seq->private);
2058
2059 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2060}
2061
2062static void sge_qstats_stop(struct seq_file *seq, void *v)
2063{
2064}
2065
2066static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2067{
2068 int entries = sge_qstats_entries(seq->private);
2069
2070 (*pos)++;
2071 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2072}
2073
2074static const struct seq_operations sge_qstats_seq_ops = {
2075 .start = sge_qstats_start,
2076 .next = sge_qstats_next,
2077 .stop = sge_qstats_stop,
2078 .show = sge_qstats_show
2079};
2080
2081static int sge_qstats_open(struct inode *inode, struct file *file)
2082{
2083 int res = seq_open(file, &sge_qstats_seq_ops);
2084
2085 if (res == 0) {
2086 struct seq_file *seq = file->private_data;
2087 seq->private = inode->i_private;
2088 }
2089 return res;
2090}
2091
2092static const struct file_operations sge_qstats_proc_fops = {
2093 .owner = THIS_MODULE,
2094 .open = sge_qstats_open,
2095 .read = seq_read,
2096 .llseek = seq_lseek,
2097 .release = seq_release,
2098};
2099
2100/*
2101 * Show PCI-E SR-IOV Virtual Function Resource Limits.
2102 */
2103static int resources_show(struct seq_file *seq, void *v)
2104{
2105 struct adapter *adapter = seq->private;
2106 struct vf_resources *vfres = &adapter->params.vfres;
2107
2108 #define S(desc, fmt, var) \
2109 seq_printf(seq, "%-60s " fmt "\n", \
2110 desc " (" #var "):", vfres->var)
2111
2112 S("Virtual Interfaces", "%d", nvi);
2113 S("Egress Queues", "%d", neq);
2114 S("Ethernet Control", "%d", nethctrl);
2115 S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2116 S("Ingress Queues", "%d", niq);
2117 S("Traffic Class", "%d", tc);
2118 S("Port Access Rights Mask", "%#x", pmask);
2119 S("MAC Address Filters", "%d", nexactf);
2120 S("Firmware Command Read Capabilities", "%#x", r_caps);
2121 S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2122
2123 #undef S
2124
2125 return 0;
2126}
2127
2128static int resources_open(struct inode *inode, struct file *file)
2129{
2130 return single_open(file, resources_show, inode->i_private);
2131}
2132
2133static const struct file_operations resources_proc_fops = {
2134 .owner = THIS_MODULE,
2135 .open = resources_open,
2136 .read = seq_read,
2137 .llseek = seq_lseek,
2138 .release = single_release,
2139};
2140
2141/*
2142 * Show Virtual Interfaces.
2143 */
2144static int interfaces_show(struct seq_file *seq, void *v)
2145{
2146 if (v == SEQ_START_TOKEN) {
2147 seq_puts(seq, "Interface Port VIID\n");
2148 } else {
2149 struct adapter *adapter = seq->private;
2150 int pidx = (uintptr_t)v - 2;
2151 struct net_device *dev = adapter->port[pidx];
2152 struct port_info *pi = netdev_priv(dev);
2153
2154 seq_printf(seq, "%9s %4d %#5x\n",
2155 dev->name, pi->port_id, pi->viid);
2156 }
2157 return 0;
2158}
2159
2160static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2161{
2162 return pos <= adapter->params.nports
2163 ? (void *)(uintptr_t)(pos + 1)
2164 : NULL;
2165}
2166
2167static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2168{
2169 return *pos
2170 ? interfaces_get_idx(seq->private, *pos)
2171 : SEQ_START_TOKEN;
2172}
2173
2174static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2175{
2176 (*pos)++;
2177 return interfaces_get_idx(seq->private, *pos);
2178}
2179
2180static void interfaces_stop(struct seq_file *seq, void *v)
2181{
2182}
2183
2184static const struct seq_operations interfaces_seq_ops = {
2185 .start = interfaces_start,
2186 .next = interfaces_next,
2187 .stop = interfaces_stop,
2188 .show = interfaces_show
2189};
2190
2191static int interfaces_open(struct inode *inode, struct file *file)
2192{
2193 int res = seq_open(file, &interfaces_seq_ops);
2194
2195 if (res == 0) {
2196 struct seq_file *seq = file->private_data;
2197 seq->private = inode->i_private;
2198 }
2199 return res;
2200}
2201
2202static const struct file_operations interfaces_proc_fops = {
2203 .owner = THIS_MODULE,
2204 .open = interfaces_open,
2205 .read = seq_read,
2206 .llseek = seq_lseek,
2207 .release = seq_release,
2208};
2209
2210/*
2211 * /sys/kernel/debug/cxgb4vf/ files list.
2212 */
2213struct cxgb4vf_debugfs_entry {
2214 const char *name; /* name of debugfs node */
2215 umode_t mode; /* file system mode */
2216 const struct file_operations *fops;
2217};
2218
2219static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2220 { "mboxlog", S_IRUGO, &mboxlog_fops },
2221 { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2222 { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2223 { "resources", S_IRUGO, &resources_proc_fops },
2224 { "interfaces", S_IRUGO, &interfaces_proc_fops },
2225};
2226
2227/*
2228 * Module and device initialization and cleanup code.
2229 * ==================================================
2230 */
2231
2232/*
2233 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2234 * directory (debugfs_root) has already been set up.
2235 */
2236 static int setup_debugfs(struct adapter *adapter)
2237{
2238 int i;
2239
2240 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2241
2242 /*
2243 * Debugfs support is best effort.
2244 */
2245 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2246 (void)debugfs_create_file(debugfs_files[i].name,
2247 debugfs_files[i].mode,
2248 adapter->debugfs_root,
2249 (void *)adapter,
2250 debugfs_files[i].fops);
2251
2252 return 0;
2253}
2254
2255/*
2256 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2257 * it to our caller to tear down the directory (debugfs_root).
2258 */
2259 static void cleanup_debugfs(struct adapter *adapter)
2260 {
2261 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2262
2263 /*
2264 * Unlike our sister routine cleanup_proc(), we don't need to remove
2265 * individual entries because a call will be made to
2266 * debugfs_remove_recursive(). We just need to clean up any ancillary
2267 * persistent state.
2268 */
2269 /* nothing to do */
2270}
2271
2272/* Figure out how many Ports and Queue Sets we can support. This depends on
2273 * knowing our Virtual Function Resources and may be called a second time if
2274 * we fall back from MSI-X to MSI Interrupt Mode.
2275 */
2276static void size_nports_qsets(struct adapter *adapter)
2277{
2278 struct vf_resources *vfres = &adapter->params.vfres;
2279 unsigned int ethqsets, pmask_nports;
2280
2281 /* The number of "ports" which we support is equal to the number of
2282 * Virtual Interfaces with which we've been provisioned.
2283 */
2284 adapter->params.nports = vfres->nvi;
2285 if (adapter->params.nports > MAX_NPORTS) {
2286 dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2287 " allowed virtual interfaces\n", MAX_NPORTS,
2288 adapter->params.nports);
2289 adapter->params.nports = MAX_NPORTS;
2290 }
2291
2292 /* We may have been provisioned with more VIs than the number of
2293 * ports we're allowed to access (our Port Access Rights Mask).
2294 * This is obviously a configuration conflict but we don't want to
2295 * crash the kernel or anything silly just because of that.
2296 */
2297 pmask_nports = hweight32(adapter->params.vfres.pmask);
2298 if (pmask_nports < adapter->params.nports) {
2299 dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2300 " virtual interfaces; limited by Port Access Rights"
2301 " mask %#x\n", pmask_nports, adapter->params.nports,
2302 adapter->params.vfres.pmask);
2303 adapter->params.nports = pmask_nports;
2304 }
2305
2306 /* We need to reserve an Ingress Queue for the Asynchronous Firmware
2307 * Event Queue. And if we're using MSI Interrupts, we'll also need to
2308 * reserve an Ingress Queue for a Forwarded Interrupts.
2309 *
2310 * The rest of the FL/Intr-capable ingress queues will be matched up
2311 * one-for-one with Ethernet/Control egress queues in order to form
2312 * "Queue Sets" which will be aportioned between the "ports". For
2313 * each Queue Set, we'll need the ability to allocate two Egress
2314 * Contexts -- one for the Ingress Queue Free List and one for the TX
2315 * Ethernet Queue.
2316 *
2317 * Note that even if we're currently configured to use MSI-X
2318 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2319 * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
2320 * happens we'll need to adjust things later.
2321 */
2322 ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2323 if (vfres->nethctrl != ethqsets)
2324 ethqsets = min(vfres->nethctrl, ethqsets);
2325 if (vfres->neq < ethqsets*2)
2326 ethqsets = vfres->neq/2;
2327 if (ethqsets > MAX_ETH_QSETS)
2328 ethqsets = MAX_ETH_QSETS;
2329 adapter->sge.max_ethqsets = ethqsets;
2330
2331 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2332 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2333 " virtual interfaces (too few Queue Sets)\n",
2334 adapter->sge.max_ethqsets, adapter->params.nports);
2335 adapter->params.nports = adapter->sge.max_ethqsets;
2336 }
2337}
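/*
 * Editor's sketch, with made-up provisioning numbers: given nvi = 2,
 * pmask = 0x3, niqflint = 6, nethctrl = 8, neq = 8 and MSI mode
 * (msi == MSI_MSI), the arithmetic above yields
 *
 *	ethqsets = 6 - 1 - 1 = 4    (FW event queue + forwarded-intr queue)
 *	ethqsets = min(8, 4) = 4    (Ethernet Control egress capacity)
 *	neq = 8 >= 2 * 4            (one Free List + one TX queue per set)
 *
 * so max_ethqsets = 4, shared across nports = 2 Virtual Interfaces.
 */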
2338
2339/*
2340 * Perform early "adapter" initialization. This is where we discover what
2341 * adapter parameters we're going to be using and initialize basic adapter
2342 * hardware support.
2343 */
2344 static int adap_init0(struct adapter *adapter)
2345 {
2346 struct sge_params *sge_params = &adapter->params.sge;
2347 struct sge *s = &adapter->sge;
2348 int err;
2349 u32 param, val = 0;
2350
2351 /*
2352 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2353 * 2.6.31 and later we can't call pci_reset_function() in order to
2354 * issue an FLR because of a self-deadlock on the device semaphore.
2355 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2356 * cases where they're needed -- for instance, some versions of KVM
2357 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2358 * use the firmware based reset in order to reset any per function
2359 * state.
2360 */
2361 err = t4vf_fw_reset(adapter);
2362 if (err < 0) {
2363 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2364 return err;
2365 }
2366
2367 /*
2368 * Grab basic operational parameters. These will predominantly have
2369 * been set up by the Physical Function Driver or will be hard coded
2370 * into the adapter. We just have to live with them ... Note that
2371 * we _must_ get our VPD parameters before our SGE parameters because
2372 * we need to know the adapter's core clock from the VPD in order to
2373 * properly decode the SGE Timer Values.
2374 */
2375 err = t4vf_get_dev_params(adapter);
2376 if (err) {
2377 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2378 " device parameters: err=%d\n", err);
2379 return err;
2380 }
2381 err = t4vf_get_vpd_params(adapter);
2382 if (err) {
2383 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2384 " VPD parameters: err=%d\n", err);
2385 return err;
2386 }
2387 err = t4vf_get_sge_params(adapter);
2388 if (err) {
2389 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2390 " SGE parameters: err=%d\n", err);
2391 return err;
2392 }
2393 err = t4vf_get_rss_glb_config(adapter);
2394 if (err) {
2395 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2396 " RSS parameters: err=%d\n", err);
2397 return err;
2398 }
2399 if (adapter->params.rss.mode !=
2400 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2401 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2402 " mode %d\n", adapter->params.rss.mode);
2403 return -EINVAL;
2404 }
2405 err = t4vf_sge_init(adapter);
2406 if (err) {
2407 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2408 " err=%d\n", err);
2409 return err;
2410 }
2411
2412 /* If we're running on newer firmware, let it know that we're
2413 * prepared to deal with encapsulated CPL messages. Older
2414 * firmware won't understand this and we'll just get
2415 * unencapsulated messages ...
2416 */
2417 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2418 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2419 val = 1;
2420 (void) t4vf_set_params(adapter, 1, &param, &val);
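/*
 * Editor's note: the return value is discarded on purpose; firmware old
 * enough not to know this parameter will simply fail the command, and
 * we keep receiving unencapsulated messages as before.
 */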
2421
2422 /*
2423 * Retrieve our RX interrupt holdoff timer values and counter
2424 * threshold values from the SGE parameters.
2425 */
2426 s->timer_val[0] = core_ticks_to_us(adapter,
2427 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2428 s->timer_val[1] = core_ticks_to_us(adapter,
2429 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2430 s->timer_val[2] = core_ticks_to_us(adapter,
2431 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2432 s->timer_val[3] = core_ticks_to_us(adapter,
2433 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2434 s->timer_val[4] = core_ticks_to_us(adapter,
2435 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2436 s->timer_val[5] = core_ticks_to_us(adapter,
2437 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2438
2439 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2440 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2441 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2442 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2443
2444 /*
2445 * Grab our Virtual Interface resource allocation, extract the
2446 * features that we're interested in and do a bit of sanity testing on
2447 * what we discover.
2448 */
2449 err = t4vf_get_vfres(adapter);
2450 if (err) {
2451 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2452 " resources: err=%d\n", err);
2453 return err;
2454 }
2455
2456 /* Check for various parameter sanity issues */
2457 if (adapter->params.vfres.pmask == 0) {
2458 dev_err(adapter->pdev_dev, "no port access configured\n"
2459 "usable!\n");
2460 return -EINVAL;
2461 }
2462 if (adapter->params.vfres.nvi == 0) {
2463 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2464 "usable!\n");
2465 return -EINVAL;
2466 }
2467
2468 /* Initialize nports and max_ethqsets now that we have our Virtual
2469 * Function Resources.
2470 */
2471 size_nports_qsets(adapter);
2472
2473 return 0;
2474}
2475
2476static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2477 u8 pkt_cnt_idx, unsigned int size,
2478 unsigned int iqe_size)
2479{
2480 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2481 (pkt_cnt_idx < SGE_NCOUNTERS ?
2482 QINTR_CNT_EN_F : 0));
2483 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2484 ? pkt_cnt_idx
2485 : 0);
2486 rspq->iqe_len = iqe_size;
2487 rspq->size = size;
2488}
2489
2490/*
2491 * Perform default configuration of DMA queues depending on the number and
2492 * type of ports we found and the number of available CPUs. Most settings can
2493 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2494 * being brought up for the first time.
2495 */
d289f864 2496static void cfg_queues(struct adapter *adapter)
be839e39
CL
2497{
2498 struct sge *s = &adapter->sge;
2499 int q10g, n10g, qidx, pidx, qs;
2500 size_t iqe_size;
2501
2502 /*
2503 * We should not be called till we know how many Queue Sets we can
2504 * support. In particular, this means that we need to know what kind
2505 * of interrupts we'll be using ...
2506 */
2507 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2508
2509 /*
2510 * Count the number of 10GbE Virtual Interfaces that we have.
2511 */
2512 n10g = 0;
2513 for_each_port(adapter, pidx)
2514 n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2515
2516 /*
2517 * We default to 1 queue per non-10G port and up to # of cores queues
2518 * per 10G port.
2519 */
2520 if (n10g == 0)
2521 q10g = 0;
2522 else {
2523 int n1g = (adapter->params.nports - n10g);
2524 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2525 if (q10g > num_online_cpus())
2526 q10g = num_online_cpus();
2527 }
2528
2529 /*
2530 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2531 * The layout will be established in setup_sge_queues() when the
2532 * adapter is brought up for the first time.
2533 */
2534 qidx = 0;
2535 for_each_port(adapter, pidx) {
2536 struct port_info *pi = adap2pinfo(adapter, pidx);
2537
2538 pi->first_qset = qidx;
2539 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2540 qidx += pi->nqsets;
2541 }
2542 s->ethqsets = qidx;
2543
2544 /*
2545 * The Ingress Queue Entry Size for our various Response Queues needs
2546 * to be big enough to accommodate the largest message we can receive
2547 * from the chip/firmware; which is 64 bytes ...
2548 */
2549 iqe_size = 64;
2550
2551 /*
2552 * Set up default Queue Set parameters ... Start off with the
2553 * shortest interrupt holdoff timer.
2554 */
2555 for (qs = 0; qs < s->max_ethqsets; qs++) {
2556 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2557 struct sge_eth_txq *txq = &s->ethtxq[qs];
2558
2559 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2560 rxq->fl.size = 72;
2561 txq->q.size = 1024;
2562 }
2563
2564 /*
2565 * The firmware event queue is used for link state changes and
2566 * notifications of TX DMA completions.
2567 */
2568 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2569
2570 /*
2571 * The forwarded interrupt queue is used when we're in MSI interrupt
2572 * mode. In this mode all interrupts associated with RX queues will
2573 * be forwarded to a single queue which we'll associate with our MSI
2574 * interrupt vector. The messages dropped in the forwarded interrupt
2575 * queue will indicate which ingress queue needs servicing ... This
2576 * queue needs to be large enough to accommodate all of the ingress
2577 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2578 * from equalling the CIDX if every ingress queue has an outstanding
2579 * interrupt). The queue doesn't need to be any larger because no
2580 * ingress queue will ever have more than one outstanding interrupt at
2581 * any time ...
2582 */
2583 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2584 iqe_size);
2585}
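/*
 * Editor's sketch: with two provisioned ports (one 10G, one 1G),
 * max_ethqsets = 8 and 4 online CPUs, the allocation above works out to
 *
 *	q10g = min((8 - 1) / 1, 4) = 4
 *	10G port: first_qset = 0, nqsets = 4
 *	 1G port: first_qset = 4, nqsets = 1
 *
 * for a total of s->ethqsets = 5.
 */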
2586
2587/*
2588 * Reduce the number of Ethernet queues across all ports to at most n.
2589 * n provides at least one queue per port.
2590 */
d289f864 2591static void reduce_ethqs(struct adapter *adapter, int n)
be839e39
CL
2592{
2593 int i;
2594 struct port_info *pi;
2595
2596 /*
2597 * While we have too many active Ethernet Queue Sets, iterate across the
2598 * "ports" and reduce their individual Queue Set allocations.
2599 */
2600 BUG_ON(n < adapter->params.nports);
2601 while (n < adapter->sge.ethqsets)
2602 for_each_port(adapter, i) {
2603 pi = adap2pinfo(adapter, i);
2604 if (pi->nqsets > 1) {
2605 pi->nqsets--;
2606 adapter->sge.ethqsets--;
2607 if (adapter->sge.ethqsets <= n)
2608 break;
2609 }
2610 }
2611
2612 /*
2613 * Reassign the starting Queue Sets for each of the "ports" ...
2614 */
2615 n = 0;
2616 for_each_port(adapter, i) {
2617 pi = adap2pinfo(adapter, i);
2618 pi->first_qset = n;
2619 n += pi->nqsets;
2620 }
2621}
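/*
 * Editor's sketch: continuing the example above, if interrupt setup can
 * only support n = 3 Queue Sets, the loop trims the per-port counts
 * from {4, 1} to {3, 1} and then {2, 1}, and the final pass reassigns
 * first_qset to 0 and 2 respectively.
 */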
2622
2623/*
2624 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2625 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2626 * need. Minimally we need one for every Virtual Interface plus those needed
2627 * for our "extras". Note that this process may lower the maximum number of
2628 * allowed Queue Sets ...
2629 */
d289f864 2630static int enable_msix(struct adapter *adapter)
be839e39 2631{
bd663689 2632 int i, want, need, nqsets;
be839e39
CL
2633 struct msix_entry entries[MSIX_ENTRIES];
2634 struct sge *s = &adapter->sge;
2635
2636 for (i = 0; i < MSIX_ENTRIES; ++i)
2637 entries[i].entry = i;
2638
2639 /*
2640 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2641 * plus those needed for our "extras" (for example, the firmware
2642 * message queue). We _need_ at least one "Queue Set" per Virtual
2643 * Interface plus those needed for our "extras". So now we get to see
2644 * if the song is right ...
2645 */
2646 want = s->max_ethqsets + MSIX_EXTRAS;
2647 need = adapter->params.nports + MSIX_EXTRAS;
2648
2649 want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2650 if (want < 0)
2651 return want;
2652
2653 nqsets = want - MSIX_EXTRAS;
2654 if (nqsets < s->max_ethqsets) {
2655 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2656 " for %d Queue Sets\n", nqsets);
2657 s->max_ethqsets = nqsets;
2658 if (nqsets < s->ethqsets)
2659 reduce_ethqs(adapter, nqsets);
2660 }
2661 for (i = 0; i < want; ++i)
2662 adapter->msix_info[i].vec = entries[i].vector;
2663
2664 return 0;
2665}
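/*
 * Editor's sketch (assuming MSIX_EXTRAS counts just the firmware event
 * queue): with 2 ports and max_ethqsets = 4 we ask
 * pci_enable_msix_range() for want = 5 vectors but accept as few as
 * need = 3.  If only 4 vectors are granted, nqsets = 3, so max_ethqsets
 * drops to 3 and, once cfg_queues() has distributed Queue Sets,
 * reduce_ethqs() trims the per-port allocations to fit.
 */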
2666
2667static const struct net_device_ops cxgb4vf_netdev_ops = {
2668 .ndo_open = cxgb4vf_open,
2669 .ndo_stop = cxgb4vf_stop,
2670 .ndo_start_xmit = t4vf_eth_xmit,
2671 .ndo_get_stats = cxgb4vf_get_stats,
2672 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2673 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2674 .ndo_validate_addr = eth_validate_addr,
2675 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2676 .ndo_change_mtu = cxgb4vf_change_mtu,
2677 .ndo_fix_features = cxgb4vf_fix_features,
2678 .ndo_set_features = cxgb4vf_set_features,
2679#ifdef CONFIG_NET_POLL_CONTROLLER
2680 .ndo_poll_controller = cxgb4vf_poll_controller,
2681#endif
2682};
2683
2684/*
2685 * "Probe" a device: initialize a device and construct all kernel and driver
2686 * state needed to manage the device. This routine is called "init_one" in
2687 * the PF Driver ...
2688 */
d289f864 2689static int cxgb4vf_pci_probe(struct pci_dev *pdev,
1dd06ae8 2690 const struct pci_device_id *ent)
be839e39 2691{
be839e39
CL
2692 int pci_using_dac;
2693 int err, pidx;
2694 unsigned int pmask;
2695 struct adapter *adapter;
2696 struct port_info *pi;
2697 struct net_device *netdev;
2698
2699 /*
2700 * Print our driver banner the first time we're called to initialize a
2701 * device.
2702 */
2703 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2704
2705 /*
2706 * Initialize generic PCI device state.
2707 */
2708 err = pci_enable_device(pdev);
2709 if (err) {
2710 dev_err(&pdev->dev, "cannot enable PCI device\n");
2711 return err;
2712 }
2713
2714 /*
2715 * Reserve PCI resources for the device. If we can't get them some
2716 * other driver may have already claimed the device ...
2717 */
2718 err = pci_request_regions(pdev, KBUILD_MODNAME);
2719 if (err) {
2720 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2721 goto err_disable_device;
2722 }
2723
2724 /*
2725 * Set up our DMA mask: try for 64-bit address masking first and
2726 * fall back to 32-bit if we can't get 64 bits ...
2727 */
2728 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2729 if (err == 0) {
2730 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2731 if (err) {
2732 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2733 " coherent allocations\n");
2734 goto err_release_regions;
2735 }
2736 pci_using_dac = 1;
2737 } else {
2738 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2739 if (err != 0) {
2740 dev_err(&pdev->dev, "no usable DMA configuration\n");
2741 goto err_release_regions;
2742 }
2743 pci_using_dac = 0;
2744 }
2745
2746 /*
2747 * Enable bus mastering for the device ...
2748 */
2749 pci_set_master(pdev);
2750
2751 /*
2752 * Allocate our adapter data structure and attach it to the device.
2753 */
2754 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2755 if (!adapter) {
2756 err = -ENOMEM;
2757 goto err_release_regions;
2758 }
2759 pci_set_drvdata(pdev, adapter);
2760 adapter->pdev = pdev;
2761 adapter->pdev_dev = &pdev->dev;
2762
2763 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2764 (sizeof(struct mbox_cmd) *
2765 T4VF_OS_LOG_MBOX_CMDS),
2766 GFP_KERNEL);
2767 if (!adapter->mbox_log) {
2768 err = -ENOMEM;
2769 goto err_free_adapter;
2770 }
2771 adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2772
2773 /*
2774 * Initialize SMP data synchronization resources.
2775 */
2776 spin_lock_init(&adapter->stats_lock);
2777 spin_lock_init(&adapter->mbox_lock);
2778 INIT_LIST_HEAD(&adapter->mlist.list);
2779
2780 /*
2781 * Map our I/O registers in BAR0.
2782 */
2783 adapter->regs = pci_ioremap_bar(pdev, 0);
2784 if (!adapter->regs) {
2785 dev_err(&pdev->dev, "cannot map device registers\n");
2786 err = -ENOMEM;
2787 goto err_free_adapter;
2788 }
2789
2790 /* Wait for the device to become ready before proceeding ...
2791 */
2792 err = t4vf_prep_adapter(adapter);
2793 if (err) {
2794 dev_err(adapter->pdev_dev, "device didn't become ready:"
2795 " err=%d\n", err);
2796 goto err_unmap_bar0;
2797 }
2798
2799 /* For T5 and later we want to use the new BAR-based User Doorbells,
2800 * so we need to map BAR2 here ...
2801 */
2802 if (!is_t4(adapter->params.chip)) {
2803 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2804 pci_resource_len(pdev, 2));
2805 if (!adapter->bar2) {
2806 dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2807 err = -ENOMEM;
2808 goto err_unmap_bar0;
2809 }
2810 }
2811 /*
2812 * Initialize adapter level features.
2813 */
2814 adapter->name = pci_name(pdev);
2815 adapter->msg_enable = dflt_msg_enable;
2816 err = adap_init0(adapter);
2817 if (err)
2818 goto err_unmap_bar;
2819
2820 /*
2821 * Allocate our "adapter ports" and stitch everything together.
2822 */
2823 pmask = adapter->params.vfres.pmask;
2824 for_each_port(adapter, pidx) {
2825 int port_id, viid;
2826
2827 /*
2828 * We simplistically allocate our virtual interfaces
2829 * sequentially across the port numbers to which we have
2830 * access rights. This should be configurable in some manner
2831 * ...
2832 */
2833 if (pmask == 0)
2834 break;
2835 port_id = ffs(pmask) - 1;
2836 pmask &= ~(1 << port_id);
2837 viid = t4vf_alloc_vi(adapter, port_id);
2838 if (viid < 0) {
2839 dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2840 " err=%d\n", port_id, viid);
2841 err = viid;
2842 goto err_free_dev;
2843 }
2844
2845 /*
2846 * Allocate our network device and stitch things together.
2847 */
2848 netdev = alloc_etherdev_mq(sizeof(struct port_info),
2849 MAX_PORT_QSETS);
2850 if (netdev == NULL) {
2851 t4vf_free_vi(adapter, viid);
2852 err = -ENOMEM;
2853 goto err_free_dev;
2854 }
2855 adapter->port[pidx] = netdev;
2856 SET_NETDEV_DEV(netdev, &pdev->dev);
2857 pi = netdev_priv(netdev);
2858 pi->adapter = adapter;
2859 pi->pidx = pidx;
2860 pi->port_id = port_id;
2861 pi->viid = viid;
2862
2863 /*
2864 * Initialize the starting state of our "port" and register
2865 * it.
2866 */
2867 pi->xact_addr_filt = -1;
2868 netif_carrier_off(netdev);
2869 netdev->irq = pdev->irq;
2870
2871 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2872 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2873 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2874 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2875 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2876 NETIF_F_HIGHDMA;
2877 netdev->features = netdev->hw_features |
2878 NETIF_F_HW_VLAN_CTAG_TX;
2879 if (pci_using_dac)
2880 netdev->features |= NETIF_F_HIGHDMA;
2881
2882 netdev->priv_flags |= IFF_UNICAST_FLT;
2883
2884 netdev->netdev_ops = &cxgb4vf_netdev_ops;
2885 netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
2886
2887 /*
2888 * Initialize the hardware/software state for the port.
2889 */
2890 err = t4vf_port_init(adapter, pidx);
2891 if (err) {
2892 dev_err(&pdev->dev, "cannot initialize port %d\n",
2893 pidx);
2894 goto err_free_dev;
2895 }
2896 }
2897
2898 /* See what interrupts we'll be using. If we've been configured to
2899 * use MSI-X interrupts, try to enable them but fall back to using
2900 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
2901 * get MSI interrupts we bail with the error.
2902 */
2903 if (msi == MSI_MSIX && enable_msix(adapter) == 0)
2904 adapter->flags |= USING_MSIX;
2905 else {
2906 if (msi == MSI_MSIX) {
2907 dev_info(adapter->pdev_dev,
2908 "Unable to use MSI-X Interrupts; falling "
2909 "back to MSI Interrupts\n");
2910
2911 /* We're going to need a Forwarded Interrupt Queue so
2912 * that may cut into how many Queue Sets we can
2913 * support.
2914 */
2915 msi = MSI_MSI;
2916 size_nports_qsets(adapter);
2917 }
2918 err = pci_enable_msi(pdev);
2919 if (err) {
2920 dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
2921 " err=%d\n", err);
2922 goto err_free_dev;
2923 }
2924 adapter->flags |= USING_MSI;
2925 }
2926
2927 /* Now that we know how many "ports" we have and what interrupt
2928 * mechanism we're going to use, we can configure our queue resources.
2929 */
2930 cfg_queues(adapter);
2931
2932 /*
2933 * The "card" is now ready to go. If any errors occur during device
2934 * registration we do not fail the whole "card" but rather proceed
2935 * only with the ports we manage to register successfully. However we
2936 * must register at least one net device.
2937 */
2938 for_each_port(adapter, pidx) {
2939 struct port_info *pi = netdev_priv(adapter->port[pidx]);
2940 netdev = adapter->port[pidx];
2941 if (netdev == NULL)
2942 continue;
2943
2944 netif_set_real_num_tx_queues(netdev, pi->nqsets);
2945 netif_set_real_num_rx_queues(netdev, pi->nqsets);
2946
2947 err = register_netdev(netdev);
2948 if (err) {
2949 dev_warn(&pdev->dev, "cannot register net device %s,"
2950 " skipping\n", netdev->name);
2951 continue;
2952 }
2953
2954 set_bit(pidx, &adapter->registered_device_map);
2955 }
2956 if (adapter->registered_device_map == 0) {
2957 dev_err(&pdev->dev, "could not register any net devices\n");
2958 goto err_disable_interrupts;
2959 }
2960
2961 /*
2962 * Set up our debugfs entries.
2963 */
2964 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2965 adapter->debugfs_root =
2966 debugfs_create_dir(pci_name(pdev),
2967 cxgb4vf_debugfs_root);
2968 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2969 dev_warn(&pdev->dev, "could not create debugfs"
2970 " directory");
2971 else
2972 setup_debugfs(adapter);
2973 }
2974
2975 /*
2976 * Print a short notice on the existence and configuration of the new
2977 * VF network device ...
2978 */
2979 for_each_port(adapter, pidx) {
2980 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
2981 adapter->port[pidx]->name,
2982 (adapter->flags & USING_MSIX) ? "MSI-X" :
2983 (adapter->flags & USING_MSI) ? "MSI" : "");
2984 }
2985
2986 /*
2987 * Return success!
2988 */
2989 return 0;
2990
2991 /*
2992 * Error recovery and exit code. Unwind state that's been created
2993 * so far and return the error.
2994 */
2995err_disable_interrupts:
2996 if (adapter->flags & USING_MSIX) {
2997 pci_disable_msix(adapter->pdev);
2998 adapter->flags &= ~USING_MSIX;
2999 } else if (adapter->flags & USING_MSI) {
3000 pci_disable_msi(adapter->pdev);
3001 adapter->flags &= ~USING_MSI;
3002 }
3003
3004err_free_dev:
3005 for_each_port(adapter, pidx) {
3006 netdev = adapter->port[pidx];
3007 if (netdev == NULL)
3008 continue;
3009 pi = netdev_priv(netdev);
3010 t4vf_free_vi(adapter, pi->viid);
3011 if (test_bit(pidx, &adapter->registered_device_map))
3012 unregister_netdev(netdev);
3013 free_netdev(netdev);
3014 }
3015
3016err_unmap_bar:
3017 if (!is_t4(adapter->params.chip))
3018 iounmap(adapter->bar2);
3019
3020err_unmap_bar0:
3021 iounmap(adapter->regs);
3022
3023err_free_adapter:
3024 kfree(adapter->mbox_log);
3025 kfree(adapter);
3026
3027err_release_regions:
3028 pci_release_regions(pdev);
3029 pci_clear_master(pdev);
3030
3031err_disable_device:
3032 pci_disable_device(pdev);
3033
3034 return err;
3035}
3036
3037/*
3038 * "Remove" a device: tear down all kernel and driver state created in the
3039 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
3040 * that this is called "remove_one" in the PF Driver.)
3041 */
d289f864 3042static void cxgb4vf_pci_remove(struct pci_dev *pdev)
be839e39
CL
3043{
3044 struct adapter *adapter = pci_get_drvdata(pdev);
3045
3046 /*
3047 * Tear down driver state associated with device.
3048 */
3049 if (adapter) {
3050 int pidx;
3051
3052 /*
3053 * Stop all of our activity. Unregister network port,
3054 * disable interrupts, etc.
3055 */
3056 for_each_port(adapter, pidx)
3057 if (test_bit(pidx, &adapter->registered_device_map))
3058 unregister_netdev(adapter->port[pidx]);
3059 t4vf_sge_stop(adapter);
3060 if (adapter->flags & USING_MSIX) {
3061 pci_disable_msix(adapter->pdev);
3062 adapter->flags &= ~USING_MSIX;
3063 } else if (adapter->flags & USING_MSI) {
3064 pci_disable_msi(adapter->pdev);
3065 adapter->flags &= ~USING_MSI;
3066 }
3067
3068 /*
3069 * Tear down our debugfs entries.
3070 */
3071 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3072 cleanup_debugfs(adapter);
3073 debugfs_remove_recursive(adapter->debugfs_root);
3074 }
3075
3076 /*
3077 * Free all of the various resources which we've acquired ...
3078 */
3079 t4vf_free_sge_resources(adapter);
3080 for_each_port(adapter, pidx) {
3081 struct net_device *netdev = adapter->port[pidx];
3082 struct port_info *pi;
3083
3084 if (netdev == NULL)
3085 continue;
3086
3087 pi = netdev_priv(netdev);
3088 t4vf_free_vi(adapter, pi->viid);
3089 free_netdev(netdev);
3090 }
3091 iounmap(adapter->regs);
3092 if (!is_t4(adapter->params.chip))
3093 iounmap(adapter->bar2);
3094 kfree(adapter->mbox_log);
3095 kfree(adapter);
3096 }
3097
3098 /*
3099 * Disable the device and release its PCI resources.
3100 */
3101 pci_disable_device(pdev);
3102 pci_clear_master(pdev);
3103 pci_release_regions(pdev);
3104}
3105
3106/*
3107 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3108 * delivery.
3109 */
d289f864 3110static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
7e9c2629
CL
3111{
3112 struct adapter *adapter;
3113 int pidx;
3114
3115 adapter = pci_get_drvdata(pdev);
3116 if (!adapter)
3117 return;
3118
3119 /* Disable all Virtual Interfaces. This will shut down the
3120 * delivery of all ingress packets into the chip for these
3121 * Virtual Interfaces.
3122 */
3123 for_each_port(adapter, pidx)
3124 if (test_bit(pidx, &adapter->registered_device_map))
3125 unregister_netdev(adapter->port[pidx]);
3126
3127 /* Stop all SGE activity and disable our Interrupts, allowing
3128 * various internal pathways to drain.
3129 */
3130 t4vf_sge_stop(adapter);
3131 if (adapter->flags & USING_MSIX) {
3132 pci_disable_msix(adapter->pdev);
3133 adapter->flags &= ~USING_MSIX;
3134 } else if (adapter->flags & USING_MSI) {
3135 pci_disable_msi(adapter->pdev);
3136 adapter->flags &= ~USING_MSI;
3137 }
3138
3139 /*
3140 * Free up all Queues which will prevent further DMA and
3141 * Interrupts allowing various internal pathways to drain.
3142 */
3143 t4vf_free_sge_resources(adapter);
3144 pci_set_drvdata(pdev, NULL);
3145}
3146
3147/* Macros needed to support the PCI Device ID Table ...
3148 */
3149#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3150 static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3151#define CH_PCI_DEVICE_ID_FUNCTION 0x8
3152
3153#define CH_PCI_ID_TABLE_ENTRY(devid) \
3154 { PCI_VDEVICE(CHELSIO, (devid)), 0 }
3155
3156#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3157
3158#include "../cxgb4/t4_pci_id_tbl.h"
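/*
 * Editor's note: t4_pci_id_tbl.h is an "X macro" style table shared
 * with the PF driver; with the macros above it expands to roughly
 * (device IDs illustrative only):
 *
 *	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 *		{ PCI_VDEVICE(CHELSIO, 0x4801), 0 },
 *		...
 *		{ 0, }
 *	};
 *
 * with CH_PCI_DEVICE_ID_FUNCTION = 0x8 selecting the VF function of
 * each device.
 */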
3159
3160MODULE_DESCRIPTION(DRV_DESC);
3161MODULE_AUTHOR("Chelsio Communications");
3162MODULE_LICENSE("Dual BSD/GPL");
3163MODULE_VERSION(DRV_VERSION);
3164MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3165
3166static struct pci_driver cxgb4vf_driver = {
3167 .name = KBUILD_MODNAME,
3168 .id_table = cxgb4vf_pci_tbl,
3169 .probe = cxgb4vf_pci_probe,
3170 .remove = cxgb4vf_pci_remove,
3171 .shutdown = cxgb4vf_pci_shutdown,
3172};
3173
3174/*
3175 * Initialize global driver state.
3176 */
3177static int __init cxgb4vf_module_init(void)
3178{
3179 int ret;
3180
3181 /*
3182 * Vet our module parameters.
3183 */
3184 if (msi != MSI_MSIX && msi != MSI_MSI) {
3185 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3186 msi, MSI_MSIX, MSI_MSI);
3187 return -EINVAL;
3188 }
3189
3190 /* Debugfs support is optional, just warn if this fails */
3191 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3192 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3193 pr_warn("could not create debugfs entry, continuing\n");
3194
3195 ret = pci_register_driver(&cxgb4vf_driver);
3196 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3197 debugfs_remove(cxgb4vf_debugfs_root);
3198 return ret;
3199}
3200
3201/*
3202 * Tear down global driver state.
3203 */
3204static void __exit cxgb4vf_module_exit(void)
3205{
3206 pci_unregister_driver(&cxgb4vf_driver);
3207 debugfs_remove(cxgb4vf_debugfs_root);
3208}
3209
3210module_init(cxgb4vf_module_init);
3211module_exit(cxgb4vf_module_exit);