/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
		 "default adapter ethtool message level bitmap, "
		 "deprecated parameter");

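/*
 * The message level of a running interface can also be changed at runtime,
 * e.g. "ethtool -s <ethX> msglvl <bitmap>".
 */
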
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");

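/*
 * For example, loading the module with "modprobe cxgb4vf msi=1" restricts
 * the driver to plain MSI; the default (msi=2) lets it prefer MSI-X.
 */
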
/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	MIN_FL_ENTRIES		= 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
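
/*
 * For example, with the usual 64-byte Egress Context Unit (SGE_EQ_IDXSIZE),
 * FL_PER_EQ_UNIT works out to 64 / sizeof(__be64) = 8, so at least eight
 * Free List entries are always held back as unusable.
 */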

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 40000:
			s = "40Gbps";
			break;

		case 10000:
			s = "10Gbps";
			break;

		case 1000:
			s = "1000Mbps";
			break;

		case 100:
			s = "100Mbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch (pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX | PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.  Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;

		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);

		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			break;
		}
		cpl = (void *)p;
		/*FALLTHROUGH*/
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					netdev_get_tx_queue(dev, qs),
					s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;

				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);
	return 0;
}

/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in
 * the PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

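/*
 * Recompute the hash-filter vector from the adapter's accumulated list of
 * hash MAC addresses and program it into the Virtual Interface.
 */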
static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}

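/*
 * __dev_uc_sync()/__dev_mc_sync() callback: install one MAC address on the
 * Virtual Interface, falling back to the hash filter (and remembering the
 * address on mac_hlist) when an exact-match filter can't be allocated.
 */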
static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If hash != 0, then add the address to the hash addr list so that
	 * in the end we can calculate the hash for the whole list and
	 * program it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

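/*
 * __dev_uc_unsync()/__dev_mc_unsync() callback: remove one MAC address from
 * the Virtual Interface, whether it was installed as an exact-match filter
 * or via the hash filter.
 */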
static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	if (!(dev->flags & IFF_PROMISC)) {
		__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
		if (!(dev->flags & IFF_ALLMULTI))
			__dev_mc_sync(dev, cxgb4vf_mac_sync,
				      cxgb4vf_mac_unsync);
	}
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];

		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

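/*
 * Similarly, find the entry in the interrupt holdoff packet count array
 * which comes closest to the specified threshold value.
 */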
static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}

/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	/*
	 * The VF Driver doesn't have access to any of the other
	 * common Ethernet device ioctl()'s (like reading/writing
	 * PHY registers, etc.).
	 */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* accommodate SACK */
	if (new_mtu < 81)
		return -EINVAL;

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb4vf_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */

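/*
 * Translate a firmware port type and link capability word into the
 * corresponding ethtool SUPPORTED_* bitmap.
 */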
static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
					  unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI ||
		 type == FW_PORT_TYPE_SFP ||
		 type == FW_PORT_TYPE_QSFP_10G ||
		 type == FW_PORT_TYPE_QSA) {
		v |= SUPPORTED_FIBRE;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_BP40_BA ||
		   type == FW_PORT_TYPE_QSFP) {
		v |= SUPPORTED_40000baseSR4_Full;
		v |= SUPPORTED_FIBRE;
	}

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static int cxgb4vf_get_settings(struct net_device *dev,
				struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP ||
		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
		 p->port_type == FW_PORT_TYPE_QSA ||
		 p->port_type == FW_PORT_TYPE_QSFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
			cmd->port = PORT_FIBRE;
		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_OTHER;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
					       p->link_cfg.supported);
	cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
						 p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN_F)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes ",
	"TxBroadcastFrames ",
	"TxMulticastBytes ",
	"TxMulticastFrames ",
	"TxUnicastBytes ",
	"TxUnicastFrames ",
	"TxDroppedFrames ",
	"TxOffloadBytes ",
	"TxOffloadFrames ",
	"RxBroadcastBytes ",
	"RxBroadcastFrames ",
	"RxMulticastBytes ",
	"RxMulticastFrames ",
	"RxUnicastBytes ",
	"RxUnicastFrames ",
	"RxErrorFrames ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROPackets ",
	"GROMerged ",
};

/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
					    ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings		= cxgb4vf_get_settings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show SGE Queue Set information.  We display QPL Queue Sets per line.
 */
#define QPL	4

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

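	/*
	 * Row-printing helpers, #undef'd again below: each emits one label
	 * followed by one column per Queue Set in the current group.  S
	 * prints an arbitrary value, T indexes into the TX queue array and
	 * R into the RX queue array; "n" and "qs" come from enclosing scope.
	 */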
	#define S3(fmt_spec, s, v) \
	do {\
		seq_printf(seq, "%-12s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %16" fmt_spec, v); \
		seq_putc(seq, '\n'); \
	} while (0)
	#define S(s, v)		S3("s", s, v)
	#define T(s, v)		S3("u", s, txq[qs].v)
	#define R(s, v)		S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef S3

	return 0;
}

/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_queue_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
		((adapter->flags & USING_MSI) != 0);
}

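/*
 * seq_file iterator callbacks.  Positions are handed out as *pos + 1 so
 * that a NULL return can unambiguously signal end-of-file.
 */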
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_queue_stop(struct seq_file *seq, void *v)
{
}

static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next  = sge_queue_next,
	.stop  = sge_queue_stop,
	.show  = sge_qinfo_show
};

static int sge_qinfo_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qinfo_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;

		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qinfo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

1859/*
1860 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
1861 */
1862#define QPL 4
1863
1864static int sge_qstats_show(struct seq_file *seq, void *v)
1865{
1866 struct adapter *adapter = seq->private;
1867 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1868 int qs, r = (uintptr_t)v - 1;
1869
1870 if (r)
1871 seq_putc(seq, '\n');
1872
1873 #define S3(fmt, s, v) \
1874 do { \
1875 seq_printf(seq, "%-16s", s); \
1876 for (qs = 0; qs < n; ++qs) \
1877 seq_printf(seq, " %8" fmt, v); \
1878 seq_putc(seq, '\n'); \
1879 } while (0)
1880 #define S(s, v) S3("s", s, v)
1881
1882 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
1883 #define T(s, v) T3("lu", s, v)
1884
1885 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
1886 #define R(s, v) R3("lu", s, v)
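 /* Illustration (editor's note): with n == 2, a row such as
 * R("RxPackets:", stats.pkts) expands via S3() into one label
 * followed by one column per Queue Set on a single line, e.g.
 *
 *	RxPackets:	<qs 0 pkts> <qs 1 pkts>
 */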
1887
1888 if (r < eth_entries) {
1889 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1890 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1891 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1892
1893 S("QType:", "Ethernet");
1894 S("Interface:",
1895 (rxq[qs].rspq.netdev
1896 ? rxq[qs].rspq.netdev->name
1897 : "N/A"));
 1898 R3("u", "RspQNullInts:", rspq.unhandled_irqs);
1899 R("RxPackets:", stats.pkts);
1900 R("RxCSO:", stats.rx_cso);
1901 R("VLANxtract:", stats.vlan_ex);
1902 R("LROmerged:", stats.lro_merged);
1903 R("LROpackets:", stats.lro_pkts);
1904 R("RxDrops:", stats.rx_drops);
1905 T("TSO:", tso);
1906 T("TxCSO:", tx_cso);
1907 T("VLANins:", vlan_ins);
1908 T("TxQFull:", q.stops);
1909 T("TxQRestarts:", q.restarts);
1910 T("TxMapErr:", mapping_err);
1911 R("FLAllocErr:", fl.alloc_failed);
1912 R("FLLrgAlcErr:", fl.large_alloc_failed);
1913 R("FLStarving:", fl.starving);
1914 return 0;
1915 }
1916
1917 r -= eth_entries;
1918 if (r == 0) {
1919 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1920
1921 seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
 1922 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
 1923 evtq->unhandled_irqs);
1924 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
1925 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
1926 } else if (r == 1) {
1927 const struct sge_rspq *intrq = &adapter->sge.intrq;
1928
1929 seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
 1930 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
 1931 intrq->unhandled_irqs);
1932 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
1933 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
1934 }
1935
1936 #undef R
1937 #undef T
1938 #undef S
1939 #undef R3
1940 #undef T3
1941 #undef S3
1942
1943 return 0;
1944}
1945
1946/*
1947 * Return the number of "entries" in our "file". We group the multi-Queue
1948 * sections with QPL Queue Sets per "entry". The sections of the output are:
1949 *
1950 * Ethernet RX/TX Queue Sets
1951 * Firmware Event Queue
1952 * Forwarded Interrupt Queue (if in MSI mode)
1953 */
1954static int sge_qstats_entries(const struct adapter *adapter)
1955{
1956 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1957 ((adapter->flags & USING_MSI) != 0);
1958}
1959
1960static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
1961{
1962 int entries = sge_qstats_entries(seq->private);
1963
1964 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1965}
1966
1967static void sge_qstats_stop(struct seq_file *seq, void *v)
1968{
1969}
1970
1971static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
1972{
1973 int entries = sge_qstats_entries(seq->private);
1974
1975 (*pos)++;
1976 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1977}
1978
1979static const struct seq_operations sge_qstats_seq_ops = {
1980 .start = sge_qstats_start,
1981 .next = sge_qstats_next,
1982 .stop = sge_qstats_stop,
1983 .show = sge_qstats_show
1984};
1985
1986static int sge_qstats_open(struct inode *inode, struct file *file)
1987{
1988 int res = seq_open(file, &sge_qstats_seq_ops);
1989
1990 if (res == 0) {
1991 struct seq_file *seq = file->private_data;
1992 seq->private = inode->i_private;
1993 }
1994 return res;
1995}
1996
1997static const struct file_operations sge_qstats_proc_fops = {
1998 .owner = THIS_MODULE,
1999 .open = sge_qstats_open,
2000 .read = seq_read,
2001 .llseek = seq_lseek,
2002 .release = seq_release,
2003};
2004
2005/*
2006 * Show PCI-E SR-IOV Virtual Function Resource Limits.
2007 */
2008static int resources_show(struct seq_file *seq, void *v)
2009{
2010 struct adapter *adapter = seq->private;
2011 struct vf_resources *vfres = &adapter->params.vfres;
2012
2013 #define S(desc, fmt, var) \
2014 seq_printf(seq, "%-60s " fmt "\n", \
2015 desc " (" #var "):", vfres->var)
2016
2017 S("Virtual Interfaces", "%d", nvi);
2018 S("Egress Queues", "%d", neq);
2019 S("Ethernet Control", "%d", nethctrl);
2020 S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2021 S("Ingress Queues", "%d", niq);
2022 S("Traffic Class", "%d", tc);
2023 S("Port Access Rights Mask", "%#x", pmask);
2024 S("MAC Address Filters", "%d", nexactf);
2025 S("Firmware Command Read Capabilities", "%#x", r_caps);
2026 S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2027
2028 #undef S
2029
2030 return 0;
2031}
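/*
 * A sample of the resulting debugfs output (editor's sketch; the values
 * are illustrative only and depend on how the PF has provisioned us):
 *
 *	Virtual Interfaces (nvi):                                    2
 *	Egress Queues (neq):                                         16
 *	Port Access Rights Mask (pmask):                             0x3
 */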
2032
2033static int resources_open(struct inode *inode, struct file *file)
2034{
2035 return single_open(file, resources_show, inode->i_private);
2036}
2037
2038static const struct file_operations resources_proc_fops = {
2039 .owner = THIS_MODULE,
2040 .open = resources_open,
2041 .read = seq_read,
2042 .llseek = seq_lseek,
2043 .release = single_release,
2044};
2045
2046/*
2047 * Show Virtual Interfaces.
2048 */
2049static int interfaces_show(struct seq_file *seq, void *v)
2050{
2051 if (v == SEQ_START_TOKEN) {
2052 seq_puts(seq, "Interface Port VIID\n");
2053 } else {
2054 struct adapter *adapter = seq->private;
2055 int pidx = (uintptr_t)v - 2;
2056 struct net_device *dev = adapter->port[pidx];
2057 struct port_info *pi = netdev_priv(dev);
2058
2059 seq_printf(seq, "%9s %4d %#5x\n",
2060 dev->name, pi->port_id, pi->viid);
2061 }
2062 return 0;
2063}
2064
2065static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2066{
2067 return pos <= adapter->params.nports
2068 ? (void *)(uintptr_t)(pos + 1)
2069 : NULL;
2070}
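/*
 * Editor's note on the iterator encoding: interfaces_start() maps
 * position 0 to SEQ_START_TOKEN (the header line), and
 * interfaces_get_idx() returns pos + 1 so the cookie never collides
 * with NULL or with SEQ_START_TOKEN (which is (void *)1).
 * interfaces_show() then recovers the port index as (uintptr_t)v - 2,
 * so pos == 1 displays port 0, pos == 2 port 1, and so on.
 */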
2071
2072static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2073{
2074 return *pos
2075 ? interfaces_get_idx(seq->private, *pos)
2076 : SEQ_START_TOKEN;
2077}
2078
2079static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2080{
2081 (*pos)++;
2082 return interfaces_get_idx(seq->private, *pos);
2083}
2084
2085static void interfaces_stop(struct seq_file *seq, void *v)
2086{
2087}
2088
2089static const struct seq_operations interfaces_seq_ops = {
2090 .start = interfaces_start,
2091 .next = interfaces_next,
2092 .stop = interfaces_stop,
2093 .show = interfaces_show
2094};
2095
2096static int interfaces_open(struct inode *inode, struct file *file)
2097{
2098 int res = seq_open(file, &interfaces_seq_ops);
2099
2100 if (res == 0) {
2101 struct seq_file *seq = file->private_data;
2102 seq->private = inode->i_private;
2103 }
2104 return res;
2105}
2106
2107static const struct file_operations interfaces_proc_fops = {
2108 .owner = THIS_MODULE,
2109 .open = interfaces_open,
2110 .read = seq_read,
2111 .llseek = seq_lseek,
2112 .release = seq_release,
2113};
2114
2115/*
 2116 * /sys/kernel/debug/cxgb4vf/ files list.
2117 */
2118struct cxgb4vf_debugfs_entry {
2119 const char *name; /* name of debugfs node */
 2120 umode_t mode; /* file system mode */
2121 const struct file_operations *fops;
2122};
2123
2124static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2125 { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2126 { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2127 { "resources", S_IRUGO, &resources_proc_fops },
2128 { "interfaces", S_IRUGO, &interfaces_proc_fops },
2129};
2130
2131/*
2132 * Module and device initialization and cleanup code.
2133 * ==================================================
2134 */
2135
2136/*
 2137 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2138 * directory (debugfs_root) has already been set up.
2139 */
 2140static int setup_debugfs(struct adapter *adapter)
2141{
2142 int i;
2143
 2144 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2145
2146 /*
2147 * Debugfs support is best effort.
2148 */
2149 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2150 (void)debugfs_create_file(debugfs_files[i].name,
2151 debugfs_files[i].mode,
2152 adapter->debugfs_root,
2153 (void *)adapter,
2154 debugfs_files[i].fops);
2155
2156 return 0;
2157}
2158
2159/*
2160 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2161 * it to our caller to tear down the directory (debugfs_root).
2162 */
 2163static void cleanup_debugfs(struct adapter *adapter)
 2164{
 2165 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2166
2167 /*
2168 * Unlike our sister routine cleanup_proc(), we don't need to remove
2169 * individual entries because a call will be made to
2170 * debugfs_remove_recursive(). We just need to clean up any ancillary
2171 * persistent state.
2172 */
2173 /* nothing to do */
2174}
2175
2176/* Figure out how many Ports and Queue Sets we can support. This depends on
2177 * knowing our Virtual Function Resources and may be called a second time if
2178 * we fall back from MSI-X to MSI Interrupt Mode.
2179 */
2180static void size_nports_qsets(struct adapter *adapter)
2181{
2182 struct vf_resources *vfres = &adapter->params.vfres;
2183 unsigned int ethqsets, pmask_nports;
2184
2185 /* The number of "ports" which we support is equal to the number of
2186 * Virtual Interfaces with which we've been provisioned.
2187 */
2188 adapter->params.nports = vfres->nvi;
2189 if (adapter->params.nports > MAX_NPORTS) {
2190 dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2191 " allowed virtual interfaces\n", MAX_NPORTS,
2192 adapter->params.nports);
2193 adapter->params.nports = MAX_NPORTS;
2194 }
2195
2196 /* We may have been provisioned with more VIs than the number of
2197 * ports we're allowed to access (our Port Access Rights Mask).
2198 * This is obviously a configuration conflict but we don't want to
2199 * crash the kernel or anything silly just because of that.
2200 */
2201 pmask_nports = hweight32(adapter->params.vfres.pmask);
2202 if (pmask_nports < adapter->params.nports) {
 2203 dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2204 " virtual interfaces; limited by Port Access Rights"
2205 " mask %#x\n", pmask_nports, adapter->params.nports,
2206 adapter->params.vfres.pmask);
2207 adapter->params.nports = pmask_nports;
2208 }
2209
2210 /* We need to reserve an Ingress Queue for the Asynchronous Firmware
2211 * Event Queue. And if we're using MSI Interrupts, we'll also need to
 2212 * reserve an Ingress Queue for Forwarded Interrupts.
2213 *
2214 * The rest of the FL/Intr-capable ingress queues will be matched up
2215 * one-for-one with Ethernet/Control egress queues in order to form
2216 * "Queue Sets" which will be aportioned between the "ports". For
2217 * each Queue Set, we'll need the ability to allocate two Egress
2218 * Contexts -- one for the Ingress Queue Free List and one for the TX
2219 * Ethernet Queue.
2220 *
2221 * Note that even if we're currently configured to use MSI-X
2222 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2223 * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
2224 * happens we'll need to adjust things later.
2225 */
2226 ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2227 if (vfres->nethctrl != ethqsets)
2228 ethqsets = min(vfres->nethctrl, ethqsets);
2229 if (vfres->neq < ethqsets*2)
2230 ethqsets = vfres->neq/2;
2231 if (ethqsets > MAX_ETH_QSETS)
2232 ethqsets = MAX_ETH_QSETS;
2233 adapter->sge.max_ethqsets = ethqsets;
2234
2235 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2236 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2237 " virtual interfaces (too few Queue Sets)\n",
2238 adapter->sge.max_ethqsets, adapter->params.nports);
2239 adapter->params.nports = adapter->sge.max_ethqsets;
2240 }
2241}
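/*
 * Worked example (editor's sketch, hypothetical provisioning): with
 * niqflint = 10, nethctrl = 8, neq = 16 and MSI mode:
 *
 *	ethqsets = 10 - 1 - 1 = 8	(FW Event Queue + Forwarded Interrupt Queue)
 *	ethqsets = min(8, 8) = 8	(limited by nethctrl)
 *	neq = 16 >= 8 * 2		(two Egress Contexts per Queue Set, no clamp)
 *
 * leaving max_ethqsets = 8, subject to the MAX_ETH_QSETS cap.
 */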
2242
2243/*
2244 * Perform early "adapter" initialization. This is where we discover what
2245 * adapter parameters we're going to be using and initialize basic adapter
2246 * hardware support.
2247 */
 2248static int adap_init0(struct adapter *adapter)
 2249{
2250 struct sge_params *sge_params = &adapter->params.sge;
2251 struct sge *s = &adapter->sge;
 2252 int err;
 2253 u32 param, val = 0;
 2254
2255 /*
2256 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2257 * 2.6.31 and later we can't call pci_reset_function() in order to
 2258 * issue an FLR because of a self-deadlock on the device semaphore.
2259 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2260 * cases where they're needed -- for instance, some versions of KVM
2261 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2262 * use the firmware based reset in order to reset any per function
2263 * state.
2264 */
2265 err = t4vf_fw_reset(adapter);
2266 if (err < 0) {
2267 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2268 return err;
2269 }
2270
2271 /*
2272 * Grab basic operational parameters. These will predominantly have
2273 * been set up by the Physical Function Driver or will be hard coded
2274 * into the adapter. We just have to live with them ... Note that
2275 * we _must_ get our VPD parameters before our SGE parameters because
2276 * we need to know the adapter's core clock from the VPD in order to
2277 * properly decode the SGE Timer Values.
2278 */
2279 err = t4vf_get_dev_params(adapter);
2280 if (err) {
2281 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2282 " device parameters: err=%d\n", err);
2283 return err;
2284 }
2285 err = t4vf_get_vpd_params(adapter);
2286 if (err) {
2287 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2288 " VPD parameters: err=%d\n", err);
2289 return err;
2290 }
2291 err = t4vf_get_sge_params(adapter);
2292 if (err) {
2293 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2294 " SGE parameters: err=%d\n", err);
2295 return err;
2296 }
2297 err = t4vf_get_rss_glb_config(adapter);
2298 if (err) {
2299 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2300 " RSS parameters: err=%d\n", err);
2301 return err;
2302 }
2303 if (adapter->params.rss.mode !=
2304 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2305 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2306 " mode %d\n", adapter->params.rss.mode);
2307 return -EINVAL;
2308 }
2309 err = t4vf_sge_init(adapter);
2310 if (err) {
2311 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2312 " err=%d\n", err);
2313 return err;
2314 }
2315
2316 /* If we're running on newer firmware, let it know that we're
2317 * prepared to deal with encapsulated CPL messages. Older
2318 * firmware won't understand this and we'll just get
2319 * unencapsulated messages ...
2320 */
2321 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2322 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2323 val = 1;
2324 (void) t4vf_set_params(adapter, 1, &param, &val);
2325
2326 /*
2327 * Retrieve our RX interrupt holdoff timer values and counter
2328 * threshold values from the SGE parameters.
2329 */
2330 s->timer_val[0] = core_ticks_to_us(adapter,
 2331 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
 2332 s->timer_val[1] = core_ticks_to_us(adapter,
 2333 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
 2334 s->timer_val[2] = core_ticks_to_us(adapter,
 2335 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
 2336 s->timer_val[3] = core_ticks_to_us(adapter,
 2337 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
 2338 s->timer_val[4] = core_ticks_to_us(adapter,
 2339 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
 2340 s->timer_val[5] = core_ticks_to_us(adapter,
 2341 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
 2342
2343 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2344 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2345 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2346 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2347
2348 /*
2349 * Grab our Virtual Interface resource allocation, extract the
2350 * features that we're interested in and do a bit of sanity testing on
2351 * what we discover.
2352 */
2353 err = t4vf_get_vfres(adapter);
2354 if (err) {
2355 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2356 " resources: err=%d\n", err);
2357 return err;
2358 }
2359
 2360 /* Check for various parameter sanity issues */
2361 if (adapter->params.vfres.pmask == 0) {
 2362 dev_err(adapter->pdev_dev, "no port access configured/"
 2363 "usable!\n");
2364 return -EINVAL;
2365 }
 2366 if (adapter->params.vfres.nvi == 0) {
2367 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2368 "usable!\n");
2369 return -EINVAL;
2370 }
2371
2372 /* Initialize nports and max_ethqsets now that we have our Virtual
2373 * Function Resources.
2374 */
2375 size_nports_qsets(adapter);
2376
2377 return 0;
2378}
2379
2380static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2381 u8 pkt_cnt_idx, unsigned int size,
2382 unsigned int iqe_size)
2383{
2384 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2385 (pkt_cnt_idx < SGE_NCOUNTERS ?
2386 QINTR_CNT_EN_F : 0));
2387 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2388 ? pkt_cnt_idx
2389 : 0);
2390 rspq->iqe_len = iqe_size;
2391 rspq->size = size;
2392}
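/*
 * Editor's note: for example, init_rspq(&rspq, 0, 0, 1024, 64) selects
 * holdoff timer index 0, enables interrupt packet-count threshold
 * index 0, and describes a 1024-entry Response Queue with 64-byte
 * entries; passing pkt_cnt_idx >= SGE_NCOUNTERS would leave the count
 * threshold disabled.
 */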
2393
2394/*
2395 * Perform default configuration of DMA queues depending on the number and
2396 * type of ports we found and the number of available CPUs. Most settings can
2397 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2398 * being brought up for the first time.
2399 */
 2400static void cfg_queues(struct adapter *adapter)
2401{
2402 struct sge *s = &adapter->sge;
2403 int q10g, n10g, qidx, pidx, qs;
 2404 size_t iqe_size;
2405
2406 /*
2407 * We should not be called till we know how many Queue Sets we can
2408 * support. In particular, this means that we need to know what kind
2409 * of interrupts we'll be using ...
2410 */
2411 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2412
2413 /*
2414 * Count the number of 10GbE Virtual Interfaces that we have.
2415 */
2416 n10g = 0;
2417 for_each_port(adapter, pidx)
 2418 n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2419
2420 /*
 2421 * We default to 1 queue per non-10G port and up to as many queues as
 2422 * there are online CPUs per 10G port.
2423 */
2424 if (n10g == 0)
2425 q10g = 0;
2426 else {
2427 int n1g = (adapter->params.nports - n10g);
2428 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2429 if (q10g > num_online_cpus())
2430 q10g = num_online_cpus();
2431 }
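 /*
 * Worked example (editor's sketch, hypothetical values): with two
 * ports of which n10g = 1, max_ethqsets = 8 and at least 7 online
 * CPUs: n1g = 1, so q10g = (8 - 1) / 1 = 7, and the 10G port gets
 * 7 Queue Sets while the 1G port gets 1.
 */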
2432
2433 /*
2434 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2435 * The layout will be established in setup_sge_queues() when the
 2436 * adapter is brought up for the first time.
2437 */
2438 qidx = 0;
2439 for_each_port(adapter, pidx) {
2440 struct port_info *pi = adap2pinfo(adapter, pidx);
2441
2442 pi->first_qset = qidx;
 2443 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2444 qidx += pi->nqsets;
2445 }
2446 s->ethqsets = qidx;
2447
2448 /*
2449 * The Ingress Queue Entry Size for our various Response Queues needs
2450 * to be big enough to accommodate the largest message we can receive
2451 * from the chip/firmware; which is 64 bytes ...
2452 */
2453 iqe_size = 64;
2454
2455 /*
2456 * Set up default Queue Set parameters ... Start off with the
2457 * shortest interrupt holdoff timer.
2458 */
2459 for (qs = 0; qs < s->max_ethqsets; qs++) {
2460 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2461 struct sge_eth_txq *txq = &s->ethtxq[qs];
2462
 2463 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2464 rxq->fl.size = 72;
2465 txq->q.size = 1024;
2466 }
2467
2468 /*
2469 * The firmware event queue is used for link state changes and
2470 * notifications of TX DMA completions.
2471 */
 2472 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2473
2474 /*
2475 * The forwarded interrupt queue is used when we're in MSI interrupt
2476 * mode. In this mode all interrupts associated with RX queues will
2477 * be forwarded to a single queue which we'll associate with our MSI
2478 * interrupt vector. The messages dropped in the forwarded interrupt
2479 * queue will indicate which ingress queue needs servicing ... This
2480 * queue needs to be large enough to accommodate all of the ingress
2481 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2482 * from equalling the CIDX if every ingress queue has an outstanding
2483 * interrupt). The queue doesn't need to be any larger because no
2484 * ingress queue will ever have more than one outstanding interrupt at
2485 * any time ...
2486 */
2487 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
 2488 iqe_size);
2489}
2490
2491/*
2492 * Reduce the number of Ethernet queues across all ports to at most n.
2493 * n provides at least one queue per port.
2494 */
 2495static void reduce_ethqs(struct adapter *adapter, int n)
2496{
2497 int i;
2498 struct port_info *pi;
2499
2500 /*
 2501 * While we have too many active Ethernet Queue Sets, iterate across the
2502 * "ports" and reduce their individual Queue Set allocations.
2503 */
2504 BUG_ON(n < adapter->params.nports);
2505 while (n < adapter->sge.ethqsets)
2506 for_each_port(adapter, i) {
2507 pi = adap2pinfo(adapter, i);
2508 if (pi->nqsets > 1) {
2509 pi->nqsets--;
2510 adapter->sge.ethqsets--;
2511 if (adapter->sge.ethqsets <= n)
2512 break;
2513 }
2514 }
2515
2516 /*
2517 * Reassign the starting Queue Sets for each of the "ports" ...
2518 */
2519 n = 0;
2520 for_each_port(adapter, i) {
2521 pi = adap2pinfo(adapter, i);
2522 pi->first_qset = n;
2523 n += pi->nqsets;
2524 }
2525}
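/*
 * Worked example (editor's sketch): three ports with nqsets {4, 4, 4}
 * (12 Queue Sets total) reduced to n = 8 shrink round-robin:
 * {3, 4, 4} -> {3, 3, 4} -> {3, 3, 3} -> {2, 3, 3}, at which point
 * ethqsets == 8 and first_qset is reassigned to {0, 2, 5}.
 */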
2526
2527/*
2528 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2529 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2530 * need. Minimally we need one for every Virtual Interface plus those needed
2531 * for our "extras". Note that this process may lower the maximum number of
2532 * allowed Queue Sets ...
2533 */
 2534static int enable_msix(struct adapter *adapter)
 2535{
 2536 int i, want, need, nqsets;
2537 struct msix_entry entries[MSIX_ENTRIES];
2538 struct sge *s = &adapter->sge;
2539
2540 for (i = 0; i < MSIX_ENTRIES; ++i)
2541 entries[i].entry = i;
2542
2543 /*
2544 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2545 * plus those needed for our "extras" (for example, the firmware
2546 * message queue). We _need_ at least one "Queue Set" per Virtual
2547 * Interface plus those needed for our "extras". So now we get to see
2548 * if the song is right ...
2549 */
2550 want = s->max_ethqsets + MSIX_EXTRAS;
2551 need = adapter->params.nports + MSIX_EXTRAS;
2552
2553 want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2554 if (want < 0)
2555 return want;
2556
2557 nqsets = want - MSIX_EXTRAS;
2558 if (nqsets < s->max_ethqsets) {
2559 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2560 " for %d Queue Sets\n", nqsets);
2561 s->max_ethqsets = nqsets;
2562 if (nqsets < s->ethqsets)
2563 reduce_ethqs(adapter, nqsets);
be839e39 2564 }
2565 for (i = 0; i < want; ++i)
2566 adapter->msix_info[i].vec = entries[i].vector;
2567
2568 return 0;
2569}
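/*
 * Worked example (editor's sketch, assuming MSIX_EXTRAS == 1 for the
 * firmware event queue): with max_ethqsets = 8 on a two-port adapter,
 * we ask pci_enable_msix_range() for want = 9 vectors with a floor of
 * need = 3. If the platform grants only 6, then nqsets = 5, so
 * max_ethqsets is lowered to 5 and reduce_ethqs() trims the active
 * Queue Sets to match.
 */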
2570
2571static const struct net_device_ops cxgb4vf_netdev_ops = {
2572 .ndo_open = cxgb4vf_open,
2573 .ndo_stop = cxgb4vf_stop,
2574 .ndo_start_xmit = t4vf_eth_xmit,
2575 .ndo_get_stats = cxgb4vf_get_stats,
2576 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2577 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2578 .ndo_validate_addr = eth_validate_addr,
2579 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2580 .ndo_change_mtu = cxgb4vf_change_mtu,
2581 .ndo_fix_features = cxgb4vf_fix_features,
2582 .ndo_set_features = cxgb4vf_set_features,
2583#ifdef CONFIG_NET_POLL_CONTROLLER
2584 .ndo_poll_controller = cxgb4vf_poll_controller,
2585#endif
2586};
2587
2588/*
2589 * "Probe" a device: initialize a device and construct all kernel and driver
2590 * state needed to manage the device. This routine is called "init_one" in
2591 * the PF Driver ...
2592 */
 2593static int cxgb4vf_pci_probe(struct pci_dev *pdev,
 2594 const struct pci_device_id *ent)
be839e39 2595{
2596 int pci_using_dac;
2597 int err, pidx;
2598 unsigned int pmask;
2599 struct adapter *adapter;
2600 struct port_info *pi;
2601 struct net_device *netdev;
2602
2603 /*
2604 * Print our driver banner the first time we're called to initialize a
2605 * device.
2606 */
 2607 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2608
2609 /*
 2610 * Initialize generic PCI device state.
 2611 */
 2612 err = pci_enable_device(pdev);
 2613 if (err) {
 2614 dev_err(&pdev->dev, "cannot enable PCI device\n");
2615 return err;
2616 }
2617
2618 /*
 2619 * Reserve PCI resources for the device. If we can't get them some
 2620 * other driver may have already claimed the device ...
 2621 */
 2622 err = pci_request_regions(pdev, KBUILD_MODNAME);
 2623 if (err) {
 2624 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
 2625 goto err_disable_device;
2626 }
2627
2628 /*
2629 * Set up our DMA mask: try for 64-bit address masking first and
2630 * fall back to 32-bit if we can't get 64 bits ...
2631 */
2632 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2633 if (err == 0) {
2634 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2635 if (err) {
2636 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2637 " coherent allocations\n");
 2638 goto err_release_regions;
2639 }
2640 pci_using_dac = 1;
2641 } else {
2642 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2643 if (err != 0) {
2644 dev_err(&pdev->dev, "no usable DMA configuration\n");
 2645 goto err_release_regions;
2646 }
2647 pci_using_dac = 0;
2648 }
2649
2650 /*
2651 * Enable bus mastering for the device ...
2652 */
2653 pci_set_master(pdev);
2654
2655 /*
2656 * Allocate our adapter data structure and attach it to the device.
2657 */
2658 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2659 if (!adapter) {
2660 err = -ENOMEM;
 2661 goto err_release_regions;
2662 }
2663 pci_set_drvdata(pdev, adapter);
2664 adapter->pdev = pdev;
2665 adapter->pdev_dev = &pdev->dev;
2666
2667 /*
2668 * Initialize SMP data synchronization resources.
2669 */
2670 spin_lock_init(&adapter->stats_lock);
2671
2672 /*
2673 * Map our I/O registers in BAR0.
2674 */
2675 adapter->regs = pci_ioremap_bar(pdev, 0);
2676 if (!adapter->regs) {
2677 dev_err(&pdev->dev, "cannot map device registers\n");
2678 err = -ENOMEM;
2679 goto err_free_adapter;
2680 }
2681
2682 /* Wait for the device to become ready before proceeding ...
2683 */
2684 err = t4vf_prep_adapter(adapter);
2685 if (err) {
2686 dev_err(adapter->pdev_dev, "device didn't become ready:"
2687 " err=%d\n", err);
2688 goto err_unmap_bar0;
2689 }
2690
2691 /* For T5 and later we want to use the new BAR-based User Doorbells,
2692 * so we need to map BAR2 here ...
2693 */
2694 if (!is_t4(adapter->params.chip)) {
2695 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2696 pci_resource_len(pdev, 2));
2697 if (!adapter->bar2) {
2698 dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2699 err = -ENOMEM;
2700 goto err_unmap_bar0;
2701 }
2702 }
2703 /*
2704 * Initialize adapter level features.
2705 */
2706 adapter->name = pci_name(pdev);
2707 adapter->msg_enable = dflt_msg_enable;
2708 err = adap_init0(adapter);
2709 if (err)
2710 goto err_unmap_bar;
2711
2712 /*
2713 * Allocate our "adapter ports" and stitch everything together.
2714 */
2715 pmask = adapter->params.vfres.pmask;
2716 for_each_port(adapter, pidx) {
2717 int port_id, viid;
2718
2719 /*
2720 * We simplistically allocate our virtual interfaces
2721 * sequentially across the port numbers to which we have
2722 * access rights. This should be configurable in some manner
2723 * ...
2724 */
2725 if (pmask == 0)
2726 break;
2727 port_id = ffs(pmask) - 1;
2728 pmask &= ~(1 << port_id);
2729 viid = t4vf_alloc_vi(adapter, port_id);
2730 if (viid < 0) {
2731 dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2732 " err=%d\n", port_id, viid);
2733 err = viid;
2734 goto err_free_dev;
2735 }
2736
2737 /*
2738 * Allocate our network device and stitch things together.
2739 */
2740 netdev = alloc_etherdev_mq(sizeof(struct port_info),
2741 MAX_PORT_QSETS);
2742 if (netdev == NULL) {
2743 t4vf_free_vi(adapter, viid);
2744 err = -ENOMEM;
2745 goto err_free_dev;
2746 }
2747 adapter->port[pidx] = netdev;
2748 SET_NETDEV_DEV(netdev, &pdev->dev);
2749 pi = netdev_priv(netdev);
2750 pi->adapter = adapter;
2751 pi->pidx = pidx;
2752 pi->port_id = port_id;
2753 pi->viid = viid;
2754
2755 /*
2756 * Initialize the starting state of our "port" and register
2757 * it.
2758 */
2759 pi->xact_addr_filt = -1;
 2760 netif_carrier_off(netdev);
2761 netdev->irq = pdev->irq;
2762
2763 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2764 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 2765 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2766 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2767 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2768 NETIF_F_HIGHDMA;
2769 netdev->features = netdev->hw_features |
2770 NETIF_F_HW_VLAN_CTAG_TX;
2771 if (pci_using_dac)
2772 netdev->features |= NETIF_F_HIGHDMA;
 2773
2774 netdev->priv_flags |= IFF_UNICAST_FLT;
2775
 2776 netdev->netdev_ops = &cxgb4vf_netdev_ops;
 2777 netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
2778
2779 /*
2780 * Initialize the hardware/software state for the port.
2781 */
2782 err = t4vf_port_init(adapter, pidx);
2783 if (err) {
2784 dev_err(&pdev->dev, "cannot initialize port %d\n",
2785 pidx);
2786 goto err_free_dev;
2787 }
2788 }
2789
2790 /* See what interrupts we'll be using. If we've been configured to
2791 * use MSI-X interrupts, try to enable them but fall back to using
2792 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
2793 * get MSI interrupts we bail with the error.
2794 */
2795 if (msi == MSI_MSIX && enable_msix(adapter) == 0)
2796 adapter->flags |= USING_MSIX;
2797 else {
2798 if (msi == MSI_MSIX) {
2799 dev_info(adapter->pdev_dev,
2800 "Unable to use MSI-X Interrupts; falling "
2801 "back to MSI Interrupts\n");
2802
2803 /* We're going to need a Forwarded Interrupt Queue so
2804 * that may cut into how many Queue Sets we can
2805 * support.
2806 */
2807 msi = MSI_MSI;
2808 size_nports_qsets(adapter);
2809 }
2810 err = pci_enable_msi(pdev);
2811 if (err) {
2812 dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
2813 " err=%d\n", err);
2814 goto err_free_dev;
2815 }
2816 adapter->flags |= USING_MSI;
2817 }
2818
495c22bb
HS
2819 /* Now that we know how many "ports" we have and what interrupt
2820 * mechanism we're going to use, we can configure our queue resources.
2821 */
2822 cfg_queues(adapter);
2823
2824 /*
2825 * The "card" is now ready to go. If any errors occur during device
2826 * registration we do not fail the whole "card" but rather proceed
2827 * only with the ports we manage to register successfully. However we
2828 * must register at least one net device.
2829 */
2830 for_each_port(adapter, pidx) {
 2831 struct port_info *pi = netdev_priv(adapter->port[pidx]);
2832 netdev = adapter->port[pidx];
2833 if (netdev == NULL)
2834 continue;
2835
2836 netif_set_real_num_tx_queues(netdev, pi->nqsets);
2837 netif_set_real_num_rx_queues(netdev, pi->nqsets);
2838
2839 err = register_netdev(netdev);
2840 if (err) {
2841 dev_warn(&pdev->dev, "cannot register net device %s,"
2842 " skipping\n", netdev->name);
2843 continue;
2844 }
2845
2846 set_bit(pidx, &adapter->registered_device_map);
2847 }
2848 if (adapter->registered_device_map == 0) {
2849 dev_err(&pdev->dev, "could not register any net devices\n");
 2850 goto err_disable_interrupts;
2851 }
2852
2853 /*
2854 * Set up our debugfs entries.
2855 */
 2856 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2857 adapter->debugfs_root =
2858 debugfs_create_dir(pci_name(pdev),
2859 cxgb4vf_debugfs_root);
 2860 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2861 dev_warn(&pdev->dev, "could not create debugfs"
2862 " directory");
2863 else
2864 setup_debugfs(adapter);
2865 }
2866
 2867 /*
 2868 * Print a short notice on the existence and configuration of the new
2869 * VF network device ...
2870 */
2871 for_each_port(adapter, pidx) {
2872 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
2873 adapter->port[pidx]->name,
2874 (adapter->flags & USING_MSIX) ? "MSI-X" :
2875 (adapter->flags & USING_MSI) ? "MSI" : "");
2876 }
2877
2878 /*
2879 * Return success!
2880 */
2881 return 0;
2882
2883 /*
2884 * Error recovery and exit code. Unwind state that's been created
2885 * so far and return the error.
2886 */
2887err_disable_interrupts:
2888 if (adapter->flags & USING_MSIX) {
2889 pci_disable_msix(adapter->pdev);
2890 adapter->flags &= ~USING_MSIX;
2891 } else if (adapter->flags & USING_MSI) {
2892 pci_disable_msi(adapter->pdev);
2893 adapter->flags &= ~USING_MSI;
2894 }
2895
2896err_free_dev:
2897 for_each_port(adapter, pidx) {
2898 netdev = adapter->port[pidx];
2899 if (netdev == NULL)
2900 continue;
2901 pi = netdev_priv(netdev);
2902 t4vf_free_vi(adapter, pi->viid);
2903 if (test_bit(pidx, &adapter->registered_device_map))
2904 unregister_netdev(netdev);
2905 free_netdev(netdev);
2906 }
2907
2908err_unmap_bar:
2909 if (!is_t4(adapter->params.chip))
2910 iounmap(adapter->bar2);
2911
2912err_unmap_bar0:
2913 iounmap(adapter->regs);
2914
2915err_free_adapter:
2916 kfree(adapter);
 2917
2918err_release_regions:
2919 pci_release_regions(pdev);
2920 pci_clear_master(pdev);
2921
2922err_disable_device:
2923 pci_disable_device(pdev);
 2924
2925 return err;
2926}
2927
2928/*
2929 * "Remove" a device: tear down all kernel and driver state created in the
2930 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2931 * that this is called "remove_one" in the PF Driver.)
2932 */
 2933static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2934{
2935 struct adapter *adapter = pci_get_drvdata(pdev);
2936
2937 /*
2938 * Tear down driver state associated with device.
2939 */
2940 if (adapter) {
2941 int pidx;
2942
2943 /*
 2944 * Stop all of our activity. Unregister network ports,
2945 * disable interrupts, etc.
2946 */
2947 for_each_port(adapter, pidx)
2948 if (test_bit(pidx, &adapter->registered_device_map))
2949 unregister_netdev(adapter->port[pidx]);
2950 t4vf_sge_stop(adapter);
2951 if (adapter->flags & USING_MSIX) {
2952 pci_disable_msix(adapter->pdev);
2953 adapter->flags &= ~USING_MSIX;
2954 } else if (adapter->flags & USING_MSI) {
2955 pci_disable_msi(adapter->pdev);
2956 adapter->flags &= ~USING_MSI;
2957 }
2958
2959 /*
2960 * Tear down our debugfs entries.
2961 */
 2962 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2963 cleanup_debugfs(adapter);
2964 debugfs_remove_recursive(adapter->debugfs_root);
2965 }
2966
2967 /*
2968 * Free all of the various resources which we've acquired ...
2969 */
2970 t4vf_free_sge_resources(adapter);
2971 for_each_port(adapter, pidx) {
2972 struct net_device *netdev = adapter->port[pidx];
2973 struct port_info *pi;
2974
2975 if (netdev == NULL)
2976 continue;
2977
2978 pi = netdev_priv(netdev);
2979 t4vf_free_vi(adapter, pi->viid);
2980 free_netdev(netdev);
2981 }
2982 iounmap(adapter->regs);
2983 if (!is_t4(adapter->params.chip))
2984 iounmap(adapter->bar2);
 2985 kfree(adapter);
2986 }
2987
2988 /*
2989 * Disable the device and release its PCI resources.
2990 */
2991 pci_disable_device(pdev);
2992 pci_clear_master(pdev);
2993 pci_release_regions(pdev);
2994}
2995
2996/*
2997 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2998 * delivery.
2999 */
 3000static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3001{
3002 struct adapter *adapter;
3003 int pidx;
3004
3005 adapter = pci_get_drvdata(pdev);
3006 if (!adapter)
3007 return;
3008
 3009 /* Disable all Virtual Interfaces. This will shut down the
3010 * delivery of all ingress packets into the chip for these
3011 * Virtual Interfaces.
3012 */
3013 for_each_port(adapter, pidx)
3014 if (test_bit(pidx, &adapter->registered_device_map))
3015 unregister_netdev(adapter->port[pidx]);
 3016
 3017 /* Stop all Queues. This will prevent further DMA and
 3018 * Interrupts, allowing various internal pathways to drain.
 3019 */
3020 t4vf_sge_stop(adapter);
3021 if (adapter->flags & USING_MSIX) {
3022 pci_disable_msix(adapter->pdev);
3023 adapter->flags &= ~USING_MSIX;
3024 } else if (adapter->flags & USING_MSI) {
3025 pci_disable_msi(adapter->pdev);
3026 adapter->flags &= ~USING_MSI;
3027 }
3028
 3029 /*
 3030 * Now that DMA and Interrupts are quiesced, free up all of the SGE
 3031 * Queue resources.
 3032 */
3033 t4vf_free_sge_resources(adapter);
 3034 pci_set_drvdata(pdev, NULL);
3035}
3036
3037/* Macros needed to support the PCI Device ID Table ...
3038 */
3039#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
 3040 static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3041#define CH_PCI_DEVICE_ID_FUNCTION 0x8
3042
3043#define CH_PCI_ID_TABLE_ENTRY(devid) \
3044 { PCI_VDEVICE(CHELSIO, (devid)), 0 }
3045
3046#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3047
3048#include "../cxgb4/t4_pci_id_tbl.h"
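/*
 * Editor's sketch of the net effect of the macros above (the device ID
 * shown is illustrative; the real list lives in t4_pci_id_tbl.h):
 *
 *	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 *		{ PCI_VDEVICE(CHELSIO, 0x5800), 0 },
 *		...
 *		{ 0, }
 *	};
 *
 * with CH_PCI_DEVICE_ID_FUNCTION (0x8) selecting the Virtual Function
 * personality of each device.
 */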
3049
3050MODULE_DESCRIPTION(DRV_DESC);
3051MODULE_AUTHOR("Chelsio Communications");
3052MODULE_LICENSE("Dual BSD/GPL");
3053MODULE_VERSION(DRV_VERSION);
3054MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3055
3056static struct pci_driver cxgb4vf_driver = {
3057 .name = KBUILD_MODNAME,
3058 .id_table = cxgb4vf_pci_tbl,
3059 .probe = cxgb4vf_pci_probe,
3060 .remove = cxgb4vf_pci_remove,
3061 .shutdown = cxgb4vf_pci_shutdown,
3062};
3063
3064/*
3065 * Initialize global driver state.
3066 */
3067static int __init cxgb4vf_module_init(void)
3068{
3069 int ret;
3070
3071 /*
3072 * Vet our module parameters.
3073 */
3074 if (msi != MSI_MSIX && msi != MSI_MSI) {
3075 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3076 msi, MSI_MSIX, MSI_MSI);
3077 return -EINVAL;
3078 }
3079
3080 /* Debugfs support is optional, just warn if this fails */
3081 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
 3082 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 3083 pr_warn("could not create debugfs entry, continuing\n");
3084
3085 ret = pci_register_driver(&cxgb4vf_driver);
 3086 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3087 debugfs_remove(cxgb4vf_debugfs_root);
3088 return ret;
3089}
3090
3091/*
3092 * Tear down global driver state.
3093 */
3094static void __exit cxgb4vf_module_exit(void)
3095{
3096 pci_unregister_driver(&cxgb4vf_driver);
3097 debugfs_remove(cxgb4vf_debugfs_root);
3098}
3099
3100module_init(cxgb4vf_module_init);
3101module_exit(cxgb4vf_module_exit);