/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
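
/* Example of the resulting mapping (illustrative values): with enable != 0
 * and pi->nqsets == 4, the port's TX queues 0..3 are assigned DCB Priorities
 * 0..3 respectively; with enable == 0 each queue is programmed with
 * 0xffffffff, the tear-down value.
 */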
static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
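
/* Hash-vector sketch (illustrative): hash_mac_addr() reduces each MAC
 * address to a bucket index, and the loop above ORs one bit per bucket into
 * a 64-bit vector, so two addresses landing in buckets 3 and 10 yield
 * vec = (1ULL << 3) | (1ULL << 10).  It is this vector, not the addresses
 * themselves, that t4_set_addr_hash() programs for the VI.
 */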
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->ethrxq[i].rspq);
}
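
/* MSI-X vector layout assumed by the two routines above and by
 * name_msix_vecs(): vector 0 carries non-data events (t4_nondata_intr),
 * vector 1 the FW event queue, and vectors 2..N the Ethernet Rx queues in
 * port order -- which is why both loops start at msi_index = 2.
 */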
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
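
/* Minimal usage sketch (hypothetical caller, error handling elided): fill
 * an index array of pi->rss_size entries that round-robins over the port's
 * queue sets, then program it -- which is exactly what setup_rss() below
 * does with pi->rss:
 *
 *	for (j = 0; j < pi->rss_size; j++)
 *		idx[j] = j % pi->nqsets;
 *	err = cxgb4_write_rss(pi, idx);
 */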
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
	unsigned int cmplqid = 0;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (printk_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
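
/* Usage sketch (values are examples only): request an interrupt after at
 * most 5us or 8 packets, whichever comes first:
 *
 *	cxgb4_set_rspq_intr_params(&q->rspq, 5, 8);
 *
 * Both values are snapped to the nearest entries of the adapter-wide
 * sge.timer_val[]/sge.counter_val[] tables by closest_timer() and
 * closest_thres() above, since the hardware only supports those discrete
 * settings.
 */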
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
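
/* Free-list shape (illustrative): atid_tab doubles as a singly linked free
 * list threaded through its union entries.  cxgb4_alloc_atid() pops the head
 * (t->afree) and cxgb4_free_atid() pushes the released entry back, so both
 * operations are O(1) under atid_lock.
 */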
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
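
/* Index spaces used by the server-TID routines (summary of the code above):
 * ordinary server TIDs occupy bitmap positions [0, nstids) and are reported
 * to callers offset by stid_base; server *filter* TIDs live in positions
 * [nstids, nstids + nsftids) of the same bitmap but are reported offset by
 * sftid_base.  cxgb4_free_stid() below undoes whichever translation applies.
 */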
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
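
/* Encoding sketch: t->tid_tab entries are pointers, so &t->tid_tab[tid] is
 * at least 4-byte aligned and its low two bits are free.  The code above
 * stores the Tx channel there, e.g. for chan == 2:
 *
 *	head = (void **)((uintptr_t)p | 2);
 *
 * process_tid_release_list() below recovers both pieces with "& 3" and
 * "- chan".
 */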
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
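
/* Layout of the single kvzalloc()'d block carved up above (sizes in array
 * elements, not bytes):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *	ftid_tab[nftids + nsftids] | ftid_bmap[BITS_TO_LONGS(nftids)]
 *
 * One allocation keeps setup/teardown simple and the tables contiguous.
 */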
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
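
/* Worked example (table values are hypothetical): if mtus[] begins
 * { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ... }, then
 * cxgb4_best_mtu(mtus, 1400, &idx) stops at 1280 -- the largest entry that
 * does not exceed 1400 -- while cxgb4_best_mtu(mtus, 64, &idx) returns the
 * smallest entry, 88, since nothing in the table fits under 64.
 */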
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
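
/* Worked example (hypothetical numbers): with header_size = 40,
 * data_size_max = 1460 and data_size_align = 512, max_mtu is 1500.  An MTU
 * of 1064 gives a data segment of 1024 (512-aligned) and is remembered in
 * aligned_mtu_idx; if the closest MTU <= 1500 sits at most one table slot
 * above it, the aligned entry wins, trading a slightly smaller MTU for
 * aligned payloads.
 */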
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
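
/* Example of the two layouts (illustrative): for a viid whose low 7 bits
 * are 0x05, a T4/T5 chip returns SMT index 0x0a (row 5 of the 2-entry rows)
 * while a T6 chip returns 0x05 (one entry per row), matching the
 * organizations described in the comment above.
 */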
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
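
/* Address-map sketch (sizes are examples only): with 256MB of EDC0, 256MB
 * of EDC1 and 2GB of MC0, the flat offset space decoded above is
 *
 *	[0, 256MB)       -> MEM_EDC0, memaddr = offset
 *	[256MB, 512MB)   -> MEM_EDC1, memaddr = offset - 256MB
 *	[512MB, 2.5GB)   -> MEM_MC0,  memaddr = offset - 512MB
 *	beyond, T5 only  -> MEM_MC1 if it fits, otherwise -EINVAL
 */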
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
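
/* Usage sketch (hypothetical caller, error handling elided): an upper-layer
 * driver wanting the user-space BAR2 doorbell location of egress queue qid
 * would do
 *
 *	u64 off;
 *	unsigned int bar2_qid;
 *
 *	cxgb4_bar2_sge_qregs(dev, qid, CXGB4_BAR2_QTYPE_EGRESS, 1,
 *			     &off, &bar2_qid);
 *
 * and then issue doorbells at (BAR2 base + off), tagging them with bar2_qid.
 */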
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle) {
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);
			adap->uld[i].handle = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list*/
	INIT_LIST_HEAD(&adap->mac_hlist);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
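/* Note on the stid remapping above: server filters are stored at the tail of
 * ftid_tab[], after the nftids regular filter entries.  With hypothetical
 * values sftid_base == 1000 and nftids == 100, an incoming stid of 1005 is
 * remapped to table index (1005 - 1000) + 100 == 105.
 */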
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
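	/* The networking core zeroes *ns before invoking ndo_get_stats64()
	 * (see dev_get_stats()), so the ns->rx_fifo_errors term in the sum
	 * below is effectively zero here; overflow/truncation drops are
	 * reported via ns->rx_dropped above.
	 */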
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}

		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
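/* For reference: a Clause-45 phy_id as tested above packs both addresses into
 * one value (see <linux/mdio.h>): MDIO_PHY_ID_C45 | (prtad << 5) | devad.
 * mdio_phy_id_prtad()/mdio_phy_id_devad() undo that packing, while Clause-22
 * IDs are plain 5-bit addresses, hence the data->phy_id < 32 check.
 */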
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
#ifdef CONFIG_PCI_IOV
static int dummy_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}
/* Fill MAC address that will be assigned by the FW */
static void fill_vf_station_mac_addr(struct adapter *adap)
{
	unsigned int i;
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	int err;
	u8 *na;
	u16 a, b;

	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (!err) {
		na = adap->params.vpd.na;
		for (i = 0; i < ETH_ALEN; i++)
			hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
				      hex2val(na[2 * i + 1]));
		a = (hw_addr[0] << 8) | hw_addr[1];
		b = (hw_addr[1] << 8) | hw_addr[2];
		a ^= b;
		a |= 0x0200;	/* locally assigned Ethernet MAC address */
		a &= ~0x0100;	/* not a multicast Ethernet MAC address */
		macaddr[0] = a >> 8;
		macaddr[1] = a & 0xff;

		for (i = 2; i < 5; i++)
			macaddr[i] = hw_addr[i + 1];

		for (i = 0; i < adap->num_vfs; i++) {
			macaddr[5] = adap->pf * 16 + i;
			ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
		}
	}
}
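/* Worked example of the bit-twiddling above, using a hypothetical node
 * address beginning 00:07:43: a = 0x0007 and b = 0x0743, so a ^= b yields
 * 0x0744; a |= 0x0200 asserts the locally-administered bit (0x02 in octet 0)
 * and a &= ~0x0100 clears the multicast bit (0x01 in octet 0), giving 0x0644,
 * i.e. a generated address starting 06:44:...
 */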
static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}
static int cxgb_get_vf_config(struct net_device *dev,
			      int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
	return 0;
}
static int cxgb_get_phys_port_id(struct net_device *dev,
				 struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}
#endif /* CONFIG_PCI_IOV */
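/* The encoding above simply concatenates the adapter and port indices in
 * decimal: e.g. adap_idx 1, port_id 2 yields phy_port_id 12, exported as the
 * raw (host-endian) bytes of the unsigned int.
 */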
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate << 10;
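	/* Note: the shift above scales by 1024 rather than 1000, so the
	 * programmed cap ends up ~2.4% above the nominal Mbps value (for
	 * example, a request of 100 Mbps becomes 102400 Kbps).  The result
	 * is range-checked against SCHED_MAX_RATE_KBPS below.
	 */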
	/* Max rate is 10 Gbps */
	if (req_rate >= SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Gbps\n",
			rate, SCHED_MAX_RATE_KBPS);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d failed. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
	    tc->type == TC_SETUP_CLSU32) {
		switch (tc->cls_u32->command) {
		case TC_CLSU32_NEW_KNODE:
		case TC_CLSU32_REPLACE_KNODE:
			return cxgb4_config_knode(dev, proto, tc->cls_u32);
		case TC_CLSU32_DELETE_KNODE:
			return cxgb4_delete_knode(dev, proto, tc->cls_u32);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = dummy_open,
	.ndo_set_vf_mac       = cxgb_set_vf_mac,
	.ndo_get_vf_config    = cxgb_get_vf_config,
	.ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo = get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
{
	int port;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
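	/* 0xE4 is 0b11100100; reading the 2-bit fields from the low end maps
	 * modulation queue 0 -> channel 0, 1 -> 1, 2 -> 2, 3 -> 3, i.e. the
	 * identity mapping described in the comment above.
	 */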
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
#define be16(__p) (((__p)[0] << 8) | (__p)[1])
#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

#undef be16
#undef le16
#undef le24
}
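/* Hypothetical walk-through of the chain above: if the three bytes at 0x8
 * hold 01 00 00, le24() gives 0x000001 and the primary image starts at
 * 0x1 << 12 == 0x1000; if the three bytes at 0x1000 + 0xa hold 00 20 00
 * (0x002000), the version is the big-endian 16-bit word at 0x2000 + 0x27e.
 */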
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}
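	/* Worked example of the residual handling above (hypothetical size):
	 * a 103-byte Configuration File has size == 100 and resid == 3, so
	 * the first 100 bytes go out in the word-aligned write and the final
	 * word is rebuilt from the last three file bytes plus one zero pad
	 * byte before the 4-byte tail write.
	 */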
	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_bs_version(adap, &adap->params.bs_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	t4_get_exprom_version(adap, &adap->params.er_vers);

	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		kvfree(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)
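	/* For example, FW_PARAM_PFVF(L2T_START) builds a 32-bit parameter
	 * handle combining the PFVF mnemonic with the
	 * FW_PARAMS_PARAM_PFVF_L2T_START index (Y and Z fields zeroed);
	 * t4_query_params() sends an array of such handles and the firmware
	 * fills in the corresponding val[] slots.
	 */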
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps,
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif
	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* We don't yet have a PARAMs call to retrieve the number of Traffic
	 * Classes supported by the hardware/firmware so we hard code it here
	 * for now.
	 */
	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connections through firmware work requests
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
3679 memset(&caps_cmd
, 0, sizeof(caps_cmd
));
3680 caps_cmd
.op_to_write
= htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD
) |
3681 FW_CMD_REQUEST_F
| FW_CMD_READ_F
);
3682 caps_cmd
.cfvalid_to_len16
= htonl(FW_LEN16(caps_cmd
));
3683 ret
= t4_wr_mbox(adap
, adap
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
3688 if (caps_cmd
.ofldcaps
) {
3689 /* query offload-related parameters */
3690 params
[0] = FW_PARAM_DEV(NTID
);
3691 params
[1] = FW_PARAM_PFVF(SERVER_START
);
3692 params
[2] = FW_PARAM_PFVF(SERVER_END
);
3693 params
[3] = FW_PARAM_PFVF(TDDP_START
);
3694 params
[4] = FW_PARAM_PFVF(TDDP_END
);
3695 params
[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ
);
3696 ret
= t4_query_params(adap
, adap
->mbox
, adap
->pf
, 0, 6,
3700 adap
->tids
.ntids
= val
[0];
3701 adap
->tids
.natids
= min(adap
->tids
.ntids
/ 2, MAX_ATIDS
);
3702 adap
->tids
.stid_base
= val
[1];
3703 adap
->tids
.nstids
= val
[2] - val
[1] + 1;
3705 * Setup server filter region. Divide the available filter
3706 * region into two parts. Regular filters get 1/3rd and server
3707 * filters get 2/3rd part. This is only enabled if workarond
3709 * 1. For regular filters.
3710 * 2. Server filter: This are special filters which are used
3711 * to redirect SYN packets to offload queue.
3713 if (adap
->flags
& FW_OFLD_CONN
&& !is_bypass(adap
)) {
3714 adap
->tids
.sftid_base
= adap
->tids
.ftid_base
+
3715 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
3716 adap
->tids
.nsftids
= adap
->tids
.nftids
-
3717 DIV_ROUND_UP(adap
->tids
.nftids
, 3);
3718 adap
->tids
.nftids
= adap
->tids
.sftid_base
-
3719 adap
->tids
.ftid_base
;
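		/* e.g. with nftids == 300 on entry: DIV_ROUND_UP(300, 3) is
		 * 100, so sftid_base = ftid_base + 100, nsftids becomes 200
		 * and nftids is reduced to 100 -- one third regular filters,
		 * two thirds server filters as described above.
		 */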
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
		adap->num_ofld_uld += 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.cryptocaps) {
		/* Should query params here...TODO */
		params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0) {
			if (ret != -EINVAL)
				goto bye;
		} else {
			adap->vres.ncrypto_fc = val[0];
		}
		adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
		adap->num_uld += 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		if (dev) {
			netif_device_detach(dev);
			netif_carrier_off(dev);
		}
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
	high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);

	return high_speeds != 0;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment, disable all offload.
	 */
	if (is_kdump_kernel()) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	} else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
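/* Illustrative sizing (hypothetical values): a 4-port adapter with two
 * high-speed ports and MAX_ETH_QSETS == 32 gives q10g = (32 - 2) / 2 == 15,
 * which is then capped by netif_get_num_default_rss_queues() (typically the
 * smaller of 8 and the number of online CPUs), so each high-speed port gets
 * up to 8 queue sets and each 1G port gets one.
 */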
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;
	if (!max_ingq)
		goto out;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
out:
	return 0;
}
static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	if (is_pci_uld(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	if (is_offload(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (get_msix_info(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += adap->num_ofld_uld * s->ofldqsets;
		ofld_need = adap->num_ofld_uld * nchan;
	}
	if (is_pci_uld(adap)) {
		want += adap->num_uld * s->ofldqsets;
		uld_need = adap->num_uld * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = s->ofldqsets;
	}

	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_uld(adap)) {
		for (j = 0 ; i < allocated; ++i, j++) {
			adap->msix_info_ulds[j].vec = entries[i].vector;
			adap->msix_info_ulds[j].idx = i;
		}
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}
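/* Illustrative vector accounting (hypothetical 2-port NIC, no ULDs,
 * max_ethqsets == 16): want = 16 + EXTRA_VECS == 18 and need = 2 +
 * EXTRA_VECS == 4.  If the system grants only 10 vectors, i = 10 - 2 == 8
 * Ethernet vectors remain, so max_ethqsets drops to 8 and reduce_ethqs()
 * trims the per-port queue sets to fit.
 */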
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}
static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4   /* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}
static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}
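/* On a device running at its full capability in a Gen3 x8 slot, the
 * messages above would read roughly (illustrative output only):
 *   PCIe link speed is 8.0GT/s, device supports 8.0GT/s
 *   PCIe link width is x8, device supports x8
 * and the "more lanes and/or higher speed" hint is skipped.
 */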
/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}
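/* Example of the resulting message (hypothetical interface, card and port
 * type) for a port advertising the 1G, 10G and 40G capability bits:
 *   eth0: Chelsio T580-CR (0000:04:00.4) 1G/10G/40GBASE-QSFP
 * The "if (bufp != buf) --bufp;" step is what lets "BASE-%s" overwrite the
 * trailing '/' left by the last speed sprintf().
 */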
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	kvfree(adapter->l2t);
	t4_cleanup_sched(adapter);
	kvfree(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_u32(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}
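/* The switch above keys on the top nibble of the PCI device ID: Chelsio
 * T4 parts use 0x4xxx IDs, T5 parts 0x5xxx and T6 parts 0x6xxx, and the
 * CHELSIO_T4/T5/T6 constants match those nibbles, so e.g. a (hypothetical)
 * device ID of 0x5401 gives 0x5401 >> 12 == 0x5 == CHELSIO_T5.
 */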
#ifdef CONFIG_PCI_IOV
static void dummy_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
	dev->destructor = free_netdev;
}
static int config_mgmt_dev(struct pci_dev *pdev)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	struct net_device *netdev;
	struct port_info *pi;
	char name[IFNAMSIZ];
	int err;

	snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
	netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
			      dummy_setup);
	if (!netdev)
		return -ENOMEM;

	pi = netdev_priv(netdev);
	pi->adapter = adap;
	pi->port_id = adap->pf % adap->params.nports;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	adap->port[0] = netdev;

	err = register_netdev(adap->port[0]);
	if (err) {
		pr_info("Unable to register VF mgmt netdev %s\n", name);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;
		return err;
	}
	return 0;
}
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if cxgb4 is the MASTER and fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F) ||
	    !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
	    PCIE_FW_MASTER_G(pcie_fw) != 4) {
		dev_warn(&pdev->dev,
			 "cxgb4 driver needs to be MASTER to support SRIOV\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
		return num_vfs;
	}

	/* Disable SRIOV when zero is passed.
	 * One needs to disable SRIOV before modifying it, else
	 * stack throws the below warning:
	 * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		if (adap->port[0]) {
			unregister_netdev(adap->port[0]);
			adap->port[0] = NULL;
		}
		/* free VF resources */
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		adap->num_vfs = 0;
		return num_vfs;
	}

	if (num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err)
			return err;

		adap->num_vfs = num_vfs;
		err = config_mgmt_dev(pdev);
		if (err)
			return err;
	}

	adap->vfinfo = kcalloc(adap->num_vfs,
			       sizeof(struct vf_info), GFP_KERNEL);
	if (adap->vfinfo)
		fill_vf_station_mac_addr(adap);
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */
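/* This callback is wired to the PCI core's standard SR-IOV sysfs knob, so
 * (illustrative usage, device address made up):
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.4/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.4/sriov_numvfs   # disable SR-IOV
 * Per the comment above, VFs must be disabled via the zero write before a
 * different VF count can be enabled.
 */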
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	struct net_device *netdev;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;
	static int adap_idx = 1;
#ifdef CONFIG_PCI_IOV
	u32 v, port_vec;
#endif

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
#ifndef CONFIG_PCI_IOV
		iounmap(regs);
#endif
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}
	adap_idx++;

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);
	spin_lock_init(&adapter->mbox_lock);

	INIT_LIST_HEAD(&adapter->mlist.list);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;
	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}
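	/* Illustrative arithmetic for the check above: with a 4KB PAGE_SIZE,
	 * num_seg = 4096 / 128 = 32 segments per page, so the write-combining
	 * BAR2 mapping is only attempted when at most 32 egress queues share
	 * a page; a larger QUEUESPERPAGEPF value fails the probe instead.
	 */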
	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;
	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));
	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}
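	/* The VPD "na" property holds the MAC address as an ASCII hex string,
	 * so the loop above folds each pair of nibbles into one byte; e.g. a
	 * (hypothetical) na of "000743000001" yields 00:07:43:00:00:01.
	 */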
	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");
	}
	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}
	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}
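	/* Note that a register_netdev() failure only breaks out of the loop:
	 * ports registered earlier stay usable, and err is cleared so the
	 * probe succeeds with the partial set, since the comment above only
	 * requires at least one net device.
	 */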
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	print_adapter_info(adapter);
	setup_fw_sge_queues(adapter);
	return 0;
sriov:
#ifdef CONFIG_PCI_IOV
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto free_pci_region;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->regs = regs;
	adapter->adap_idx = adap_idx;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);

	v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			      &v, &port_vec);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "Could not fetch port params\n");
		goto free_adapter;
	}

	adapter->params.nports = hweight32(port_vec);
	pci_set_drvdata(pdev, adapter);
	return 0;

free_adapter:
	kfree(adapter);
free_pci_region:
	iounmap(regs);
	pci_disable_sriov(pdev);
	pci_release_regions(pdev);
	return err;
#else
	return 0;
#endif
 out_free_dev:
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter->mbox_log);
		synchronize_rcu();
		kfree(adapter);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}
/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shutdown any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 * and thus have a drvdata pointer set up for them.
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		t4_uld_clean_up(adapter);
		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
#ifdef CONFIG_PCI_IOV
	else {
		if (adapter->port[0])
			unregister_netdev(adapter->port[0]);
		iounmap(adapter->regs);
		kfree(adapter->vfinfo);
		kfree(adapter);
		pci_disable_sriov(pdev);
		pci_release_regions(pdev);
	}
#endif
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);