/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init,
		 "Force old initialization sequence, deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

#ifdef CONFIG_PCI_IOV
static bool vf_acls;

module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

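/* For illustration: the addr[7]/filt_idx[7] scratch arrays above bound each
 * t4_alloc_mac_filt() call to at most seven exact-match MAC addresses per
 * batch.  "free" is true only for the first batch so the firmware frees the
 * old filter set exactly once, and any addresses that can't be given
 * exact-match filters are folded into the uhash/mhash values programmed via
 * t4_set_addr_hash() at the end.
 */
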
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

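/* Message layout assumed by the rsp += 2 above (a sketch; both headers are
 * 8 bytes, i.e. one __be64 each):
 *
 *	rsp[0]: struct rss_header   (outer RSS header)
 *	rsp[1]: cpl_fw4_msg header  (type == FW_TYPE_RSSCPL)
 *	rsp[2]: start of the encapsulated CPL handed to the ULD
 */
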
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);

	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

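/* For example (sizes hypothetical): a port with rss_size = 128 and
 * nqsets = 4 gets pi->rss[] = { 0, 1, 2, 3, 0, 1, ... }, spreading the
 * hardware hash buckets round-robin over the port's four Rx queues before
 * cxgb4_write_rss() translates these indices into absolute queue IDs.
 */
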
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler, 0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be kept in sync
	 * with any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
	if (err) \
		goto freeout; \
} while (0)

	ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}

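/* Resulting MSI-X vector layout (a sketch, assuming MSI-X is in use):
 *
 *	vector 0:    non-queue interrupts
 *	vector 1:    firmware event queue
 *	vector 2..N: per-queue interrupts, in allocation order -- Ethernet
 *		     Rx queues per port, then offload, RDMA and RDMA CIQ
 *		     queues -- matching name_msix_vecs() and
 *		     request_msix_queue_irqs() above.
 */
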
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -EAGAIN;
		}
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					 f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

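/* Both helpers return the index of the nearest configured value.  E.g. with
 * an illustrative timer_val[] of { 1, 5, 10, 50, 100, 200 }, a request for a
 * 7 us hold-off yields index 1 (5 us), since |7 - 5| < |7 - 10| and ties go
 * to the earlier entry.
 */
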
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}

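/* Typical usage (values illustrative): cxgb4_set_rspq_intr_params(q, 5, 8)
 * arms both mechanisms, so the queue interrupts after 8 packets or when the
 * ~5 us hold-off timer fires, whichever comes first; (q, 0, 0) is coerced
 * above to cnt = 1 with the timer index set to 6 (no timer), i.e. roughly
 * an interrupt per packet.
 */
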
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 4;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

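/* Note on the IPv6 path above: bitmap_find_free_region(..., 2) claims a
 * naturally aligned block of 2^2 = 4 consecutive stids, which is exactly the
 * "4 TIDs per IPv6 server" accounting applied to stids_in_use.
 */
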
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 4;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

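/* Worked example of the pointer tagging above (addresses hypothetical):
 * deferring TID 100 on Tx channel 2 stores the old list head in
 * t->tid_tab[100] and then records
 *
 *	adap->tid_release_head = (void **)((uintptr_t)&t->tid_tab[100] | 2);
 *
 * The consumer recovers chan = (uintptr_t)p & 3 and the table slot as
 * (void *)p - chan.  This works only because tid_tab entries are pointers
 * and hence at least 4-byte aligned, leaving the low two bits free.
 */
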
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}

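/* The single t4_alloc_mem() above carves one contiguous block into five
 * regions, in the same order as the size computation:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 *
 * so the pointer fix-ups following the allocation must be kept in sync with
 * any change to that computation.
 */
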
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

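/* Usage sketch (table contents hypothetical): with an MTU table starting
 * { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ... }, a request for
 * mtu = 1400 advances while mtus[i + 1] <= 1400 and stops at 1280 with
 * *idx = 6; a target smaller than mtus[0] simply returns mtus[0].
 */
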
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);

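/* Worked example (numbers illustrative): with header_size = 40,
 * data_size_max = 1460 (max_mtu = 1500) and data_size_align = 8, an MTU of
 * 1500 yields a 1460-byte Data Segment (not 8-aligned) while 1488 yields
 * 1448, which is.  Since the 1488 entry is within one slot of the closest
 * MTU, 1488 is returned in preference to 1500.
 */
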
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

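/* Unpacking sketch for the 64-bit context word read above: the shifts and
 * 0xffff masks treat it as
 *
 *	cidx = bits 40..25,  pidx = bits 24..9
 *
 * (layout inferred from the extraction code here, not from a hardware
 * reference).
 */
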

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
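
/* Worked example of the producer-index resync above (illustrative numbers
 * only): with a 1024-entry queue, if the host's pidx is 10 but the hardware
 * last saw pidx 1020, the host has wrapped, so the doorbell delta is
 * size - hw_pidx + pidx = 1024 - 1020 + 10 = 14 descriptors rather than a
 * negative number.  That delta is what gets written into the PIDX field of
 * the kernel doorbell register.
 */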

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
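
/* Worked example of the offset-to-memory mapping above (illustrative
 * numbers only): on a card with 256MB of EDC0 and 256MB of EDC1, edc0_end
 * is 0x10000000 and edc1_end is 0x20000000.  An offset of 0x28000000 is
 * past both, so it falls into MC0 at memaddr = offset - edc1_end =
 * 0x08000000.
 */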

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 0,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
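
/* Usage sketch (illustrative only, not part of the driver): how an
 * upper-layer driver could use cxgb4_bar2_sge_qregs() to ring an
 * egress-queue doorbell through BAR2 instead of the backdoor kernel
 * doorbell register.  "my_qid" and "n" are hypothetical.
 */
#if 0
	u64 bar2_qoffset;
	unsigned int bar2_qid;

	if (!cxgb4_bar2_sge_qregs(dev, my_qid, CXGB4_BAR2_QTYPE_EGRESS,
				  &bar2_qoffset, &bar2_qid))
		/* advance the queue's producer index by n descriptors */
		writel(PIDX_T5_V(n) | QID_V(bar2_qid),
		       netdev2adap(dev)->bar2 + bar2_qoffset +
		       SGE_UDB_KDOORBELL);
#endif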

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}

void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
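
/* Registration sketch (illustrative only, not part of the driver): the
 * general shape of a ULD hooking into the interface above.  "my_uld_add",
 * "my_uld_rx_handler" and "my_uld_state_change" are hypothetical; the real
 * users are the Chelsio iSCSI and RDMA drivers.
 */
#if 0
static const struct cxgb4_uld_info my_uld_info = {
	.name = "my_uld",
	.add = my_uld_add,			/* called per adapter with the lld_info */
	.rx_handler = my_uld_rx_handler,	/* ingress CPL messages */
	.state_change = my_uld_state_change,	/* CXGB4_STATE_* notifications */
};

	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
	if (err)
		return err;	/* -EBUSY if this ULD type is already registered */
#endif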

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/*
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  The checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
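
/* Usage sketch (illustrative only, not part of the driver): how an
 * upper-layer driver listening on a TCP port might steer matching SYNs to
 * its own ingress queue.  "my_stid" and "my_rspq_id" are hypothetical.
 */
#if 0
	/* sip of 0 matches any local IP; port 3260 is the iSCSI default */
	ret = cxgb4_create_server_filter(dev, my_stid,
					 htonl(INADDR_ANY), htons(3260),
					 0, my_rspq_id, 0, 0);
	...
	cxgb4_remove_server_filter(dev, my_stid, my_rspq_id, false);
#endif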

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}

/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}

static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with, on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}
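
	/* Worked example of the residual handling above (illustrative
	 * numbers only): a 1031-byte Configuration File gives
	 * size = 1031 & ~0x3 = 1028 and resid = 1031 & 0x3 = 3.  The first
	 * 1028 bytes go out in the bulk write; the final 3 bytes are copied
	 * into a zero-padded 4-byte word and written separately so no stale
	 * buffer bytes reach the adapter.
	 */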

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 *    to redirect SYN packets to offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
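
/* Note on ordering (per the PCI EEH recovery model): on a channel error the
 * core first calls .error_detected(); unless the channel has failed
 * permanently it then resets the slot and calls .slot_reset(), and finally
 * calls .resume() once traffic may restart.
 */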

static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
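
/* The callers below pass, in order: the interrupt holdoff time in
 * microseconds (us), the interrupt packet-count threshold (cnt), the number
 * of response-queue entries (size) and the entry size in bytes (iqe_size).
 * So, for example, init_rspq(adap, &q, 5, 10, 1024, 64) sets up a 1024-entry
 * queue of 64-byte entries with a 5 us / 10 packet interrupt holdoff.
 */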

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
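
	/* Worked example (hypothetical card): with 2 x 10G + 2 x 1G ports
	 * and MAX_ETH_QSETS == 32, q10g = (32 - 2) / 2 == 15, which is then
	 * capped by netif_get_num_default_rss_queues() (at most 8).
	 */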

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
			      adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
	}
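
	/* To make the CIQ rounding above concrete (hypothetical numbers):
	 * on a 4-port card in a 6-CPU system, min(MAX_RDMA_CIQS, 6) == 6
	 * (assuming MAX_RDMA_CIQS >= 6), (6 / 4) * 4 == 4 rounds down to a
	 * multiple of the port count, and max(4, 4) == 4 keeps at least one
	 * CIQ per port.
	 */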

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;

	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < MAX_INGQ + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev,
			 "not enough MSI-X vectors left, not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
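
	/* A concrete walk-through (hypothetical numbers): with nchan == 2,
	 * ofld_need == 3 * 2 == 6.  If allocated == 20, the NIC group may
	 * keep up to i == 20 - 2 - 6 == 12 vectors; whatever the Ethernet
	 * queue sets leave unused reappears in the leftover computation
	 * above and is rounded down to a multiple of nchan for OFLD.
	 */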

	for (i = 0; i < allocated; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	kfree(entries);
	return 0;
}

static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
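
	/* The trailing '/' left by the speed loop is overwritten here, so a
	 * port supporting 1G and 10G prints as, e.g., "1000/10GBASE-T"
	 * (illustrative; the suffix comes from the reported port type).
	 */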

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox,
					   adapter->pf, 0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	int ver, chip = 0;
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	switch (ver) {
	case CHELSIO_T4:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	case CHELSIO_T6:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		break;
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}
	return chip;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
		t4_write_reg(adapter, SGE_STAT_CFG_A,
			     STATSOURCE_T5_V(7) | STATMODE_V(0));
	}
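
	/* To make the write-coalescing check above concrete: with a 4 KB
	 * page and 128-byte segments, num_seg == 4096 / 128 == 32, so the
	 * check rejects configurations where more than 32 egress queues
	 * share a page.
	 */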

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}
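
	/* Each MAC byte above is rebuilt from two ASCII hex digits of the
	 * VPD "na" string; e.g. a hypothetical na of "000743000123" yields
	 * the hardware address 00:07:43:00:01:23.
	 */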

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
					  adapter->clipt_end);
	if (!adapter->clipt) {
		/* We tolerate a lack of clip_table, giving up
		 * some functionality
		 */
		dev_warn(&pdev->dev,
			 "could not allocate Clip table, continuing\n");
		adapter->params.offload = 0;
	}
#endif

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev,
			 "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else {
		pci_release_regions(pdev);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);