/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PF's access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}

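/*
 * A small worked example of the bit trick above: for portvec = 0xa (ports 1
 * and 3 active) and portn = 1, the first iteration isolates the lowest set
 * bit, pmask = 0xa ^ (0xa & 0x9) = 0x2, and clears it from portvec; the
 * second iteration then returns pmask = 0x8, i.e. the mask for port 3.
 */
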
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

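/*
 * As a sanity check on the default selection above: the chosen fields add up
 * to 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, comfortably within the 36-bit budget
 * for the compressed portion of the filter.
 */
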
static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

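/*
 * Note on the helper below: exact-match MAC filters are written in chunks of
 * up to 7 addresses (the size of the local addr[] array); any addresses the
 * hardware cannot place in exact filters are folded into the unicast and
 * multicast hash filters via the uhash/mhash bits that t4_alloc_mac_filt()
 * hands back, which are then programmed by t4_set_addr_hash().
 */
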
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

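/*
 * For example, if the first port is named eth0 and each port has two queue
 * sets, the naming below produces "eth0" (non-data interrupts),
 * "eth0-FWeventq", per-queue "ethN-Rx0"/"ethN-Rx1" entries, and
 * "eth0-ofld<N>"/"eth0-rdma<N>" entries as seen in /proc/interrupts.
 */
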
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

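/*
 * Note on the msi_idx convention used below: a positive value selects the
 * MSI-X vector for a queue's interrupts, while in MSI/INTx mode the negative
 * value -(s->intrq.abs_id + 1) is passed instead, which t4_sge_alloc_rxq()
 * uses to have the queue forward its interrupt indications to the common
 * interrupt queue s->intrq (a reading of the allocation code, not a
 * documented contract).
 */
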
/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if the flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers, exp_major;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;
        char *fw_file_name;

        switch (CHELSIO_CHIP_VERSION(adap->chip)) {
        case CHELSIO_T4:
                fw_file_name = FW_FNAME;
                exp_major = FW_VERSION_MAJOR;
                break;
        case CHELSIO_T5:
                fw_file_name = FW5_FNAME;
                exp_major = FW_VERSION_MAJOR_T5;
                break;
        default:
                dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
                return -EINVAL;
        }

        ret = request_firmware(&fw, fw_file_name, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image %s, error %d\n",
                        fw_file_name, ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
                ret = -EINVAL;      /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
            vers > adap->params.fw_vers) {
                dev_info(dev, "upgrading firmware ...\n");
                ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
                                    /*force=*/false);
                if (!ret)
                        dev_info(dev,
                                 "firmware upgraded to version %pI4 from %s\n",
                                 &hdr->fw_ver, fw_file_name);
                else
                        dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
        } else {
                /*
                 * Tell our caller that we didn't upgrade the firmware.
                 */
                ret = -EINVAL;
        }

out:    release_firmware(fw);
        return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxUnicastFrames    ",
        "TxErrorFrames      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "TxFramesDropped    ",
        "TxPauseFrames      ",
        "TxPPP0Frames       ",
        "TxPPP1Frames       ",
        "TxPPP2Frames       ",
        "TxPPP3Frames       ",
        "TxPPP4Frames       ",
        "TxPPP5Frames       ",
        "TxPPP6Frames       ",
        "TxPPP7Frames       ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxUnicastFrames    ",

        "RxFramesTooLong    ",
        "RxJabberErrors     ",
        "RxFCSErrors        ",
        "RxLengthErrors     ",
        "RxSymbolErrors     ",
        "RxRuntFrames       ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "RxPauseFrames      ",
        "RxPPP0Frames       ",
        "RxPPP1Frames       ",
        "RxPPP2Frames       ",
        "RxPPP3Frames       ",
        "RxPPP4Frames       ",
        "RxPPP5Frames       ",
        "RxPPP6Frames       ",
        "RxPPP7Frames       ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",

        "TSO                ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
        "WriteCoalSuccess   ",
        "WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

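/*
 * The ethtool stats buffer filled in by get_stats() below mirrors
 * stats_strings[] exactly: the hardware port_stats block comes first, then
 * the per-queue stats gathered above, then the two write-coalescing
 * counters, which only exist on T5 and are zeroed on T4.
 */
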
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                data++;
                *data = val2;
                data++;
        } else {
                memset(data, 0, 2 * sizeof(u64));
                data += 2;      /* advance past the two zeroed slots */
        }
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->chip) |
                (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}

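/*
 * Worked example, assuming CHELSIO_CHIP_VERSION() yields 0x4 for a T4 part:
 * a revision-2 T4 adapter would report 0x4 | (2 << 10) | (1 << 16) = 0x10804.
 */
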
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b9c, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e408, 0x1e40c,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e690,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e808, 0x1e80c,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea90,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec08, 0x1ec0c,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee90,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f008, 0x1f00c,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f290,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f408, 0x1f40c,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f690,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f808, 0x1f80c,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa90,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc08, 0x1fc0c,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe90,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30030,
                0x30100, 0x30144,
                0x30190, 0x301d0,
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
                0x30a00, 0x30a04,
                0x30a0c, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
                0x31600, 0x31600,
                0x31608, 0x3160c,
                0x31a00, 0x31a1c,
                0x31e04, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
                0x32208, 0x3223c,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
                0x33000, 0x33048,
                0x33060, 0x3309c,
                0x330f0, 0x33148,
                0x33160, 0x3319c,
                0x331f0, 0x332e4,
                0x332f8, 0x333e4,
                0x333f8, 0x33448,
                0x33460, 0x3349c,
                0x334f0, 0x33548,
                0x33560, 0x3359c,
                0x335f0, 0x336e4,
                0x336f8, 0x337e4,
                0x337f8, 0x337fc,
                0x33814, 0x33814,
                0x3382c, 0x3382c,
                0x33880, 0x3388c,
                0x338e8, 0x338ec,
                0x33900, 0x33948,
                0x33960, 0x3399c,
                0x339f0, 0x33ae4,
                0x33af8, 0x33b10,
                0x33b28, 0x33b28,
                0x33b3c, 0x33b50,
                0x33bf0, 0x33c10,
                0x33c28, 0x33c28,
                0x33c3c, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34030,
                0x34100, 0x34144,
                0x34190, 0x341d0,
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
                0x34a00, 0x34a04,
                0x34a0c, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
                0x35600, 0x35600,
                0x35608, 0x3560c,
                0x35a00, 0x35a1c,
                0x35e04, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
                0x36208, 0x3623c,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
1975 0x36b00, 0x36b70,
1976 0x37000, 0x37048,
1977 0x37060, 0x3709c,
1978 0x370f0, 0x37148,
1979 0x37160, 0x3719c,
1980 0x371f0, 0x372e4,
1981 0x372f8, 0x373e4,
1982 0x373f8, 0x37448,
1983 0x37460, 0x3749c,
1984 0x374f0, 0x37548,
1985 0x37560, 0x3759c,
1986 0x375f0, 0x376e4,
1987 0x376f8, 0x377e4,
1988 0x377f8, 0x377fc,
1989 0x37814, 0x37814,
1990 0x3782c, 0x3782c,
1991 0x37880, 0x3788c,
1992 0x378e8, 0x378ec,
1993 0x37900, 0x37948,
1994 0x37960, 0x3799c,
1995 0x379f0, 0x37ae4,
1996 0x37af8, 0x37b10,
1997 0x37b28, 0x37b28,
1998 0x37b3c, 0x37b50,
1999 0x37bf0, 0x37c10,
2000 0x37c28, 0x37c28,
2001 0x37c3c, 0x37c50,
2002 0x37cf0, 0x37cfc,
2003 0x38000, 0x38030,
2004 0x38100, 0x38144,
2005 0x38190, 0x381d0,
2006 0x38200, 0x38318,
2007 0x38400, 0x3852c,
2008 0x38540, 0x3861c,
2009 0x38800, 0x38834,
2010 0x388c0, 0x38908,
2011 0x38910, 0x389ac,
2012 0x38a00, 0x38a04,
2013 0x38a0c, 0x38a2c,
2014 0x38a44, 0x38a50,
2015 0x38a74, 0x38c24,
2016 0x38d08, 0x38d14,
2017 0x38d1c, 0x38d20,
2018 0x38d3c, 0x38d50,
2019 0x39200, 0x3920c,
2020 0x39220, 0x39220,
2021 0x39240, 0x39240,
2022 0x39600, 0x39600,
2023 0x39608, 0x3960c,
2024 0x39a00, 0x39a1c,
2025 0x39e04, 0x39e20,
2026 0x39e38, 0x39e3c,
2027 0x39e80, 0x39e80,
2028 0x39e88, 0x39ea8,
2029 0x39eb0, 0x39eb4,
2030 0x39ec8, 0x39ed4,
2031 0x39fb8, 0x3a004,
2032 0x3a208, 0x3a23c,
2033 0x3a600, 0x3a630,
2034 0x3aa00, 0x3aabc,
2035 0x3ab00, 0x3ab70,
2036 0x3b000, 0x3b048,
2037 0x3b060, 0x3b09c,
2038 0x3b0f0, 0x3b148,
2039 0x3b160, 0x3b19c,
2040 0x3b1f0, 0x3b2e4,
2041 0x3b2f8, 0x3b3e4,
2042 0x3b3f8, 0x3b448,
2043 0x3b460, 0x3b49c,
2044 0x3b4f0, 0x3b548,
2045 0x3b560, 0x3b59c,
2046 0x3b5f0, 0x3b6e4,
2047 0x3b6f8, 0x3b7e4,
2048 0x3b7f8, 0x3b7fc,
2049 0x3b814, 0x3b814,
2050 0x3b82c, 0x3b82c,
2051 0x3b880, 0x3b88c,
2052 0x3b8e8, 0x3b8ec,
2053 0x3b900, 0x3b948,
2054 0x3b960, 0x3b99c,
2055 0x3b9f0, 0x3bae4,
2056 0x3baf8, 0x3bb10,
2057 0x3bb28, 0x3bb28,
2058 0x3bb3c, 0x3bb50,
2059 0x3bbf0, 0x3bc10,
2060 0x3bc28, 0x3bc28,
2061 0x3bc3c, 0x3bc50,
2062 0x3bcf0, 0x3bcfc,
2063 0x3c000, 0x3c030,
2064 0x3c100, 0x3c144,
2065 0x3c190, 0x3c1d0,
2066 0x3c200, 0x3c318,
2067 0x3c400, 0x3c52c,
2068 0x3c540, 0x3c61c,
2069 0x3c800, 0x3c834,
2070 0x3c8c0, 0x3c908,
2071 0x3c910, 0x3c9ac,
2072 0x3ca00, 0x3ca04,
2073 0x3ca0c, 0x3ca2c,
2074 0x3ca44, 0x3ca50,
2075 0x3ca74, 0x3cc24,
2076 0x3cd08, 0x3cd14,
2077 0x3cd1c, 0x3cd20,
2078 0x3cd3c, 0x3cd50,
2079 0x3d200, 0x3d20c,
2080 0x3d220, 0x3d220,
2081 0x3d240, 0x3d240,
2082 0x3d600, 0x3d600,
2083 0x3d608, 0x3d60c,
2084 0x3da00, 0x3da1c,
2085 0x3de04, 0x3de20,
2086 0x3de38, 0x3de3c,
2087 0x3de80, 0x3de80,
2088 0x3de88, 0x3dea8,
2089 0x3deb0, 0x3deb4,
2090 0x3dec8, 0x3ded4,
2091 0x3dfb8, 0x3e004,
2092 0x3e208, 0x3e23c,
2093 0x3e600, 0x3e630,
2094 0x3ea00, 0x3eabc,
2095 0x3eb00, 0x3eb70,
2096 0x3f000, 0x3f048,
2097 0x3f060, 0x3f09c,
2098 0x3f0f0, 0x3f148,
2099 0x3f160, 0x3f19c,
2100 0x3f1f0, 0x3f2e4,
2101 0x3f2f8, 0x3f3e4,
2102 0x3f3f8, 0x3f448,
2103 0x3f460, 0x3f49c,
2104 0x3f4f0, 0x3f548,
2105 0x3f560, 0x3f59c,
2106 0x3f5f0, 0x3f6e4,
2107 0x3f6f8, 0x3f7e4,
2108 0x3f7f8, 0x3f7fc,
2109 0x3f814, 0x3f814,
2110 0x3f82c, 0x3f82c,
2111 0x3f880, 0x3f88c,
2112 0x3f8e8, 0x3f8ec,
2113 0x3f900, 0x3f948,
2114 0x3f960, 0x3f99c,
2115 0x3f9f0, 0x3fae4,
2116 0x3faf8, 0x3fb10,
2117 0x3fb28, 0x3fb28,
2118 0x3fb3c, 0x3fb50,
2119 0x3fbf0, 0x3fc10,
2120 0x3fc28, 0x3fc28,
2121 0x3fc3c, 0x3fc50,
2122 0x3fcf0, 0x3fcfc,
2123 0x40000, 0x4000c,
2124 0x40040, 0x40068,
2125 0x40080, 0x40144,
2126 0x40180, 0x4018c,
2127 0x40200, 0x40298,
2128 0x402ac, 0x4033c,
2129 0x403f8, 0x403fc,
2130 0x41300, 0x413c4,
2131 0x41400, 0x4141c,
2132 0x41480, 0x414d0,
2133 0x44000, 0x44078,
2134 0x440c0, 0x44278,
2135 0x442c0, 0x44478,
2136 0x444c0, 0x44678,
2137 0x446c0, 0x44878,
2138 0x448c0, 0x449fc,
2139 0x45000, 0x45068,
2140 0x45080, 0x45084,
2141 0x450a0, 0x450b0,
2142 0x45200, 0x45268,
2143 0x45280, 0x45284,
2144 0x452a0, 0x452b0,
2145 0x460c0, 0x460e4,
2146 0x47000, 0x4708c,
2147 0x47200, 0x47250,
2148 0x47400, 0x47420,
2149 0x47600, 0x47618,
2150 0x47800, 0x47814,
2151 0x48000, 0x4800c,
2152 0x48040, 0x48068,
2153 0x48080, 0x48144,
2154 0x48180, 0x4818c,
2155 0x48200, 0x48298,
2156 0x482ac, 0x4833c,
2157 0x483f8, 0x483fc,
2158 0x49300, 0x493c4,
2159 0x49400, 0x4941c,
2160 0x49480, 0x494d0,
2161 0x4c000, 0x4c078,
2162 0x4c0c0, 0x4c278,
2163 0x4c2c0, 0x4c478,
2164 0x4c4c0, 0x4c678,
2165 0x4c6c0, 0x4c878,
2166 0x4c8c0, 0x4c9fc,
2167 0x4d000, 0x4d068,
2168 0x4d080, 0x4d084,
2169 0x4d0a0, 0x4d0b0,
2170 0x4d200, 0x4d268,
2171 0x4d280, 0x4d284,
2172 0x4d2a0, 0x4d2b0,
2173 0x4e0c0, 0x4e0e4,
2174 0x4f000, 0x4f08c,
2175 0x4f200, 0x4f250,
2176 0x4f400, 0x4f420,
2177 0x4f600, 0x4f618,
2178 0x4f800, 0x4f814,
2179 0x50000, 0x500cc,
2180 0x50400, 0x50400,
2181 0x50800, 0x508cc,
2182 0x50c00, 0x50c00,
2183 0x51000, 0x5101c,
2184 0x51300, 0x51308,
2185 };
2186
b8ff05a9
DM
2187 int i;
2188 struct adapter *ap = netdev2adap(dev);
251f9e88
SR
2189 static const unsigned int *reg_ranges;
2190 int arr_size = 0, buf_size = 0;
2191
2192 if (is_t4(ap->chip)) {
2193 reg_ranges = &t4_reg_ranges[0];
2194 arr_size = ARRAY_SIZE(t4_reg_ranges);
2195 buf_size = T4_REGMAP_SIZE;
2196 } else {
2197 reg_ranges = &t5_reg_ranges[0];
2198 arr_size = ARRAY_SIZE(t5_reg_ranges);
2199 buf_size = T5_REGMAP_SIZE;
2200 }
b8ff05a9
DM
2201
2202 regs->version = mk_adap_vers(ap);
2203
251f9e88
SR
2204 memset(buf, 0, buf_size);
2205 for (i = 0; i < arr_size; i += 2)
b8ff05a9
DM
2206 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2207}
2208
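/*
 * Illustrative sketch (not part of the driver): each (start, end) pair in
 * the range tables above expands into consecutive 32-bit register reads,
 * stored at the matching offset in the snapshot buffer.  Assuming only
 * t4_read_reg(), a reg_block_dump()-style helper could look like:
 */
static void reg_block_dump_sketch(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;	/* void * arithmetic, as used in-kernel */

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}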
2209static int restart_autoneg(struct net_device *dev)
2210{
2211 struct port_info *p = netdev_priv(dev);
2212
2213 if (!netif_running(dev))
2214 return -EAGAIN;
2215 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2216 return -EINVAL;
060e0c75 2217 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
b8ff05a9
DM
2218 return 0;
2219}
2220
c5e06360
DM
2221static int identify_port(struct net_device *dev,
2222 enum ethtool_phys_id_state state)
b8ff05a9 2223{
c5e06360 2224 unsigned int val;
060e0c75
DM
2225 struct adapter *adap = netdev2adap(dev);
2226
c5e06360
DM
2227 if (state == ETHTOOL_ID_ACTIVE)
2228 val = 0xffff;
2229 else if (state == ETHTOOL_ID_INACTIVE)
2230 val = 0;
2231 else
2232 return -EINVAL;
b8ff05a9 2233
c5e06360 2234 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
b8ff05a9
DM
2235}
2236
2237static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2238{
2239 unsigned int v = 0;
2240
a0881cab
DM
2241 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2242 type == FW_PORT_TYPE_BT_XAUI) {
b8ff05a9
DM
2243 v |= SUPPORTED_TP;
2244 if (caps & FW_PORT_CAP_SPEED_100M)
2245 v |= SUPPORTED_100baseT_Full;
2246 if (caps & FW_PORT_CAP_SPEED_1G)
2247 v |= SUPPORTED_1000baseT_Full;
2248 if (caps & FW_PORT_CAP_SPEED_10G)
2249 v |= SUPPORTED_10000baseT_Full;
2250 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2251 v |= SUPPORTED_Backplane;
2252 if (caps & FW_PORT_CAP_SPEED_1G)
2253 v |= SUPPORTED_1000baseKX_Full;
2254 if (caps & FW_PORT_CAP_SPEED_10G)
2255 v |= SUPPORTED_10000baseKX4_Full;
2256 } else if (type == FW_PORT_TYPE_KR)
2257 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
a0881cab 2258 else if (type == FW_PORT_TYPE_BP_AP)
7d5e77aa
DM
2259 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2260 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2261 else if (type == FW_PORT_TYPE_BP4_AP)
2262 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2263 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2264 SUPPORTED_10000baseKX4_Full;
a0881cab
DM
2265 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2266 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
b8ff05a9
DM
2267 v |= SUPPORTED_FIBRE;
2268
2269 if (caps & FW_PORT_CAP_ANEG)
2270 v |= SUPPORTED_Autoneg;
2271 return v;
2272}
2273
2274static unsigned int to_fw_linkcaps(unsigned int caps)
2275{
2276 unsigned int v = 0;
2277
2278 if (caps & ADVERTISED_100baseT_Full)
2279 v |= FW_PORT_CAP_SPEED_100M;
2280 if (caps & ADVERTISED_1000baseT_Full)
2281 v |= FW_PORT_CAP_SPEED_1G;
2282 if (caps & ADVERTISED_10000baseT_Full)
2283 v |= FW_PORT_CAP_SPEED_10G;
2284 return v;
2285}
2286
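/*
 * Worked example (illustrative): over the speed bits the two helpers are
 * near-inverses, e.g. advertising 1G and 10G full duplex translates as
 *
 *	to_fw_linkcaps(ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full)
 *		== (FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G)
 */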
2287static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2288{
2289 const struct port_info *p = netdev_priv(dev);
2290
2291 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
a0881cab 2292 p->port_type == FW_PORT_TYPE_BT_XFI ||
b8ff05a9
DM
2293 p->port_type == FW_PORT_TYPE_BT_XAUI)
2294 cmd->port = PORT_TP;
a0881cab
DM
2295 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2296 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
b8ff05a9 2297 cmd->port = PORT_FIBRE;
a0881cab
DM
2298 else if (p->port_type == FW_PORT_TYPE_SFP) {
2299 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2300 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2301 cmd->port = PORT_DA;
2302 else
2303 cmd->port = PORT_FIBRE;
2304 } else
b8ff05a9
DM
2305 cmd->port = PORT_OTHER;
2306
2307 if (p->mdio_addr >= 0) {
2308 cmd->phy_address = p->mdio_addr;
2309 cmd->transceiver = XCVR_EXTERNAL;
2310 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2311 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2312 } else {
2313 cmd->phy_address = 0; /* not really, but no better option */
2314 cmd->transceiver = XCVR_INTERNAL;
2315 cmd->mdio_support = 0;
2316 }
2317
2318 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2319 cmd->advertising = from_fw_linkcaps(p->port_type,
2320 p->link_cfg.advertising);
70739497
DD
2321 ethtool_cmd_speed_set(cmd,
2322 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
b8ff05a9
DM
2323 cmd->duplex = DUPLEX_FULL;
2324 cmd->autoneg = p->link_cfg.autoneg;
2325 cmd->maxtxpkt = 0;
2326 cmd->maxrxpkt = 0;
2327 return 0;
2328}
2329
2330static unsigned int speed_to_caps(int speed)
2331{
2332 if (speed == SPEED_100)
2333 return FW_PORT_CAP_SPEED_100M;
2334 if (speed == SPEED_1000)
2335 return FW_PORT_CAP_SPEED_1G;
2336 if (speed == SPEED_10000)
2337 return FW_PORT_CAP_SPEED_10G;
2338 return 0;
2339}
2340
2341static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2342{
2343 unsigned int cap;
2344 struct port_info *p = netdev_priv(dev);
2345 struct link_config *lc = &p->link_cfg;
25db0338 2346 u32 speed = ethtool_cmd_speed(cmd);
b8ff05a9
DM
2347
2348 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2349 return -EINVAL;
2350
2351 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2352 /*
2353 * PHY offers a single speed. See if that's what's
2354 * being requested.
2355 */
2356 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
2357 (lc->supported & speed_to_caps(speed)))
2358 return 0;
b8ff05a9
DM
2359 return -EINVAL;
2360 }
2361
2362 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 2363 cap = speed_to_caps(speed);
b8ff05a9 2364
25db0338
DD
2365 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2366 (speed == SPEED_10000))
b8ff05a9
DM
2367 return -EINVAL;
2368 lc->requested_speed = cap;
2369 lc->advertising = 0;
2370 } else {
2371 cap = to_fw_linkcaps(cmd->advertising);
2372 if (!(lc->supported & cap))
2373 return -EINVAL;
2374 lc->requested_speed = 0;
2375 lc->advertising = cap | FW_PORT_CAP_ANEG;
2376 }
2377 lc->autoneg = cmd->autoneg;
2378
2379 if (netif_running(dev))
060e0c75
DM
2380 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2381 lc);
b8ff05a9
DM
2382 return 0;
2383}
2384
2385static void get_pauseparam(struct net_device *dev,
2386 struct ethtool_pauseparam *epause)
2387{
2388 struct port_info *p = netdev_priv(dev);
2389
2390 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2391 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2392 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2393}
2394
2395static int set_pauseparam(struct net_device *dev,
2396 struct ethtool_pauseparam *epause)
2397{
2398 struct port_info *p = netdev_priv(dev);
2399 struct link_config *lc = &p->link_cfg;
2400
2401 if (epause->autoneg == AUTONEG_DISABLE)
2402 lc->requested_fc = 0;
2403 else if (lc->supported & FW_PORT_CAP_ANEG)
2404 lc->requested_fc = PAUSE_AUTONEG;
2405 else
2406 return -EINVAL;
2407
2408 if (epause->rx_pause)
2409 lc->requested_fc |= PAUSE_RX;
2410 if (epause->tx_pause)
2411 lc->requested_fc |= PAUSE_TX;
2412 if (netif_running(dev))
060e0c75
DM
2413 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2414 lc);
b8ff05a9
DM
2415 return 0;
2416}
2417
b8ff05a9
DM
2418static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2419{
2420 const struct port_info *pi = netdev_priv(dev);
2421 const struct sge *s = &pi->adapter->sge;
2422
2423 e->rx_max_pending = MAX_RX_BUFFERS;
2424 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2425 e->rx_jumbo_max_pending = 0;
2426 e->tx_max_pending = MAX_TXQ_ENTRIES;
2427
2428 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2429 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2430 e->rx_jumbo_pending = 0;
2431 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2432}
2433
2434static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2435{
2436 int i;
2437 const struct port_info *pi = netdev_priv(dev);
2438 struct adapter *adapter = pi->adapter;
2439 struct sge *s = &adapter->sge;
2440
2441 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2442 e->tx_pending > MAX_TXQ_ENTRIES ||
2443 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2444 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2445 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2446 return -EINVAL;
2447
2448 if (adapter->flags & FULL_INIT_DONE)
2449 return -EBUSY;
2450
2451 for (i = 0; i < pi->nqsets; ++i) {
2452 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2453 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2454 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2455 }
2456 return 0;
2457}
2458
2459static int closest_timer(const struct sge *s, int time)
2460{
2461 int i, delta, match = 0, min_delta = INT_MAX;
2462
2463 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2464 delta = time - s->timer_val[i];
2465 if (delta < 0)
2466 delta = -delta;
2467 if (delta < min_delta) {
2468 min_delta = delta;
2469 match = i;
2470 }
2471 }
2472 return match;
2473}
2474
2475static int closest_thres(const struct sge *s, int thres)
2476{
2477 int i, delta, match = 0, min_delta = INT_MAX;
2478
2479 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2480 delta = thres - s->counter_val[i];
2481 if (delta < 0)
2482 delta = -delta;
2483 if (delta < min_delta) {
2484 min_delta = delta;
2485 match = i;
2486 }
2487 }
2488 return match;
2489}
2490
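/*
 * Worked example (values are illustrative only): with timer values such
 * as { 5, 10, 20, 50, 100, 200 }, closest_timer() for time == 7 compares
 * |7 - v| against each entry and returns index 0 (value 5, delta 2), the
 * minimum absolute difference; ties go to the earlier index because the
 * comparison is strict.  closest_thres() runs the same search over
 * sge.counter_val[].
 */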
2491/*
2492 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2493 */
2494static unsigned int qtimer_val(const struct adapter *adap,
2495 const struct sge_rspq *q)
2496{
2497 unsigned int idx = q->intr_params >> 1;
2498
2499 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2500}
2501
2502/**
2503 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2504 * @adap: the adapter
2505 * @q: the Rx queue
2506 * @us: the hold-off time in us, or 0 to disable timer
2507 * @cnt: the hold-off packet count, or 0 to disable counter
2508 *
2509 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2510 * one of the two needs to be enabled for the queue to generate interrupts.
2511 */
2512static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2513 unsigned int us, unsigned int cnt)
2514{
2515 if ((us | cnt) == 0)
2516 cnt = 1;
2517
2518 if (cnt) {
2519 int err;
2520 u32 v, new_idx;
2521
2522 new_idx = closest_thres(&adap->sge, cnt);
2523 if (q->desc && q->pktcnt_idx != new_idx) {
2524 /* the queue has already been created; update it */
2525 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2526 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2527 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
2528 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2529 &new_idx);
b8ff05a9
DM
2530 if (err)
2531 return err;
2532 }
2533 q->pktcnt_idx = new_idx;
2534 }
2535
2536 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2537 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2538 return 0;
2539}
2540
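/*
 * Encoding note (derived from the code above): q->intr_params keeps the
 * packet-count enable flag in bit 0 and the timer index in the bits above
 * it, which is why qtimer_val() recovers the index with intr_params >> 1.
 * For example, timer index 3 with the counter enabled is stored as
 * QINTR_TIMER_IDX(3) | QINTR_CNT_EN.
 */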
2541static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2542{
2543 const struct port_info *pi = netdev_priv(dev);
2544 struct adapter *adap = pi->adapter;
d4fc9dc2
TLSC
2545 struct sge_rspq *q;
2546 int i;
2547 int r = 0;
2548
2549 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2550 q = &adap->sge.ethrxq[i].rspq;
2551 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2552 c->rx_max_coalesced_frames);
2553 if (r) {
2554 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2555 break;
2556 }
2557 }
2558 return r;
b8ff05a9
DM
2559}
2560
2561static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2562{
2563 const struct port_info *pi = netdev_priv(dev);
2564 const struct adapter *adap = pi->adapter;
2565 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2566
2567 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2568 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2569 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2570 return 0;
2571}
2572
1478b3ee
DM
2573/**
2574 * eeprom_ptov - translate a physical EEPROM address to virtual
2575 * @phys_addr: the physical EEPROM address
2576 * @fn: the PCI function number
2577 * @sz: size of function-specific area
2578 *
2579 * Translate a physical EEPROM address to virtual. The first 1K is
2580 * accessed through virtual addresses starting at 31K; the rest is
2581 * accessed through virtual addresses starting at 0.
2582 *
2583 * The mapping is as follows:
2584 * [0..1K) -> [31K..32K)
2585 * [1K..1K+A) -> [31K-A..31K)
2586 * [1K+A..ES) -> [0..ES-A-1K)
2587 *
2588 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2589 */
1478b3ee 2590static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2591{
1478b3ee 2592 fn *= sz;
b8ff05a9
DM
2593 if (phys_addr < 1024)
2594 return phys_addr + (31 << 10);
1478b3ee
DM
2595 if (phys_addr < 1024 + fn)
2596 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2597 if (phys_addr < EEPROMSIZE)
1478b3ee 2598 return phys_addr - 1024 - fn;
b8ff05a9
DM
2599 return -EINVAL;
2600}
2601
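/*
 * Worked example for eeprom_ptov() (illustrative; EEPROMPFSIZE is assumed
 * to be 1K for the numbers below).  With fn = 1, A = 1K:
 *
 *	eeprom_ptov(0,    1, 1024) == 31744   ([0..1K)    -> [31K..32K))
 *	eeprom_ptov(1024, 1, 1024) == 30720   ([1K..1K+A) -> [31K-A..31K))
 *	eeprom_ptov(2048, 1, 1024) == 0       ([1K+A..ES) -> [0..ES-A-1K))
 */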
2602/*
2603 * The next two routines implement EEPROM read/write from physical addresses.
b8ff05a9
DM
2604 */
2605static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2606{
1478b3ee 2607 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2608
2609 if (vaddr >= 0)
2610 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2611 return vaddr < 0 ? vaddr : 0;
2612}
2613
2614static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2615{
1478b3ee 2616 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2617
2618 if (vaddr >= 0)
2619 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2620 return vaddr < 0 ? vaddr : 0;
2621}
2622
2623#define EEPROM_MAGIC 0x38E2F10C
2624
2625static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2626 u8 *data)
2627{
2628 int i, err = 0;
2629 struct adapter *adapter = netdev2adap(dev);
2630
2631 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2632 if (!buf)
2633 return -ENOMEM;
2634
2635 e->magic = EEPROM_MAGIC;
2636 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2637 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2638
2639 if (!err)
2640 memcpy(data, buf + e->offset, e->len);
2641 kfree(buf);
2642 return err;
2643}
2644
2645static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2646 u8 *data)
2647{
2648 u8 *buf;
2649 int err = 0;
2650 u32 aligned_offset, aligned_len, *p;
2651 struct adapter *adapter = netdev2adap(dev);
2652
2653 if (eeprom->magic != EEPROM_MAGIC)
2654 return -EINVAL;
2655
2656 aligned_offset = eeprom->offset & ~3;
2657 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2658
1478b3ee
DM
2659 if (adapter->fn > 0) {
2660 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2661
2662 if (aligned_offset < start ||
2663 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2664 return -EPERM;
2665 }
2666
b8ff05a9
DM
2667 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2668 /*
2669 * Read-modify-write (RMW) possibly needed for first or last words.
2670 */
2671 buf = kmalloc(aligned_len, GFP_KERNEL);
2672 if (!buf)
2673 return -ENOMEM;
2674 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2675 if (!err && aligned_len > 4)
2676 err = eeprom_rd_phys(adapter,
2677 aligned_offset + aligned_len - 4,
2678 (u32 *)&buf[aligned_len - 4]);
2679 if (err)
2680 goto out;
2681 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2682 } else
2683 buf = data;
2684
2685 err = t4_seeprom_wp(adapter, false);
2686 if (err)
2687 goto out;
2688
2689 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2690 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2691 aligned_offset += 4;
2692 }
2693
2694 if (!err)
2695 err = t4_seeprom_wp(adapter, true);
2696out:
2697 if (buf != data)
2698 kfree(buf);
2699 return err;
2700}
2701
2702static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2703{
2704 int ret;
2705 const struct firmware *fw;
2706 struct adapter *adap = netdev2adap(netdev);
2707
2708 ef->data[sizeof(ef->data) - 1] = '\0';
2709 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2710 if (ret < 0)
2711 return ret;
2712
2713 ret = t4_load_fw(adap, fw->data, fw->size);
2714 release_firmware(fw);
2715 if (!ret)
2716 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2717 return ret;
2718}
2719
2720#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2721#define BCAST_CRC 0xa0ccc1a6
2722
2723static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2724{
2725 wol->supported = WOL_SUPPORTED;
2726 wol->wolopts = netdev2adap(dev)->wol;
2727 memset(&wol->sopass, 0, sizeof(wol->sopass));
2728}
2729
2730static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2731{
2732 int err = 0;
2733 struct port_info *pi = netdev_priv(dev);
2734
2735 if (wol->wolopts & ~WOL_SUPPORTED)
2736 return -EINVAL;
2737 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2738 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2739 if (wol->wolopts & WAKE_BCAST) {
2740 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2741 ~0ULL, 0, false);
2742 if (!err)
2743 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2744 ~6ULL, ~0ULL, BCAST_CRC, true);
2745 } else
2746 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2747 return err;
2748}
2749
c8f44aff 2750static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2751{
2ed28baa 2752 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2753 netdev_features_t changed = dev->features ^ features;
19ecae2c 2754 int err;
19ecae2c 2755
f646968f 2756 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2ed28baa 2757 return 0;
19ecae2c 2758
2ed28baa
MM
2759 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2760 -1, -1, -1,
f646968f 2761 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2ed28baa 2762 if (unlikely(err))
f646968f 2763 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
19ecae2c 2764 return err;
87b6cf51
DM
2765}
2766
7850f63f 2767static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2768{
2769 const struct port_info *pi = netdev_priv(dev);
671b0060 2770
7850f63f
BH
2771 return pi->rss_size;
2772}
2773
2774static int get_rss_table(struct net_device *dev, u32 *p)
2775{
2776 const struct port_info *pi = netdev_priv(dev);
2777 unsigned int n = pi->rss_size;
2778
671b0060 2779 while (n--)
7850f63f 2780 p[n] = pi->rss[n];
671b0060
DM
2781 return 0;
2782}
2783
7850f63f 2784static int set_rss_table(struct net_device *dev, const u32 *p)
671b0060
DM
2785{
2786 unsigned int i;
2787 struct port_info *pi = netdev_priv(dev);
2788
7850f63f
BH
2789 for (i = 0; i < pi->rss_size; i++)
2790 pi->rss[i] = p[i];
671b0060
DM
2791 if (pi->adapter->flags & FULL_INIT_DONE)
2792 return write_rss(pi, pi->rss);
2793 return 0;
2794}
2795
2796static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2797 u32 *rules)
671b0060 2798{
f796564a
DM
2799 const struct port_info *pi = netdev_priv(dev);
2800
671b0060 2801 switch (info->cmd) {
f796564a
DM
2802 case ETHTOOL_GRXFH: {
2803 unsigned int v = pi->rss_mode;
2804
2805 info->data = 0;
2806 switch (info->flow_type) {
2807 case TCP_V4_FLOW:
2808 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2809 info->data = RXH_IP_SRC | RXH_IP_DST |
2810 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2811 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2812 info->data = RXH_IP_SRC | RXH_IP_DST;
2813 break;
2814 case UDP_V4_FLOW:
2815 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2816 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2817 info->data = RXH_IP_SRC | RXH_IP_DST |
2818 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2819 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2820 info->data = RXH_IP_SRC | RXH_IP_DST;
2821 break;
2822 case SCTP_V4_FLOW:
2823 case AH_ESP_V4_FLOW:
2824 case IPV4_FLOW:
2825 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2826 info->data = RXH_IP_SRC | RXH_IP_DST;
2827 break;
2828 case TCP_V6_FLOW:
2829 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2830 info->data = RXH_IP_SRC | RXH_IP_DST |
2831 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2832 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2833 info->data = RXH_IP_SRC | RXH_IP_DST;
2834 break;
2835 case UDP_V6_FLOW:
2836 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2837 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2838 info->data = RXH_IP_SRC | RXH_IP_DST |
2839 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2840 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2841 info->data = RXH_IP_SRC | RXH_IP_DST;
2842 break;
2843 case SCTP_V6_FLOW:
2844 case AH_ESP_V6_FLOW:
2845 case IPV6_FLOW:
2846 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2847 info->data = RXH_IP_SRC | RXH_IP_DST;
2848 break;
2849 }
2850 return 0;
2851 }
671b0060 2852 case ETHTOOL_GRXRINGS:
f796564a 2853 info->data = pi->nqsets;
671b0060
DM
2854 return 0;
2855 }
2856 return -EOPNOTSUPP;
2857}
2858
9b07be4b 2859static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
2860 .get_settings = get_settings,
2861 .set_settings = set_settings,
2862 .get_drvinfo = get_drvinfo,
2863 .get_msglevel = get_msglevel,
2864 .set_msglevel = set_msglevel,
2865 .get_ringparam = get_sge_param,
2866 .set_ringparam = set_sge_param,
2867 .get_coalesce = get_coalesce,
2868 .set_coalesce = set_coalesce,
2869 .get_eeprom_len = get_eeprom_len,
2870 .get_eeprom = get_eeprom,
2871 .set_eeprom = set_eeprom,
2872 .get_pauseparam = get_pauseparam,
2873 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
2874 .get_link = ethtool_op_get_link,
2875 .get_strings = get_strings,
c5e06360 2876 .set_phys_id = identify_port,
b8ff05a9
DM
2877 .nway_reset = restart_autoneg,
2878 .get_sset_count = get_sset_count,
2879 .get_ethtool_stats = get_stats,
2880 .get_regs_len = get_regs_len,
2881 .get_regs = get_regs,
2882 .get_wol = get_wol,
2883 .set_wol = set_wol,
671b0060 2884 .get_rxnfc = get_rxnfc,
7850f63f 2885 .get_rxfh_indir_size = get_rss_table_size,
671b0060
DM
2886 .get_rxfh_indir = get_rss_table,
2887 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
2888 .flash_device = set_flash,
2889};
2890
2891/*
2892 * debugfs support
2893 */
b8ff05a9
DM
2894static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2895 loff_t *ppos)
2896{
2897 loff_t pos = *ppos;
496ad9aa 2898 loff_t avail = file_inode(file)->i_size;
b8ff05a9
DM
2899 unsigned int mem = (uintptr_t)file->private_data & 3;
2900 struct adapter *adap = file->private_data - mem;
2901
2902 if (pos < 0)
2903 return -EINVAL;
2904 if (pos >= avail)
2905 return 0;
2906 if (count > avail - pos)
2907 count = avail - pos;
2908
2909 while (count) {
2910 size_t len;
2911 int ret, ofst;
2912 __be32 data[16];
2913
19dd37ba
SR
2914 if ((mem == MEM_MC) || (mem == MEM_MC1))
2915 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
b8ff05a9
DM
2916 else
2917 ret = t4_edc_read(adap, mem, pos, data, NULL);
2918 if (ret)
2919 return ret;
2920
2921 ofst = pos % sizeof(data);
2922 len = min(count, sizeof(data) - ofst);
2923 if (copy_to_user(buf, (u8 *)data + ofst, len))
2924 return -EFAULT;
2925
2926 buf += len;
2927 pos += len;
2928 count -= len;
2929 }
2930 count = pos - *ppos;
2931 *ppos = pos;
2932 return count;
2933}
2934
2935static const struct file_operations mem_debugfs_fops = {
2936 .owner = THIS_MODULE,
234e3405 2937 .open = simple_open,
b8ff05a9 2938 .read = mem_read,
6038f373 2939 .llseek = default_llseek,
b8ff05a9
DM
2940};
2941
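/*
 * Note on the private_data encoding (derived from mem_read() above and
 * add_debugfs_mem() below): the memory index rides in the low two bits
 * of the adapter pointer, which are free because struct adapter is
 * aligned well beyond 4 bytes:
 *
 *	file->private_data = (void *)adap + idx;     encode (idx = 0..3)
 *	mem  = (uintptr_t)file->private_data & 3;    recover the index
 *	adap = file->private_data - mem;             recover the pointer
 */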
91744948 2942static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 2943 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
2944{
2945 struct dentry *de;
2946
2947 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2948 (void *)adap + idx, &mem_debugfs_fops);
2949 if (de && de->d_inode)
2950 de->d_inode->i_size = size_mb << 20;
2951}
2952
91744948 2953static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
2954{
2955 int i;
19dd37ba 2956 u32 size;
b8ff05a9
DM
2957
2958 if (IS_ERR_OR_NULL(adap->debugfs_root))
2959 return -1;
2960
2961 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
19dd37ba
SR
2962 if (i & EDRAM0_ENABLE) {
2963 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2964 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2965 }
2966 if (i & EDRAM1_ENABLE) {
2967 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2968 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2969 }
2970 if (is_t4(adap->chip)) {
2971 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2972 if (i & EXT_MEM_ENABLE)
2973 add_debugfs_mem(adap, "mc", MEM_MC,
2974 EXT_MEM_SIZE_GET(size));
2975 } else {
2976 if (i & EXT_MEM_ENABLE) {
2977 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2978 add_debugfs_mem(adap, "mc0", MEM_MC0,
2979 EXT_MEM_SIZE_GET(size));
2980 }
2981 if (i & EXT_MEM1_ENABLE) {
2982 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2983 add_debugfs_mem(adap, "mc1", MEM_MC1,
2984 EXT_MEM_SIZE_GET(size));
2985 }
2986 }
b8ff05a9
DM
2987 if (adap->l2t)
2988 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2989 &t4_l2t_fops);
2990 return 0;
2991}
2992
2993/*
2994 * upper-layer driver support
2995 */
2996
2997/*
2998 * Allocate an active-open TID and set it to the supplied value.
2999 */
3000int cxgb4_alloc_atid(struct tid_info *t, void *data)
3001{
3002 int atid = -1;
3003
3004 spin_lock_bh(&t->atid_lock);
3005 if (t->afree) {
3006 union aopen_entry *p = t->afree;
3007
f2b7e78d 3008 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
3009 t->afree = p->next;
3010 p->data = data;
3011 t->atids_in_use++;
3012 }
3013 spin_unlock_bh(&t->atid_lock);
3014 return atid;
3015}
3016EXPORT_SYMBOL(cxgb4_alloc_atid);
3017
3018/*
3019 * Release an active-open TID.
3020 */
3021void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3022{
f2b7e78d 3023 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
3024
3025 spin_lock_bh(&t->atid_lock);
3026 p->next = t->afree;
3027 t->afree = p;
3028 t->atids_in_use--;
3029 spin_unlock_bh(&t->atid_lock);
3030}
3031EXPORT_SYMBOL(cxgb4_free_atid);
3032
3033/*
3034 * Allocate a server TID and set it to the supplied value.
3035 */
3036int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3037{
3038 int stid;
3039
3040 spin_lock_bh(&t->stid_lock);
3041 if (family == PF_INET) {
3042 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3043 if (stid < t->nstids)
3044 __set_bit(stid, t->stid_bmap);
3045 else
3046 stid = -1;
3047 } else {
3048 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3049 if (stid < 0)
3050 stid = -1;
3051 }
3052 if (stid >= 0) {
3053 t->stid_tab[stid].data = data;
3054 stid += t->stid_base;
3055 t->stids_in_use++;
3056 }
3057 spin_unlock_bh(&t->stid_lock);
3058 return stid;
3059}
3060EXPORT_SYMBOL(cxgb4_alloc_stid);
3061
dca4faeb
VP
3062/* Allocate a server filter TID and set it to the supplied value.
3063 */
3064int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3065{
3066 int stid;
3067
3068 spin_lock_bh(&t->stid_lock);
3069 if (family == PF_INET) {
3070 stid = find_next_zero_bit(t->stid_bmap,
3071 t->nstids + t->nsftids, t->nstids);
3072 if (stid < (t->nstids + t->nsftids))
3073 __set_bit(stid, t->stid_bmap);
3074 else
3075 stid = -1;
3076 } else {
3077 stid = -1;
3078 }
3079 if (stid >= 0) {
3080 t->stid_tab[stid].data = data;
3081 stid += t->stid_base;
3082 t->stids_in_use++;
3083 }
3084 spin_unlock_bh(&t->stid_lock);
3085 return stid;
3086}
3087EXPORT_SYMBOL(cxgb4_alloc_sftid);
3088
3089/* Release a server TID.
b8ff05a9
DM
3090 */
3091void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3092{
3093 stid -= t->stid_base;
3094 spin_lock_bh(&t->stid_lock);
3095 if (family == PF_INET)
3096 __clear_bit(stid, t->stid_bmap);
3097 else
3098 bitmap_release_region(t->stid_bmap, stid, 2);
3099 t->stid_tab[stid].data = NULL;
3100 t->stids_in_use--;
3101 spin_unlock_bh(&t->stid_lock);
3102}
3103EXPORT_SYMBOL(cxgb4_free_stid);
3104
3105/*
3106 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3107 */
3108static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3109 unsigned int tid)
3110{
3111 struct cpl_tid_release *req;
3112
3113 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3114 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3115 INIT_TP_WR(req, tid);
3116 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3117}
3118
3119/*
3120 * Queue a TID release request and, if necessary, schedule work to
3121 * process it.
3122 */
31b9c19b 3123static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3124 unsigned int tid)
b8ff05a9
DM
3125{
3126 void **p = &t->tid_tab[tid];
3127 struct adapter *adap = container_of(t, struct adapter, tids);
3128
3129 spin_lock_bh(&adap->tid_release_lock);
3130 *p = adap->tid_release_head;
3131 /* Low 2 bits encode the Tx channel number */
3132 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3133 if (!adap->tid_release_task_busy) {
3134 adap->tid_release_task_busy = true;
3069ee9b 3135 queue_work(workq, &adap->tid_release_task);
b8ff05a9
DM
3136 }
3137 spin_unlock_bh(&adap->tid_release_lock);
3138}
b8ff05a9
DM
3139
3140/*
3141 * Process the list of pending TID release requests.
3142 */
3143static void process_tid_release_list(struct work_struct *work)
3144{
3145 struct sk_buff *skb;
3146 struct adapter *adap;
3147
3148 adap = container_of(work, struct adapter, tid_release_task);
3149
3150 spin_lock_bh(&adap->tid_release_lock);
3151 while (adap->tid_release_head) {
3152 void **p = adap->tid_release_head;
3153 unsigned int chan = (uintptr_t)p & 3;
3154 p = (void *)p - chan;
3155
3156 adap->tid_release_head = *p;
3157 *p = NULL;
3158 spin_unlock_bh(&adap->tid_release_lock);
3159
3160 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3161 GFP_KERNEL)))
3162 schedule_timeout_uninterruptible(1);
3163
3164 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3165 t4_ofld_send(adap, skb);
3166 spin_lock_bh(&adap->tid_release_lock);
3167 }
3168 adap->tid_release_task_busy = false;
3169 spin_unlock_bh(&adap->tid_release_lock);
3170}
3171
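/*
 * Illustration of the deferred-release list (derived from the two
 * functions above): tid_release_head is an intrusive singly-linked list
 * threaded through the tid_tab[] slots themselves, with the Tx channel
 * tagged into the low two bits of each link:
 *
 *	p = &t->tid_tab[tid];
 *	*p = adap->tid_release_head;			link in the old head
 *	adap->tid_release_head = (void **)((uintptr_t)p | chan);
 *
 * The worker strips the tag with (uintptr_t)p & 3 and recovers the TID
 * from the slot's offset, p - adap->tids.tid_tab.
 */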
3172/*
3173 * Release a TID and inform HW. If we are unable to allocate the release
3174 * message, we defer it to a work queue.
3175 */
3176void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3177{
3178 void *old;
3179 struct sk_buff *skb;
3180 struct adapter *adap = container_of(t, struct adapter, tids);
3181
3182 old = t->tid_tab[tid];
3183 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3184 if (likely(skb)) {
3185 t->tid_tab[tid] = NULL;
3186 mk_tid_release(skb, chan, tid);
3187 t4_ofld_send(adap, skb);
3188 } else
3189 cxgb4_queue_tid_release(t, chan, tid);
3190 if (old)
3191 atomic_dec(&t->tids_in_use);
3192}
3193EXPORT_SYMBOL(cxgb4_remove_tid);
3194
3195/*
3196 * Allocate and initialize the TID tables. Returns 0 on success.
3197 */
3198static int tid_init(struct tid_info *t)
3199{
3200 size_t size;
f2b7e78d 3201 unsigned int stid_bmap_size;
b8ff05a9
DM
3202 unsigned int natids = t->natids;
3203
dca4faeb 3204 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
3205 size = t->ntids * sizeof(*t->tid_tab) +
3206 natids * sizeof(*t->atid_tab) +
b8ff05a9 3207 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 3208 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 3209 stid_bmap_size * sizeof(long) +
dca4faeb
VP
3210 t->nftids * sizeof(*t->ftid_tab) +
3211 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 3212
b8ff05a9
DM
3213 t->tid_tab = t4_alloc_mem(size);
3214 if (!t->tid_tab)
3215 return -ENOMEM;
3216
3217 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3218 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 3219 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 3220 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
3221 spin_lock_init(&t->stid_lock);
3222 spin_lock_init(&t->atid_lock);
3223
3224 t->stids_in_use = 0;
3225 t->afree = NULL;
3226 t->atids_in_use = 0;
3227 atomic_set(&t->tids_in_use, 0);
3228
3229 /* Setup the free list for atid_tab and clear the stid bitmap. */
3230 if (natids) {
3231 while (--natids)
3232 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3233 t->afree = t->atid_tab;
3234 }
dca4faeb 3235 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b8ff05a9
DM
3236 return 0;
3237}
3238
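/*
 * Layout sketch (derived from the size computation in tid_init() above):
 * one t4_alloc_mem() region backs every lookup table, carved up in order:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 *
 * so the pointers set up above are just offsets into that single block.
 */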
01bcca68
VP
3239static int cxgb4_clip_get(const struct net_device *dev,
3240 const struct in6_addr *lip)
3241{
3242 struct adapter *adap;
3243 struct fw_clip_cmd c;
3244
3245 adap = netdev2adap(dev);
3246 memset(&c, 0, sizeof(c));
3247 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3248 FW_CMD_REQUEST | FW_CMD_WRITE);
3249 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3250 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3251 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3252 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3253}
3254
3255static int cxgb4_clip_release(const struct net_device *dev,
3256 const struct in6_addr *lip)
3257{
3258 struct adapter *adap;
3259 struct fw_clip_cmd c;
3260
3261 adap = netdev2adap(dev);
3262 memset(&c, 0, sizeof(c));
3263 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3264 FW_CMD_REQUEST | FW_CMD_READ);
3265 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3266 *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3267 *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3268 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3269}
3270
b8ff05a9
DM
3271/**
3272 * cxgb4_create_server - create an IP server
3273 * @dev: the device
3274 * @stid: the server TID
3275 * @sip: local IP address to bind server to
3276 * @sport: the server's TCP port
 * @vlan: the VLAN header information
3277 * @queue: queue to direct messages from this server to
3278 *
3279 * Create an IP server for the given port and address.
3280 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3281 */
3282int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
3283 __be32 sip, __be16 sport, __be16 vlan,
3284 unsigned int queue)
b8ff05a9
DM
3285{
3286 unsigned int chan;
3287 struct sk_buff *skb;
3288 struct adapter *adap;
3289 struct cpl_pass_open_req *req;
80f40c1f 3290 int ret;
b8ff05a9
DM
3291
3292 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3293 if (!skb)
3294 return -ENOMEM;
3295
3296 adap = netdev2adap(dev);
3297 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3298 INIT_TP_WR(req, 0);
3299 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3300 req->local_port = sport;
3301 req->peer_port = htons(0);
3302 req->local_ip = sip;
3303 req->peer_ip = htonl(0);
e46dab4d 3304 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
3305 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3306 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3307 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
80f40c1f
VP
3308 ret = t4_mgmt_tx(adap, skb);
3309 return net_xmit_eval(ret);
b8ff05a9
DM
3310}
3311EXPORT_SYMBOL(cxgb4_create_server);
3312
80f40c1f
VP
3313/** cxgb4_create_server6 - create an IPv6 server
3314 * @dev: the device
3315 * @stid: the server TID
3316 * @sip: local IPv6 address to bind server to
3317 * @sport: the server's TCP port
3318 * @queue: queue to direct messages from this server to
3319 *
3320 * Create an IPv6 server for the given port and address.
3321 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3322 */
3323int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3324 const struct in6_addr *sip, __be16 sport,
3325 unsigned int queue)
3326{
3327 unsigned int chan;
3328 struct sk_buff *skb;
3329 struct adapter *adap;
3330 struct cpl_pass_open_req6 *req;
3331 int ret;
3332
3333 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3334 if (!skb)
3335 return -ENOMEM;
3336
3337 adap = netdev2adap(dev);
3338 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3339 INIT_TP_WR(req, 0);
3340 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3341 req->local_port = sport;
3342 req->peer_port = htons(0);
3343 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3344 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3345 req->peer_ip_hi = cpu_to_be64(0);
3346 req->peer_ip_lo = cpu_to_be64(0);
3347 chan = rxq_to_chan(&adap->sge, queue);
3348 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3349 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3350 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3351 ret = t4_mgmt_tx(adap, skb);
3352 return net_xmit_eval(ret);
3353}
3354EXPORT_SYMBOL(cxgb4_create_server6);
3355
3356int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3357 unsigned int queue, bool ipv6)
3358{
3359 struct sk_buff *skb;
3360 struct adapter *adap;
3361 struct cpl_close_listsvr_req *req;
3362 int ret;
3363
3364 adap = netdev2adap(dev);
3365
3366 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3367 if (!skb)
3368 return -ENOMEM;
3369
3370 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3371 INIT_TP_WR(req, 0);
3372 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3373 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3374 LISTSVR_IPV6(0)) | QUEUENO(queue));
3375 ret = t4_mgmt_tx(adap, skb);
3376 return net_xmit_eval(ret);
3377}
3378EXPORT_SYMBOL(cxgb4_remove_server);
3379
b8ff05a9
DM
3380/**
3381 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3382 * @mtus: the HW MTU table
3383 * @mtu: the target MTU
3384 * @idx: index of selected entry in the MTU table
3385 *
3386 * Returns the index and the value in the HW MTU table that is closest to
3387 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3388 * table, in which case that smallest available value is selected.
3389 */
3390unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3391 unsigned int *idx)
3392{
3393 unsigned int i = 0;
3394
3395 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3396 ++i;
3397 if (idx)
3398 *idx = i;
3399 return mtus[i];
3400}
3401EXPORT_SYMBOL(cxgb4_best_mtu);
3402
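/*
 * Worked example (hypothetical MTU table): with
 * mtus[] = { 88, 576, 1500, 9000 }, cxgb4_best_mtu(mtus, 1400, &idx)
 * advances while the next entry still fits and stops at 576 (idx == 1),
 * since 1500 would exceed the target.  A target below every entry, e.g.
 * 64, yields mtus[0] == 88, the smallest available value.
 */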
3403/**
3404 * cxgb4_port_chan - get the HW channel of a port
3405 * @dev: the net device for the port
3406 *
3407 * Return the HW Tx channel of the given port.
3408 */
3409unsigned int cxgb4_port_chan(const struct net_device *dev)
3410{
3411 return netdev2pinfo(dev)->tx_chan;
3412}
3413EXPORT_SYMBOL(cxgb4_port_chan);
3414
881806bc
VP
3415unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3416{
3417 struct adapter *adap = netdev2adap(dev);
2cc301d2 3418 u32 v1, v2, lp_count, hp_count;
881806bc 3419
2cc301d2
SR
3420 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3421 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3422 if (is_t4(adap->chip)) {
3423 lp_count = G_LP_COUNT(v1);
3424 hp_count = G_HP_COUNT(v1);
3425 } else {
3426 lp_count = G_LP_COUNT_T5(v1);
3427 hp_count = G_HP_COUNT_T5(v2);
3428 }
3429 return lpfifo ? lp_count : hp_count;
881806bc
VP
3430}
3431EXPORT_SYMBOL(cxgb4_dbfifo_count);
3432
b8ff05a9
DM
3433/**
3434 * cxgb4_port_viid - get the VI id of a port
3435 * @dev: the net device for the port
3436 *
3437 * Return the VI id of the given port.
3438 */
3439unsigned int cxgb4_port_viid(const struct net_device *dev)
3440{
3441 return netdev2pinfo(dev)->viid;
3442}
3443EXPORT_SYMBOL(cxgb4_port_viid);
3444
3445/**
3446 * cxgb4_port_idx - get the index of a port
3447 * @dev: the net device for the port
3448 *
3449 * Return the index of the given port.
3450 */
3451unsigned int cxgb4_port_idx(const struct net_device *dev)
3452{
3453 return netdev2pinfo(dev)->port_id;
3454}
3455EXPORT_SYMBOL(cxgb4_port_idx);
3456
b8ff05a9
DM
3457void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3458 struct tp_tcp_stats *v6)
3459{
3460 struct adapter *adap = pci_get_drvdata(pdev);
3461
3462 spin_lock(&adap->stats_lock);
3463 t4_tp_get_tcp_stats(adap, v4, v6);
3464 spin_unlock(&adap->stats_lock);
3465}
3466EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3467
3468void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3469 const unsigned int *pgsz_order)
3470{
3471 struct adapter *adap = netdev2adap(dev);
3472
3473 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3474 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3475 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3476 HPZ3(pgsz_order[3]));
3477}
3478EXPORT_SYMBOL(cxgb4_iscsi_init);
3479
3069ee9b
VP
3480int cxgb4_flush_eq_cache(struct net_device *dev)
3481{
3482 struct adapter *adap = netdev2adap(dev);
3483 int ret;
3484
3485 ret = t4_fwaddrspace_write(adap, adap->mbox,
3486 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3487 return ret;
3488}
3489EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3490
3491static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3492{
3493 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3494 __be64 indices;
3495 int ret;
3496
3497 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3498 if (!ret) {
404d9e3f
VP
3499 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3500 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
3501 }
3502 return ret;
3503}
3504
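/*
 * Field placement (derived from read_eq_indices() above): the big-endian
 * 64-bit context word holds the consumer index in bits 25..40 and the
 * producer index in bits 9..24, hence the two shift-and-mask extractions:
 *
 *	cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
 *	pidx = (be64_to_cpu(indices) >>  9) & 0xffff;
 */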
3505int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3506 u16 size)
3507{
3508 struct adapter *adap = netdev2adap(dev);
3509 u16 hw_pidx, hw_cidx;
3510 int ret;
3511
3512 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3513 if (ret)
3514 goto out;
3515
3516 if (pidx != hw_pidx) {
3517 u16 delta;
3518
3519 if (pidx >= hw_pidx)
3520 delta = pidx - hw_pidx;
3521 else
3522 delta = size - hw_pidx + pidx;
3523 wmb();
840f3000
VP
3524 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3525 QID(qid) | PIDX(delta));
3069ee9b
VP
3526 }
3527out:
3528 return ret;
3529}
3530EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3531
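/*
 * Worked example for the catch-up arithmetic above (hypothetical numbers):
 * on a queue of size 512, if the caller's pidx is 10 while the hardware
 * still holds hw_pidx == 500, the producer index wrapped, so the doorbell
 * must credit delta = size - hw_pidx + pidx = 512 - 500 + 10 = 22
 * descriptors rather than a negative difference.
 */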
3cbdb928
VP
3532void cxgb4_disable_db_coalescing(struct net_device *dev)
3533{
3534 struct adapter *adap;
3535
3536 adap = netdev2adap(dev);
3537 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3538 F_NOCOALESCE);
3539}
3540EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3541
3542void cxgb4_enable_db_coalescing(struct net_device *dev)
3543{
3544 struct adapter *adap;
3545
3546 adap = netdev2adap(dev);
3547 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3548}
3549EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3550
b8ff05a9
DM
3551static struct pci_driver cxgb4_driver;
3552
3553static void check_neigh_update(struct neighbour *neigh)
3554{
3555 const struct device *parent;
3556 const struct net_device *netdev = neigh->dev;
3557
3558 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3559 netdev = vlan_dev_real_dev(netdev);
3560 parent = netdev->dev.parent;
3561 if (parent && parent->driver == &cxgb4_driver.driver)
3562 t4_l2t_update(dev_get_drvdata(parent), neigh);
3563}
3564
3565static int netevent_cb(struct notifier_block *nb, unsigned long event,
3566 void *data)
3567{
3568 switch (event) {
3569 case NETEVENT_NEIGH_UPDATE:
3570 check_neigh_update(data);
3571 break;
b8ff05a9
DM
3572 case NETEVENT_REDIRECT:
3573 default:
3574 break;
3575 }
3576 return 0;
3577}
3578
3579static bool netevent_registered;
3580static struct notifier_block cxgb4_netevent_nb = {
3581 .notifier_call = netevent_cb
3582};
3583
3069ee9b
VP
3584static void drain_db_fifo(struct adapter *adap, int usecs)
3585{
2cc301d2 3586 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
3587
3588 do {
2cc301d2
SR
3589 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3590 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3591 if (is_t4(adap->chip)) {
3592 lp_count = G_LP_COUNT(v1);
3593 hp_count = G_HP_COUNT(v1);
3594 } else {
3595 lp_count = G_LP_COUNT_T5(v1);
3596 hp_count = G_HP_COUNT_T5(v2);
3597 }
3598
3599 if (lp_count == 0 && hp_count == 0)
3600 break;
3069ee9b
VP
3601 set_current_state(TASK_UNINTERRUPTIBLE);
3602 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
3603 } while (1);
3604}
3605
3606static void disable_txq_db(struct sge_txq *q)
3607{
3608 spin_lock_irq(&q->db_lock);
3609 q->db_disabled = 1;
3610 spin_unlock_irq(&q->db_lock);
3611}
3612
3613static void enable_txq_db(struct sge_txq *q)
3614{
3615 spin_lock_irq(&q->db_lock);
3616 q->db_disabled = 0;
3617 spin_unlock_irq(&q->db_lock);
3618}
3619
3620static void disable_dbs(struct adapter *adap)
3621{
3622 int i;
3623
3624 for_each_ethrxq(&adap->sge, i)
3625 disable_txq_db(&adap->sge.ethtxq[i].q);
3626 for_each_ofldrxq(&adap->sge, i)
3627 disable_txq_db(&adap->sge.ofldtxq[i].q);
3628 for_each_port(adap, i)
3629 disable_txq_db(&adap->sge.ctrlq[i].q);
3630}
3631
3632static void enable_dbs(struct adapter *adap)
3633{
3634 int i;
3635
3636 for_each_ethrxq(&adap->sge, i)
3637 enable_txq_db(&adap->sge.ethtxq[i].q);
3638 for_each_ofldrxq(&adap->sge, i)
3639 enable_txq_db(&adap->sge.ofldtxq[i].q);
3640 for_each_port(adap, i)
3641 enable_txq_db(&adap->sge.ctrlq[i].q);
3642}
3643
3644static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3645{
3646 u16 hw_pidx, hw_cidx;
3647 int ret;
3648
3649 spin_lock_bh(&q->db_lock);
3650 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3651 if (ret)
3652 goto out;
3653 if (q->db_pidx != hw_pidx) {
3654 u16 delta;
3655
3656 if (q->db_pidx >= hw_pidx)
3657 delta = q->db_pidx - hw_pidx;
3658 else
3659 delta = q->size - hw_pidx + q->db_pidx;
3660 wmb();
840f3000
VP
3661 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3662 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
3663 }
3664out:
3665 q->db_disabled = 0;
3666 spin_unlock_bh(&q->db_lock);
3667 if (ret)
3668 CH_WARN(adap, "DB drop recovery failed.\n");
3669}
3670static void recover_all_queues(struct adapter *adap)
3671{
3672 int i;
3673
3674 for_each_ethrxq(&adap->sge, i)
3675 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3676 for_each_ofldrxq(&adap->sge, i)
3677 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3678 for_each_port(adap, i)
3679 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3680}
3681
881806bc
VP
3682static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3683{
3684 mutex_lock(&uld_mutex);
3685 if (adap->uld_handle[CXGB4_ULD_RDMA])
3686 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3687 cmd);
3688 mutex_unlock(&uld_mutex);
3689}
3690
3691static void process_db_full(struct work_struct *work)
3692{
3693 struct adapter *adap;
881806bc
VP
3694
3695 adap = container_of(work, struct adapter, db_full_task);
3696
881806bc 3697 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3069ee9b 3698 drain_db_fifo(adap, dbfifo_drain_delay);
840f3000
VP
3699 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3700 DBFIFO_HP_INT | DBFIFO_LP_INT,
3701 DBFIFO_HP_INT | DBFIFO_LP_INT);
881806bc 3702 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
881806bc
VP
3703}
3704
3705static void process_db_drop(struct work_struct *work)
3706{
3707 struct adapter *adap;
881806bc 3708
3069ee9b 3709 adap = container_of(work, struct adapter, db_drop_task);
881806bc 3710
2cc301d2
SR
3711 if (is_t4(adap->chip)) {
3712 disable_dbs(adap);
3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3714 drain_db_fifo(adap, 1);
3715 recover_all_queues(adap);
3716 enable_dbs(adap);
3717 } else {
3718 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3719 u32 qid = (dropped_db >> 15) & 0x1ffff;
3720 u16 pidx_inc = dropped_db & 0x1fff;
3721 unsigned int s_qpp;
3722 unsigned short udb_density;
3723 unsigned long qpshift;
3724 int page;
3725 u32 udb;
3726
3727 dev_warn(adap->pdev_dev,
3728 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3729 dropped_db, qid,
3730 (dropped_db >> 14) & 1,
3731 (dropped_db >> 13) & 1,
3732 pidx_inc);
3733
3734 drain_db_fifo(adap, 1);
3735
3736 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3737 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3738 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3739 qpshift = PAGE_SHIFT - ilog2(udb_density);
3740 udb = qid << qpshift;
3741 udb &= PAGE_MASK;
3742 page = udb / PAGE_SIZE;
3743 udb += (qid - (page * udb_density)) * 128;
3744
3745 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3746
3747 /* Re-enable BAR2 WC */
3748 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3749 }
3750
3069ee9b 3751 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
881806bc
VP
3752}
3753
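/*
 * Worked example for the T5 doorbell-address recovery above (hypothetical
 * numbers; assumes 4K pages): with udb_density == 16 queues per user
 * doorbell page, qpshift = PAGE_SHIFT - ilog2(16) = 12 - 4 = 8.  For
 * qid == 35:
 *
 *	udb  = (35 << 8) & PAGE_MASK  = 0x2000   (start of page 2)
 *	udb += (35 - 2 * 16) * 128    = 0x2180   (this queue's 128-byte slot)
 *
 * and the lost pidx increment is replayed by writing at offset 8 into
 * that slot.
 */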
3754void t4_db_full(struct adapter *adap)
3755{
2cc301d2
SR
3756 if (is_t4(adap->chip)) {
3757 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3758 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3759 queue_work(workq, &adap->db_full_task);
3760 }
881806bc
VP
3761}
3762
3763void t4_db_dropped(struct adapter *adap)
3764{
2cc301d2
SR
3765 if (is_t4(adap->chip))
3766 queue_work(workq, &adap->db_drop_task);
881806bc
VP
3767}
3768
b8ff05a9
DM
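/* Build a cxgb4_lld_info for this adapter and hand it to the ULD's add()
 * method.  On success the returned handle is recorded and, if the adapter
 * is already fully initialized, the ULD is immediately told it is UP.
 */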
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.filt_mode = adap->filter_mode;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/* Check if the netdev on which the event occurred belongs to us or not.
 * Returns success (1) if it belongs, otherwise failure (0).
 */
static int cxgb4_netdev(struct net_device *netdev)
{
	struct adapter *adap;
	int i;

	spin_lock(&adap_rcu_lock);
	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
		for (i = 0; i < MAX_NPORTS; i++)
			if (adap->port[i] == netdev) {
				spin_unlock(&adap_rcu_lock);
				return 1;
			}
	spin_unlock(&adap_rcu_lock);
	return 0;
}

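/* Claim or release a hardware CLIP entry for an IPv6 address on one of our
 * netdevs, depending on whether the address is coming up or going down.
 */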
static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
		    unsigned long event)
{
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	if (cxgb4_netdev(event_dev)) {
		switch (event) {
		case NETDEV_UP:
			ret = cxgb4_clip_get(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			if (ret < 0) {
				rcu_read_unlock();
				return ret;
			}
			ret = NOTIFY_OK;
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
			ret = NOTIFY_OK;
			break;
		default:
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

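/* inet6addr notifier: propagate IPv6 address changes into the hardware CLIP
 * table, looking through VLAN and bonding upper devices to find our ports.
 */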
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev;
	int ret = NOTIFY_DONE;
	struct bonding *bond = netdev_priv(ifa->idev->dev);
	struct list_head *iter;
	struct slave *slave;
	struct pci_dev *first_pdev = NULL;

	if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
		event_dev = vlan_dev_real_dev(ifa->idev->dev);
		ret = clip_add(event_dev, ifa, event);
	} else if (ifa->idev->dev->flags & IFF_MASTER) {
		/* It is possible that two different adapters are bonded in one
		 * bond.  We need to find such different adapters and add clip
		 * in all of them only once.
		 */
		read_lock(&bond->lock);
		bond_for_each_slave(bond, slave, iter) {
			if (!first_pdev) {
				ret = clip_add(slave->dev, ifa, event);
				/* Only initialize first_pdev if clip_add
				 * succeeds, since that means the slave is
				 * one of our devices.
				 */
				if (ret == NOTIFY_OK)
					first_pdev = to_pci_dev(
							slave->dev->dev.parent);
			} else if (first_pdev !=
				   to_pci_dev(slave->dev->dev.parent))
				ret = clip_add(slave->dev, ifa, event);
		}
		read_unlock(&bond->lock);
	} else
		ret = clip_add(ifa->idev->dev, ifa, event);

	return ret;
}

static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

static int update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}
	return ret;
}

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
	update_clip(adap);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

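/* Reverse of cxgb_up(): disable interrupts, flush the deferred work, and
 * release the IRQs and SGE resources acquired when the adapter came up.
 */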
static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, like the filter being locked,
 * currently pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

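/* Install a "server filter" that steers SYN packets for the given local
 * address/port to the specified ingress queue.  The stid is translated to
 * the corresponding index in the filter table.
 */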
dca4faeb 4235int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
4236 __be32 sip, __be16 sport, __be16 vlan,
4237 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
4238{
4239 int ret;
4240 struct filter_entry *f;
4241 struct adapter *adap;
4242 int i;
4243 u8 *val;
4244
4245 adap = netdev2adap(dev);
4246
1cab775c
VP
4247 /* Adjust stid to correct filter index */
4248 stid -= adap->tids.nstids;
4249 stid += adap->tids.nftids;
4250
dca4faeb
VP
4251 /* Check to make sure the filter requested is writable ...
4252 */
4253 f = &adap->tids.ftid_tab[stid];
4254 ret = writable_filter(f);
4255 if (ret)
4256 return ret;
4257
4258 /* Clear out any old resources being used by the filter before
4259 * we start constructing the new filter.
4260 */
4261 if (f->valid)
4262 clear_filter(adap, f);
4263
4264 /* Clear out filter specifications */
4265 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4266 f->fs.val.lport = cpu_to_be16(sport);
4267 f->fs.mask.lport = ~0;
4268 val = (u8 *)&sip;
793dad94 4269 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
4270 for (i = 0; i < 4; i++) {
4271 f->fs.val.lip[i] = val[i];
4272 f->fs.mask.lip[i] = ~0;
4273 }
793dad94
VP
4274 if (adap->filter_mode & F_PORT) {
4275 f->fs.val.iport = port;
4276 f->fs.mask.iport = mask;
4277 }
4278 }
dca4faeb
VP
4279
4280 f->fs.dirsteer = 1;
4281 f->fs.iq = queue;
4282 /* Mark filter as locked */
4283 f->locked = 1;
4284 f->fs.rpttid = 1;
4285
4286 ret = set_filter_wr(adap, stid);
4287 if (ret) {
4288 clear_filter(adap, f);
4289 return ret;
4290 }
4291
4292 return 0;
4293}
4294EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.nstids;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
			     stats.rx_ovflow2 + stats.rx_ovflow3 +
			     stats.rx_trunc0 + stats.rx_trunc1 +
			     stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

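/* Set up the PCIe memory access windows.  On T4 the windows take absolute
 * BAR0-based addresses; on T5 only the offset within the BAR is programmed.
 */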
static void setup_memwin(struct adapter *adap)
{
	u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	if (is_t4(adap->chip)) {
		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE_T5;
		mem_win2_base = MEMWIN2_BASE_T5;
	}
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     mem_win0_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     mem_win1_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     mem_win2_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		unsigned int start, sz_kb;

		start = pci_resource_start(adap->pdev, 2) +
			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}

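/* Minimal firmware handshake used on the EEH recovery path: negotiate
 * device capabilities, configure global RSS and PF resources, and apply a
 * few TP tweaks, finishing with t4_early_init().
 */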
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
4608
b8ff05a9
DM
4609/*
4610 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4611 */
4612#define MAX_ATIDS 8192U
4613
636f9d37
VP
4614/*
4615 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4616 *
4617 * If the firmware we're dealing with has Configuration File support, then
4618 * we use that to perform all configuration
4619 */
4620
4621/*
4622 * Tweak configuration based on module parameters, etc. Most of these have
4623 * defaults assigned to them by Firmware Configuration Files (if we're using
4624 * them) but need to be explicitly set if we're using hard-coded
4625 * initialization. But even in the case of using Firmware Configuration
4626 * Files, we'd like to expose the ability to change these via module
4627 * parameters so these are essentially common tweaks/settings for
4628 * Configuration Files and hard-coded initialization ...
4629 */
4630static int adap_init0_tweaks(struct adapter *adapter)
4631{
4632 /*
4633 * Fix up various Host-Dependent Parameters like Page Size, Cache
4634 * Line Size, etc. The firmware default is for a 4KB Page Size and
4635 * 64B Cache Line Size ...
4636 */
4637 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4638
4639 /*
4640 * Process module parameters which affect early initialization.
4641 */
4642 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4643 dev_err(&adapter->pdev->dev,
4644 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4645 rx_dma_offset);
4646 rx_dma_offset = 2;
4647 }
4648 t4_set_reg_field(adapter, SGE_CONTROL,
4649 PKTSHIFT_MASK,
4650 PKTSHIFT(rx_dma_offset));
4651
4652 /*
4653 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4654 * adds the pseudo header itself.
4655 */
4656 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4657 CSUM_HAS_PSEUDO_HDR, 0);
4658
4659 return 0;
4660}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret, using_flash;
	char *fw_config_file, fw_config_file_path[256];

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
	case CHELSIO_T4:
		fw_config_file = FW_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		using_flash = 1;
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		using_flash = 0;
		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_write() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				ret = t4_memory_write(adapter, mtype, maddr,
						      size, data);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_write(adapter, mtype,
							      maddr + size,
							      4, &last.word);
				}
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File %s, version %#x, computed checksum %#x\n",
		 (using_flash
		  ? "in device FLASH"
		  : fw_config_file_path),
		 finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
			 -ret);
	return ret;
}

/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;

	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;

	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(
						  adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "\
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case FCOE_MASK:
				bits += 1;
				break;
			case PORT_MASK:
				bits += 3;
				break;
			case VNIC_ID_MASK:
				bits += 17;
				break;
			case VLAN_MASK:
				bits += 17;
				break;
			case TOS_MASK:
				bits += 8;
				break;
			case PROTOCOL_MASK:
				bits += 8;
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MACMATCH_MASK:
				bits += 9;
				break;
			case MPSHITTYPE_MASK:
				bits += 3;
				break;
			case FRAGMENTATION_MASK:
				bits += 1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
				" using %#x\n", tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);

	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
		 "driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1, j;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	ret = t4_check_fw_version(adap);

	/* The error code -EFAULT is returned by t4_check_fw_version() if
	 * firmware on adapter < supported firmware.  If firmware on adapter
	 * is too old (not supported by driver) and we're the MASTER_PF set
	 * adapter state to DEV_STATE_UNINIT to force firmware upgrade
	 * and reinitialization.
	 */
	if ((adap->flags & MASTER_PF) && ret == -EFAULT)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
			if (upgrade_fw(adap) >= 0) {
				/*
				 * Note that the chip was reset as part of the
				 * firmware upgrade so we don't reset it again
				 * below and grab the new firmware version.
				 */
				reset = 0;
				ret = t4_check_fw_version(adap);
			} else
				if (ret == -EFAULT) {
					/*
					 * Firmware is old but still might
					 * work if we force reinitialization
					 * of the adapter.  Ignoring FW upgrade
					 * failure.
					 */
					dev_warn(adap->pdev_dev,
						 "Ignoring firmware upgrade "
						 "failure, and forcing driver "
						 "to reinitialize the "
						 "adapter.\n");
					ret = 0;
				}
		}
		if (ret < 0)
			return ret;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files warn user and exit,
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */

				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter.  Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts: regular filters get 1/3rd
		 * and server filters get the remaining 2/3rd.  This is only
		 * enabled if the workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are used
		 *    to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (j = 0; j < NCHAN; j++)
		adap->params.tp.tx_modq[j] = j;

	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->filter_mode, 1,
			 TP_VLAN_PRI_MAP);

	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

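/* Bring the adapter back up after a PCI slot reset: re-enable the device,
 * re-establish contact with the firmware, redo the basic initialization,
 * and re-allocate the virtual interfaces.
 */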
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
5536
5537static void eeh_resume(struct pci_dev *pdev)
5538{
5539 int i;
5540 struct adapter *adap = pci_get_drvdata(pdev);
5541
5542 if (!adap)
5543 return;
5544
5545 rtnl_lock();
5546 for_each_port(adap, i) {
5547 struct net_device *dev = adap->port[i];
5548
5549 if (netif_running(dev)) {
5550 link_start(dev);
5551 cxgb_set_rxmode(dev);
5552 }
5553 netif_device_attach(dev);
5554 }
5555 rtnl_unlock();
5556}
5557
5558static const struct pci_error_handlers cxgb4_eeh = {
5559 .error_detected = eeh_err_detected,
5560 .slot_reset = eeh_slot_reset,
5561 .resume = eeh_resume,
5562};
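/*
 * Editorial sketch of the EEH flow wired up above (standard PCI error
 * recovery semantics, summarized from the callbacks themselves): on a
 * fatal PCI error the core calls .error_detected(), which quiesces the
 * adapter and requests a slot reset; .slot_reset() re-enables the device,
 * re-establishes contact with the firmware and reallocates the virtual
 * interfaces; .resume() finally restarts any interfaces that were running.
 * Returning PCI_ERS_RESULT_DISCONNECT from any step gives up on the device.
 */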
5563
5564static inline bool is_10g_port(const struct link_config *lc)
5565{
5566 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5567}
5568
5569static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5570 unsigned int size, unsigned int iqe_size)
5571{
5572 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5573 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5574 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5575 q->iqe_len = iqe_size;
5576 q->size = size;
5577}
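/*
 * Illustrative use of the helper above (values made up): a call such as
 *
 *	init_rspq(&q, 6, 0, 512, 64);
 *
 * selects holdoff timer index 6 and packet-count threshold index 0; since
 * 0 < SGE_NCOUNTERS the QINTR_CNT_EN bit is set, and the queue gets 512
 * entries of 64 bytes each. A pkt_cnt_idx >= SGE_NCOUNTERS would leave the
 * count threshold disabled and fall back to pktcnt_idx 0.
 */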
5578
5579/*
5580 * Perform default configuration of DMA queues depending on the number and type
5581 * of ports we found and the number of available CPUs. Most settings can be
5582 * modified by the admin prior to actual use.
5583 */
5584static void cfg_queues(struct adapter *adap)
5585{
5586 struct sge *s = &adap->sge;
5587 int i, q10g = 0, n10g = 0, qidx = 0;
5588
5589 for_each_port(adap, i)
5590 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5591
5592 /*
5593	 * We default to one queue set per non-10G port and up to as many
5594	 * queue sets as there are CPU cores per 10G port.
5595 */
5596 if (n10g)
5597 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5598 if (q10g > netif_get_num_default_rss_queues())
5599 q10g = netif_get_num_default_rss_queues();
5600
5601 for_each_port(adap, i) {
5602 struct port_info *pi = adap2pinfo(adap, i);
5603
5604 pi->first_qset = qidx;
5605 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5606 qidx += pi->nqsets;
5607 }
5608
5609 s->ethqsets = qidx;
5610 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5611
5612 if (is_offload(adap)) {
5613 /*
5614 * For offload we use 1 queue/channel if all ports are up to 1G,
5615 * otherwise we divide all available queues amongst the channels
5616 * capped by the number of available cores.
5617 */
5618 if (n10g) {
5619 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5620 num_online_cpus());
5621 s->ofldqsets = roundup(i, adap->params.nports);
5622 } else
5623 s->ofldqsets = adap->params.nports;
5624 /* For RDMA one Rx queue per channel suffices */
5625 s->rdmaqs = adap->params.nports;
5626 }
5627
5628 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5629 struct sge_eth_rxq *r = &s->ethrxq[i];
5630
5631 init_rspq(&r->rspq, 0, 0, 1024, 64);
5632 r->fl.size = 72;
5633 }
5634
5635 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5636 s->ethtxq[i].q.size = 1024;
5637
5638 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5639 s->ctrlq[i].q.size = 512;
5640
5641 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5642 s->ofldtxq[i].q.size = 1024;
5643
5644 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5645 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5646
5647 init_rspq(&r->rspq, 0, 0, 1024, 64);
5648 r->rspq.uld = CXGB4_ULD_ISCSI;
5649 r->fl.size = 72;
5650 }
5651
5652 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5653 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5654
5655 init_rspq(&r->rspq, 0, 0, 511, 64);
5656 r->rspq.uld = CXGB4_ULD_RDMA;
5657 r->fl.size = 72;
5658 }
5659
5660 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5661 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5662}
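/*
 * Worked example of the distribution above (numbers assumed purely for
 * illustration): with 4 ports of which 2 are 10G, MAX_ETH_QSETS == 32 and
 * netif_get_num_default_rss_queues() == 8, q10g = (32 - 2) / 2 = 15,
 * capped to 8; the two 10G ports then get 8 queue sets each and the two
 * slower ports 1 each, so s->ethqsets == 18.
 */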
5663
5664/*
5665 * Reduce the number of Ethernet queues across all ports to at most n.
5666 * n provides at least one queue per port.
5667 */
5668static void reduce_ethqs(struct adapter *adap, int n)
5669{
5670 int i;
5671 struct port_info *pi;
5672
5673 while (n < adap->sge.ethqsets)
5674 for_each_port(adap, i) {
5675 pi = adap2pinfo(adap, i);
5676 if (pi->nqsets > 1) {
5677 pi->nqsets--;
5678 adap->sge.ethqsets--;
5679 if (adap->sge.ethqsets <= n)
5680 break;
5681 }
5682 }
5683
5684 n = 0;
5685 for_each_port(adap, i) {
5686 pi = adap2pinfo(adap, i);
5687 pi->first_qset = n;
5688 n += pi->nqsets;
5689 }
5690}
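/*
 * Sketch of the trimming above: queue sets are removed round-robin, one per
 * port per pass, and only from ports that still have more than one, so no
 * port ever drops below a single queue set. With illustrative counts
 * {8, 8, 1, 1} (18 total) and n == 14, two passes yield {6, 6, 1, 1},
 * after which first_qset offsets are recomputed as the running sum
 * 0, 6, 12, 13.
 */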
5691
5692/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5693#define EXTRA_VECS 2
5694
5695static int enable_msix(struct adapter *adap)
5696{
5697 int ofld_need = 0;
5698 int i, err, want, need;
5699 struct sge *s = &adap->sge;
5700 unsigned int nchan = adap->params.nports;
5701 struct msix_entry entries[MAX_INGQ + 1];
5702
5703 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5704 entries[i].entry = i;
5705
5706 want = s->max_ethqsets + EXTRA_VECS;
5707 if (is_offload(adap)) {
5708 want += s->rdmaqs + s->ofldqsets;
5709 /* need nchan for each possible ULD */
5710 ofld_need = 2 * nchan;
5711 }
5712 need = adap->params.nports + EXTRA_VECS + ofld_need;
5713
5714 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5715 want = err;
5716
5717 if (!err) {
5718 /*
5719 * Distribute available vectors to the various queue groups.
5720 * Every group gets its minimum requirement and NIC gets top
5721 * priority for leftovers.
5722 */
5723 i = want - EXTRA_VECS - ofld_need;
5724 if (i < s->max_ethqsets) {
5725 s->max_ethqsets = i;
5726 if (i < s->ethqsets)
5727 reduce_ethqs(adap, i);
5728 }
5729 if (is_offload(adap)) {
5730 i = want - EXTRA_VECS - s->max_ethqsets;
5731 i -= ofld_need - nchan;
5732 s->ofldqsets = (i / nchan) * nchan; /* round down */
5733 }
5734 for (i = 0; i < want; ++i)
5735 adap->msix_info[i].vec = entries[i].vector;
5736 } else if (err > 0)
5737 dev_info(adap->pdev_dev,
5738 "only %d MSI-X vectors left, not using MSI-X\n", err);
5739 return err;
5740}
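/*
 * Vector accounting sketch (illustrative numbers for a 4-port offload
 * adapter with max_ethqsets == 18, ofldqsets == 8, rdmaqs == 4):
 * want = 18 + 2 + 4 + 8 = 32 and need = 4 + 2 + 8 = 14. If the kernel can
 * only grant, say, 20 vectors, the request is retried at 20; the NIC group
 * is then cut to 20 - 2 - 8 = 10 queue sets via reduce_ethqs(adap, 10),
 * and the offload share is rounded down to a multiple of nchan.
 */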
5741
5742#undef EXTRA_VECS
5743
5744static int init_rss(struct adapter *adap)
5745{
5746 unsigned int i, j;
5747
5748 for_each_port(adap, i) {
5749 struct port_info *pi = adap2pinfo(adap, i);
5750
5751 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5752 if (!pi->rss)
5753 return -ENOMEM;
5754 for (j = 0; j < pi->rss_size; j++)
5755			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
5756 }
5757 return 0;
5758}
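/*
 * ethtool_rxfh_indir_default(j, n) is simply j % n, so each port's RSS
 * indirection table spreads flows uniformly across its queue sets: with
 * 4 queue sets and an rss_size of 128 (sizes illustrative) the table
 * holds the repeating pattern 0, 1, 2, 3, 0, 1, ...
 */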
5759
5760static void print_port_info(const struct net_device *dev)
5761{
5762 static const char *base[] = {
5763		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
5764		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
5765 };
5766
5767	char buf[80];
5768	char *bufp = buf;
5769	const char *spd = "";
5770 const struct port_info *pi = netdev_priv(dev);
5771 const struct adapter *adap = pi->adapter;
5772
5773 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5774 spd = " 2.5 GT/s";
5775 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5776 spd = " 5 GT/s";
5777
5778 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5779 bufp += sprintf(bufp, "100/");
5780 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5781 bufp += sprintf(bufp, "1000/");
5782 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5783 bufp += sprintf(bufp, "10G/");
5784 if (bufp != buf)
5785 --bufp;
5786 sprintf(bufp, "BASE-%s", base[pi->port_type]);
5787
5788 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5789 adap->params.vpd.id,
5790 CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
5791 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5792 (adap->flags & USING_MSIX) ? " MSI-X" :
5793 (adap->flags & USING_MSI) ? " MSI" : "");
5794 netdev_info(dev, "S/N: %s, E/C: %s\n",
5795 adap->params.vpd.sn, adap->params.vpd.ec);
5796}
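/*
 * Hypothetical example of the resulting log line (all values made up):
 *
 *	eth0: Chelsio T440-CR rev 0 10GBASE-R SFP+ RNIC PCIe x8 MSI-X
 *
 * where the "R" before NIC flags an offload-capable adapter and the
 * trailing token reports the interrupt mode actually in use.
 */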
5797
5798static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
5799{
5800	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
5801}
5802
5803/*
5804 * Free the following resources:
5805 * - memory used for tables
5806 * - MSI/MSI-X
5807 * - net devices
5808 * - resources FW is holding for us
5809 */
5810static void free_some_resources(struct adapter *adapter)
5811{
5812 unsigned int i;
5813
5814 t4_free_mem(adapter->l2t);
5815 t4_free_mem(adapter->tids.tid_tab);
5816 disable_msi(adapter);
5817
5818 for_each_port(adapter, i)
5819 if (adapter->port[i]) {
5820 kfree(adap2pinfo(adapter, i)->rss);
5821			free_netdev(adapter->port[i]);
5822		}
5823	if (adapter->flags & FW_OK)
5824		t4_fw_bye(adapter, adapter->fn);
5825}
5826
5827#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5828#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5829		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5830#define SEGMENT_SIZE 128
5831
5832static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5833{
5834	int func, i, err, s_qpp, qpp, num_seg;
5835	struct port_info *pi;
5836	bool highdma = false;
5837 struct adapter *adapter = NULL;
5838
5839 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5840
5841 err = pci_request_regions(pdev, KBUILD_MODNAME);
5842 if (err) {
5843 /* Just info, some other driver may have claimed the device. */
5844 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5845 return err;
5846 }
5847
5848	/* We control everything through one PF */
5849	func = PCI_FUNC(pdev->devfn);
5850	if (func != ent->driver_data) {
5851		pci_save_state(pdev);	/* to restore SR-IOV later */
5852		goto sriov;
5853	}
5854
5855 err = pci_enable_device(pdev);
5856 if (err) {
5857 dev_err(&pdev->dev, "cannot enable PCI device\n");
5858 goto out_release_regions;
5859 }
5860
5861 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5862		highdma = true;
5863 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5864 if (err) {
5865 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5866 "coherent allocations\n");
5867 goto out_disable_device;
5868 }
5869 } else {
5870 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5871 if (err) {
5872 dev_err(&pdev->dev, "no usable DMA configuration\n");
5873 goto out_disable_device;
5874 }
5875 }
5876
5877 pci_enable_pcie_error_reporting(pdev);
5878	enable_pcie_relaxed_ordering(pdev);
5879 pci_set_master(pdev);
5880 pci_save_state(pdev);
5881
5882 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5883 if (!adapter) {
5884 err = -ENOMEM;
5885 goto out_disable_device;
5886 }
5887
5888 adapter->regs = pci_ioremap_bar(pdev, 0);
5889 if (!adapter->regs) {
5890 dev_err(&pdev->dev, "cannot map device registers\n");
5891 err = -ENOMEM;
5892 goto out_free_adapter;
5893 }
5894
5895 adapter->pdev = pdev;
5896 adapter->pdev_dev = &pdev->dev;
5897	adapter->mbox = func;
5898	adapter->fn = func;
5899 adapter->msg_enable = dflt_msg_enable;
5900 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5901
5902 spin_lock_init(&adapter->stats_lock);
5903 spin_lock_init(&adapter->tid_release_lock);
5904
5905 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5906 INIT_WORK(&adapter->db_full_task, process_db_full);
5907 INIT_WORK(&adapter->db_drop_task, process_db_drop);
5908
5909 err = t4_prep_adapter(adapter);
5910 if (err)
5911 goto out_unmap_bar0;
5912
5913 if (!is_t4(adapter->chip)) {
5914 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5915 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5916 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5917 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5918
5919		/* Each segment is 128B long. Write coalescing is enabled
5920		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
5921		 * for the queue is less than the number of segments that can
5922		 * be accommodated in a page.
5923		 */
5924 if (qpp > num_seg) {
5925 dev_err(&pdev->dev,
5926 "Incorrect number of egress queues per page\n");
5927 err = -EINVAL;
5928 goto out_unmap_bar0;
5929 }
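		/* Illustration, assuming 4KB pages: num_seg = 4096 / 128 = 32,
		 * so the probe errors out above if this PF is configured for
		 * more than 32 egress queues per page.
		 */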
5930 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5931 pci_resource_len(pdev, 2));
5932 if (!adapter->bar2) {
5933 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5934 err = -ENOMEM;
5935 goto out_unmap_bar0;
5936 }
5937 }
5938
5939	setup_memwin(adapter);
5940	err = adap_init0(adapter);
5941	setup_memwin_rdma(adapter);
5942 if (err)
5943 goto out_unmap_bar;
5944
5945 for_each_port(adapter, i) {
5946 struct net_device *netdev;
5947
5948 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5949 MAX_ETH_QSETS);
5950 if (!netdev) {
5951 err = -ENOMEM;
5952 goto out_free_dev;
5953 }
5954
5955 SET_NETDEV_DEV(netdev, &pdev->dev);
5956
5957 adapter->port[i] = netdev;
5958 pi = netdev_priv(netdev);
5959 pi->adapter = adapter;
5960 pi->xact_addr_filt = -1;
5961		pi->port_id = i;
5962 netdev->irq = pdev->irq;
5963
5964 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5965 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5966 NETIF_F_RXCSUM | NETIF_F_RXHASH |
5967			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5968 if (highdma)
5969 netdev->hw_features |= NETIF_F_HIGHDMA;
5970 netdev->features |= netdev->hw_features;
b8ff05a9
DM
5971 netdev->vlan_features = netdev->features & VLAN_FEAT;
5972
5973 netdev->priv_flags |= IFF_UNICAST_FLT;
5974
5975 netdev->netdev_ops = &cxgb4_netdev_ops;
5976 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
5977 }
5978
5979 pci_set_drvdata(pdev, adapter);
5980
5981 if (adapter->flags & FW_OK) {
5982		err = t4_port_init(adapter, func, func, 0);
5983 if (err)
5984 goto out_free_dev;
5985 }
5986
5987 /*
5988 * Configure queues and allocate tables now, they can be needed as
5989 * soon as the first register_netdev completes.
5990 */
5991 cfg_queues(adapter);
5992
5993 adapter->l2t = t4_init_l2t();
5994 if (!adapter->l2t) {
5995 /* We tolerate a lack of L2T, giving up some functionality */
5996 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5997 adapter->params.offload = 0;
5998 }
5999
6000 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6001 dev_warn(&pdev->dev, "could not allocate TID table, "
6002 "continuing\n");
6003 adapter->params.offload = 0;
6004 }
6005
6006 /* See what interrupts we'll be using */
6007 if (msi > 1 && enable_msix(adapter) == 0)
6008 adapter->flags |= USING_MSIX;
6009 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6010 adapter->flags |= USING_MSI;
6011
6012 err = init_rss(adapter);
6013 if (err)
6014 goto out_free_dev;
6015
6016 /*
6017 * The card is now ready to go. If any errors occur during device
6018 * registration we do not fail the whole card but rather proceed only
6019 * with the ports we manage to register successfully. However we must
6020 * register at least one net device.
6021 */
6022 for_each_port(adapter, i) {
6023 pi = adap2pinfo(adapter, i);
6024 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6025 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6026
6027 err = register_netdev(adapter->port[i]);
6028 if (err)
6029			break;
6030 adapter->chan_map[pi->tx_chan] = i;
6031 print_port_info(adapter->port[i]);
6032	}
6033	if (i == 0) {
6034 dev_err(&pdev->dev, "could not register any net devices\n");
6035 goto out_free_dev;
6036 }
6037 if (err) {
6038 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6039 err = 0;
6040	}
6041
6042 if (cxgb4_debugfs_root) {
6043 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6044 cxgb4_debugfs_root);
6045 setup_debugfs(adapter);
6046 }
6047
6048 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6049 pdev->needs_freset = 1;
6050
6051 if (is_offload(adapter))
6052 attach_ulds(adapter);
6053
6054sriov:
6055#ifdef CONFIG_PCI_IOV
6056	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6057 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6058 dev_info(&pdev->dev,
6059 "instantiated %u virtual functions\n",
6060 num_vf[func]);
6061#endif
6062 return 0;
6063
6064 out_free_dev:
6065	free_some_resources(adapter);
6066 out_unmap_bar:
6067 if (!is_t4(adapter->chip))
6068 iounmap(adapter->bar2);
6069 out_unmap_bar0:
6070 iounmap(adapter->regs);
6071 out_free_adapter:
6072 kfree(adapter);
6073 out_disable_device:
6074 pci_disable_pcie_error_reporting(pdev);
6075 pci_disable_device(pdev);
6076 out_release_regions:
6077 pci_release_regions(pdev);
6078 return err;
6079}
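/*
 * Note on the unwind above: the error labels release resources in reverse
 * order of acquisition (netdevs and tables, BAR2, BAR0, the adapter
 * struct, PCI error reporting and the device, then the regions), so each
 * label is a safe entry point for every failure after the corresponding
 * setup step.
 */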
6080
6081static void remove_one(struct pci_dev *pdev)
6082{
6083 struct adapter *adapter = pci_get_drvdata(pdev);
6084
6085#ifdef CONFIG_PCI_IOV
6086 pci_disable_sriov(pdev);
6087
6088#endif
6089
6090 if (adapter) {
6091 int i;
6092
6093 if (is_offload(adapter))
6094 detach_ulds(adapter);
6095
6096 for_each_port(adapter, i)
6097			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6098 unregister_netdev(adapter->port[i]);
6099
6100 if (adapter->debugfs_root)
6101 debugfs_remove_recursive(adapter->debugfs_root);
6102
6103 /* If we allocated filters, free up state associated with any
6104 * valid filters ...
6105 */
6106 if (adapter->tids.ftid_tab) {
6107 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6108 for (i = 0; i < (adapter->tids.nftids +
6109 adapter->tids.nsftids); i++, f++)
6110 if (f->valid)
6111 clear_filter(adapter, f);
6112 }
6113
6114 if (adapter->flags & FULL_INIT_DONE)
6115 cxgb_down(adapter);
6116
6117		free_some_resources(adapter);
6118		iounmap(adapter->regs);
6119 if (!is_t4(adapter->chip))
6120 iounmap(adapter->bar2);
b8ff05a9
DM
6121 kfree(adapter);
6122 pci_disable_pcie_error_reporting(pdev);
6123 pci_disable_device(pdev);
6124 pci_release_regions(pdev);
6125	} else
6126 pci_release_regions(pdev);
6127}
6128
6129static struct pci_driver cxgb4_driver = {
6130 .name = KBUILD_MODNAME,
6131 .id_table = cxgb4_pci_tbl,
6132 .probe = init_one,
6133	.remove = remove_one,
6134	.err_handler = &cxgb4_eeh,
6135};
6136
6137static int __init cxgb4_init_module(void)
6138{
6139 int ret;
6140
6141 workq = create_singlethread_workqueue("cxgb4");
6142 if (!workq)
6143 return -ENOMEM;
6144
b8ff05a9
DM
6145 /* Debugfs support is optional, just warn if this fails */
6146 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6147 if (!cxgb4_debugfs_root)
6148		pr_warn("could not create debugfs entry, continuing\n");
6149
6150 ret = pci_register_driver(&cxgb4_driver);
6151	if (ret < 0) {
6152		debugfs_remove(cxgb4_debugfs_root);
6153 destroy_workqueue(workq);
6154 }
6155
6156 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6157
b8ff05a9
DM
6158 return ret;
6159}
6160
6161static void __exit cxgb4_cleanup_module(void)
6162{
6163	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6164 pci_unregister_driver(&cxgb4_driver);
6165 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6166 flush_workqueue(workq);
6167 destroy_workqueue(workq);
6168}
6169
6170module_init(cxgb4_init_module);
6171module_exit(cxgb4_cleanup_module);