/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with them.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet, so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	for (;;) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask.
		 * Otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec-1));
		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
	/*NOTREACHED*/
}
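
/*
 * A minimal sketch (illustrative only, not driver code) of the lowest-set-bit
 * trick used in the loop above: v & (v - 1) clears the lowest set bit of v,
 * so XORing the result with v isolates that bit.
 *
 *	unsigned int lowest_set_bit(unsigned int v)
 *	{
 *		return v ^ (v & (v - 1));
 *	}
 *
 * E.g. with portvec = 0xa (ports 1 and 3 active), the first iteration yields
 * pmask = 0xa ^ (0xa & 0x9) = 0x2 (port 1); after that bit is cleared, the
 * next iteration yields 0x8 (port 3).
 */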

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5014, 4),
	CH_DEVICE(0x5015, 4),
	CH_DEVICE(0x5080, 4),
	CH_DEVICE(0x5081, 4),
	CH_DEVICE(0x5082, 4),
	CH_DEVICE(0x5083, 4),
	CH_DEVICE(0x5084, 4),
	CH_DEVICE(0x5085, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	CH_DEVICE(0x5414, 4),
	CH_DEVICE(0x5415, 4),
	CH_DEVICE(0x5480, 4),
	CH_DEVICE(0x5481, 4),
	CH_DEVICE(0x5482, 4),
	CH_DEVICE(0x5483, 4),
	CH_DEVICE(0x5484, 4),
	CH_DEVICE(0x5485, 4),
	{ 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
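
/*
 * Illustrative usage (not driver code): like any module parameter, "msi" can
 * be set at load time, e.g.
 *
 *	modprobe cxgb4 msi=0
 *
 * to force legacy INTx interrupts; the 0644 permissions also expose it under
 * /sys/module/cxgb4/parameters/msi.
 */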

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

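/*
 * Worked example of the alignment described above (illustrative only): an
 * Ethernet header is 14 bytes, so with a 2-byte buffer offset the IP header
 * starts at byte 2 + 14 = 16, a 4-byte boundary; with an offset of 0 it
 * would start at byte 14, which is not 4-byte aligned.
 */
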
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};
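
/*
 * Worked bit budget for the default selection above (illustrative only):
 * 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, which fits in the
 * 36-bit compressed tuple; adding e.g. Ether Type (16 bits) would not.
 */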

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
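
/*
 * Illustrative note (an inference from the inline comment above): the
 * threshold appears to be expressed in units of 64 doorbell fifo entries,
 * so the default of 10 corresponds to 10 * 64 = 640 entries.
 */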

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
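
/*
 * Illustrative example (not driver code) of the mapping above: if a port
 * owns four queue sets and @queues = { 0, 1, 2, 3 }, then rss[i] becomes
 * the absolute SGE queue id of ethrxq[first_qset + queues[i]], which is the
 * form the hardware RSS table expects.
 */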

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
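
/*
 * A minimal usage sketch (illustrative only, not driver code): callers treat
 * t4_alloc_mem() like kzalloc() for potentially large tables, e.g.
 *
 *	struct filter_entry *tab = t4_alloc_mem(ntids * sizeof(*tab));
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(tab);
 *
 * t4_free_mem() picks vfree() or kfree() via is_vmalloc_addr(), so callers
 * need not remember which allocator satisfied the request.
 */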

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"GROpackets         ",
	"GROmerged          ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
		data++;
	} else {
		memset(data, 0, 2 * sizeof(u64));
		data += 2;	/* advance past the two zeroed counters */
	}
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
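
/*
 * Worked example (illustrative only, assuming CHELSIO_CHIP_VERSION() yields
 * 4 for a T4 part): a T4 rev 1 adapter would encode as
 *
 *	0x4 | (1 << 10) | (1 << 16) = 0x10404
 *
 * i.e. chip version in bits 0..9, chip revision in bits 10..15 and register
 * dump version 1 in bits 16..23.
 */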

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b9c, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a04,
		0x30a0c, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x31600,
		0x31608, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e04, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32208, 0x3223c,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a04,
		0x34a0c, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x35600,
		0x35608, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e04, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36208, 0x3623c,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a04,
		0x38a0c, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x39600,
		0x39608, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e04, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a208, 0x3a23c,
		0x3a600, 0x3a630,
1987 0x3aa00, 0x3aabc,
1988 0x3ab00, 0x3ab70,
1989 0x3b000, 0x3b048,
1990 0x3b060, 0x3b09c,
1991 0x3b0f0, 0x3b148,
1992 0x3b160, 0x3b19c,
1993 0x3b1f0, 0x3b2e4,
1994 0x3b2f8, 0x3b3e4,
1995 0x3b3f8, 0x3b448,
1996 0x3b460, 0x3b49c,
1997 0x3b4f0, 0x3b548,
1998 0x3b560, 0x3b59c,
1999 0x3b5f0, 0x3b6e4,
2000 0x3b6f8, 0x3b7e4,
2001 0x3b7f8, 0x3b7fc,
2002 0x3b814, 0x3b814,
2003 0x3b82c, 0x3b82c,
2004 0x3b880, 0x3b88c,
2005 0x3b8e8, 0x3b8ec,
2006 0x3b900, 0x3b948,
2007 0x3b960, 0x3b99c,
2008 0x3b9f0, 0x3bae4,
2009 0x3baf8, 0x3bb10,
2010 0x3bb28, 0x3bb28,
2011 0x3bb3c, 0x3bb50,
2012 0x3bbf0, 0x3bc10,
2013 0x3bc28, 0x3bc28,
2014 0x3bc3c, 0x3bc50,
2015 0x3bcf0, 0x3bcfc,
2016 0x3c000, 0x3c030,
2017 0x3c100, 0x3c144,
2018 0x3c190, 0x3c1d0,
2019 0x3c200, 0x3c318,
2020 0x3c400, 0x3c52c,
2021 0x3c540, 0x3c61c,
2022 0x3c800, 0x3c834,
2023 0x3c8c0, 0x3c908,
2024 0x3c910, 0x3c9ac,
2025 0x3ca00, 0x3ca04,
2026 0x3ca0c, 0x3ca2c,
2027 0x3ca44, 0x3ca50,
2028 0x3ca74, 0x3cc24,
2029 0x3cd08, 0x3cd14,
2030 0x3cd1c, 0x3cd20,
2031 0x3cd3c, 0x3cd50,
2032 0x3d200, 0x3d20c,
2033 0x3d220, 0x3d220,
2034 0x3d240, 0x3d240,
2035 0x3d600, 0x3d600,
2036 0x3d608, 0x3d60c,
2037 0x3da00, 0x3da1c,
2038 0x3de04, 0x3de20,
2039 0x3de38, 0x3de3c,
2040 0x3de80, 0x3de80,
2041 0x3de88, 0x3dea8,
2042 0x3deb0, 0x3deb4,
2043 0x3dec8, 0x3ded4,
2044 0x3dfb8, 0x3e004,
2045 0x3e208, 0x3e23c,
2046 0x3e600, 0x3e630,
2047 0x3ea00, 0x3eabc,
2048 0x3eb00, 0x3eb70,
2049 0x3f000, 0x3f048,
2050 0x3f060, 0x3f09c,
2051 0x3f0f0, 0x3f148,
2052 0x3f160, 0x3f19c,
2053 0x3f1f0, 0x3f2e4,
2054 0x3f2f8, 0x3f3e4,
2055 0x3f3f8, 0x3f448,
2056 0x3f460, 0x3f49c,
2057 0x3f4f0, 0x3f548,
2058 0x3f560, 0x3f59c,
2059 0x3f5f0, 0x3f6e4,
2060 0x3f6f8, 0x3f7e4,
2061 0x3f7f8, 0x3f7fc,
2062 0x3f814, 0x3f814,
2063 0x3f82c, 0x3f82c,
2064 0x3f880, 0x3f88c,
2065 0x3f8e8, 0x3f8ec,
2066 0x3f900, 0x3f948,
2067 0x3f960, 0x3f99c,
2068 0x3f9f0, 0x3fae4,
2069 0x3faf8, 0x3fb10,
2070 0x3fb28, 0x3fb28,
2071 0x3fb3c, 0x3fb50,
2072 0x3fbf0, 0x3fc10,
2073 0x3fc28, 0x3fc28,
2074 0x3fc3c, 0x3fc50,
2075 0x3fcf0, 0x3fcfc,
2076 0x40000, 0x4000c,
2077 0x40040, 0x40068,
2078 0x40080, 0x40144,
2079 0x40180, 0x4018c,
2080 0x40200, 0x40298,
2081 0x402ac, 0x4033c,
2082 0x403f8, 0x403fc,
c1f49e3e 2083 0x41304, 0x413c4,
251f9e88
SR
2084 0x41400, 0x4141c,
2085 0x41480, 0x414d0,
2086 0x44000, 0x44078,
2087 0x440c0, 0x44278,
2088 0x442c0, 0x44478,
2089 0x444c0, 0x44678,
2090 0x446c0, 0x44878,
2091 0x448c0, 0x449fc,
2092 0x45000, 0x45068,
2093 0x45080, 0x45084,
2094 0x450a0, 0x450b0,
2095 0x45200, 0x45268,
2096 0x45280, 0x45284,
2097 0x452a0, 0x452b0,
2098 0x460c0, 0x460e4,
2099 0x47000, 0x4708c,
2100 0x47200, 0x47250,
2101 0x47400, 0x47420,
2102 0x47600, 0x47618,
2103 0x47800, 0x47814,
2104 0x48000, 0x4800c,
2105 0x48040, 0x48068,
2106 0x48080, 0x48144,
2107 0x48180, 0x4818c,
2108 0x48200, 0x48298,
2109 0x482ac, 0x4833c,
2110 0x483f8, 0x483fc,
c1f49e3e 2111 0x49304, 0x493c4,
251f9e88
SR
2112 0x49400, 0x4941c,
2113 0x49480, 0x494d0,
2114 0x4c000, 0x4c078,
2115 0x4c0c0, 0x4c278,
2116 0x4c2c0, 0x4c478,
2117 0x4c4c0, 0x4c678,
2118 0x4c6c0, 0x4c878,
2119 0x4c8c0, 0x4c9fc,
2120 0x4d000, 0x4d068,
2121 0x4d080, 0x4d084,
2122 0x4d0a0, 0x4d0b0,
2123 0x4d200, 0x4d268,
2124 0x4d280, 0x4d284,
2125 0x4d2a0, 0x4d2b0,
2126 0x4e0c0, 0x4e0e4,
2127 0x4f000, 0x4f08c,
2128 0x4f200, 0x4f250,
2129 0x4f400, 0x4f420,
2130 0x4f600, 0x4f618,
2131 0x4f800, 0x4f814,
2132 0x50000, 0x500cc,
2133 0x50400, 0x50400,
2134 0x50800, 0x508cc,
2135 0x50c00, 0x50c00,
2136 0x51000, 0x5101c,
2137 0x51300, 0x51308,
2138 };
2139
b8ff05a9
DM
2140 int i;
2141 struct adapter *ap = netdev2adap(dev);
251f9e88
SR
2142 static const unsigned int *reg_ranges;
2143 int arr_size = 0, buf_size = 0;
2144
d14807dd 2145 if (is_t4(ap->params.chip)) {
251f9e88
SR
2146 reg_ranges = &t4_reg_ranges[0];
2147 arr_size = ARRAY_SIZE(t4_reg_ranges);
2148 buf_size = T4_REGMAP_SIZE;
2149 } else {
2150 reg_ranges = &t5_reg_ranges[0];
2151 arr_size = ARRAY_SIZE(t5_reg_ranges);
2152 buf_size = T5_REGMAP_SIZE;
2153 }
b8ff05a9
DM
2154
2155 regs->version = mk_adap_vers(ap);
2156
251f9e88
SR
2157 memset(buf, 0, buf_size);
2158 for (i = 0; i < arr_size; i += 2)
b8ff05a9
DM
2159 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2160}
2161
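Each range table above is a flat list of inclusive pairs, { start0, end0, start1, end1, ... }, which is why the dump loop above steps by two. A quick sanity check on the arithmetic, assuming 32-bit registers:

	/* The pair (0x1008, 0x1148) covers ((0x1148 - 0x1008) / 4) + 1 = 81
	 * registers; a single register is encoded as equal endpoints,
	 * e.g. (0x25600, 0x25600).
	 */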
2162static int restart_autoneg(struct net_device *dev)
2163{
2164 struct port_info *p = netdev_priv(dev);
2165
2166 if (!netif_running(dev))
2167 return -EAGAIN;
2168 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2169 return -EINVAL;
060e0c75 2170 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
b8ff05a9
DM
2171 return 0;
2172}
2173
c5e06360
DM
2174static int identify_port(struct net_device *dev,
2175 enum ethtool_phys_id_state state)
b8ff05a9 2176{
c5e06360 2177 unsigned int val;
060e0c75
DM
2178 struct adapter *adap = netdev2adap(dev);
2179
c5e06360
DM
2180 if (state == ETHTOOL_ID_ACTIVE)
2181 val = 0xffff;
2182 else if (state == ETHTOOL_ID_INACTIVE)
2183 val = 0;
2184 else
2185 return -EINVAL;
b8ff05a9 2186
c5e06360 2187 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
b8ff05a9
DM
2188}
2189
2190static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2191{
2192 unsigned int v = 0;
2193
a0881cab
DM
2194 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2195 type == FW_PORT_TYPE_BT_XAUI) {
b8ff05a9
DM
2196 v |= SUPPORTED_TP;
2197 if (caps & FW_PORT_CAP_SPEED_100M)
2198 v |= SUPPORTED_100baseT_Full;
2199 if (caps & FW_PORT_CAP_SPEED_1G)
2200 v |= SUPPORTED_1000baseT_Full;
2201 if (caps & FW_PORT_CAP_SPEED_10G)
2202 v |= SUPPORTED_10000baseT_Full;
2203 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2204 v |= SUPPORTED_Backplane;
2205 if (caps & FW_PORT_CAP_SPEED_1G)
2206 v |= SUPPORTED_1000baseKX_Full;
2207 if (caps & FW_PORT_CAP_SPEED_10G)
2208 v |= SUPPORTED_10000baseKX4_Full;
2209 } else if (type == FW_PORT_TYPE_KR)
2210 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
a0881cab 2211 else if (type == FW_PORT_TYPE_BP_AP)
7d5e77aa
DM
2212 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2213 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2214 else if (type == FW_PORT_TYPE_BP4_AP)
2215 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2216 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2217 SUPPORTED_10000baseKX4_Full;
a0881cab
DM
2218 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2219 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
b8ff05a9 2220 v |= SUPPORTED_FIBRE;
72aca4bf
KS
2221 else if (type == FW_PORT_TYPE_BP40_BA)
2222 v |= SUPPORTED_40000baseSR4_Full;
b8ff05a9
DM
2223
2224 if (caps & FW_PORT_CAP_ANEG)
2225 v |= SUPPORTED_Autoneg;
2226 return v;
2227}
2228
2229static unsigned int to_fw_linkcaps(unsigned int caps)
2230{
2231 unsigned int v = 0;
2232
2233 if (caps & ADVERTISED_100baseT_Full)
2234 v |= FW_PORT_CAP_SPEED_100M;
2235 if (caps & ADVERTISED_1000baseT_Full)
2236 v |= FW_PORT_CAP_SPEED_1G;
2237 if (caps & ADVERTISED_10000baseT_Full)
2238 v |= FW_PORT_CAP_SPEED_10G;
72aca4bf
KS
2239 if (caps & ADVERTISED_40000baseSR4_Full)
2240 v |= FW_PORT_CAP_SPEED_40G;
b8ff05a9
DM
2241 return v;
2242}
2243
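These two helpers are straight bitmask translations between ethtool's ADVERTISED_*/SUPPORTED_* namespaces and the firmware's FW_PORT_CAP_* bits. A sketch of the round trip, using only the macros already referenced above (the values in comments are what the code above computes):

	unsigned int adv = ADVERTISED_1000baseT_Full |
			   ADVERTISED_10000baseT_Full;
	unsigned int fw_caps = to_fw_linkcaps(adv);
	/* fw_caps == (FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G) */

	unsigned int eth = from_fw_linkcaps(FW_PORT_TYPE_BT_XAUI, fw_caps);
	/* eth == (SUPPORTED_TP | SUPPORTED_1000baseT_Full |
	 *	   SUPPORTED_10000baseT_Full); no SUPPORTED_Autoneg, since
	 * FW_PORT_CAP_ANEG was not set in fw_caps.
	 */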
2244static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2245{
2246 const struct port_info *p = netdev_priv(dev);
2247
2248 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
a0881cab 2249 p->port_type == FW_PORT_TYPE_BT_XFI ||
b8ff05a9
DM
2250 p->port_type == FW_PORT_TYPE_BT_XAUI)
2251 cmd->port = PORT_TP;
a0881cab
DM
2252 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2253 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
b8ff05a9 2254 cmd->port = PORT_FIBRE;
a0881cab
DM
2255 else if (p->port_type == FW_PORT_TYPE_SFP) {
2256 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2257 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2258 cmd->port = PORT_DA;
2259 else
2260 cmd->port = PORT_FIBRE;
2261 } else
b8ff05a9
DM
2262 cmd->port = PORT_OTHER;
2263
2264 if (p->mdio_addr >= 0) {
2265 cmd->phy_address = p->mdio_addr;
2266 cmd->transceiver = XCVR_EXTERNAL;
2267 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2268 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2269 } else {
2270 cmd->phy_address = 0; /* not really, but no better option */
2271 cmd->transceiver = XCVR_INTERNAL;
2272 cmd->mdio_support = 0;
2273 }
2274
2275 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2276 cmd->advertising = from_fw_linkcaps(p->port_type,
2277 p->link_cfg.advertising);
70739497
DD
2278 ethtool_cmd_speed_set(cmd,
2279 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
b8ff05a9
DM
2280 cmd->duplex = DUPLEX_FULL;
2281 cmd->autoneg = p->link_cfg.autoneg;
2282 cmd->maxtxpkt = 0;
2283 cmd->maxrxpkt = 0;
2284 return 0;
2285}
2286
2287static unsigned int speed_to_caps(int speed)
2288{
e8b39015 2289 if (speed == 100)
b8ff05a9 2290 return FW_PORT_CAP_SPEED_100M;
e8b39015 2291 if (speed == 1000)
b8ff05a9 2292 return FW_PORT_CAP_SPEED_1G;
e8b39015 2293 if (speed == 10000)
b8ff05a9 2294 return FW_PORT_CAP_SPEED_10G;
e8b39015 2295 if (speed == 40000)
72aca4bf 2296 return FW_PORT_CAP_SPEED_40G;
b8ff05a9
DM
2297 return 0;
2298}
2299
2300static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2301{
2302 unsigned int cap;
2303 struct port_info *p = netdev_priv(dev);
2304 struct link_config *lc = &p->link_cfg;
25db0338 2305 u32 speed = ethtool_cmd_speed(cmd);
b8ff05a9
DM
2306
2307 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2308 return -EINVAL;
2309
2310 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2311 /*
2312 * PHY offers a single speed. See if that's what's
2313 * being requested.
2314 */
2315 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
2316 (lc->supported & speed_to_caps(speed)))
2317 return 0;
b8ff05a9
DM
2318 return -EINVAL;
2319 }
2320
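	/* Forcing a speed is only honoured for 100M below; 1G, 10G and
	 * 40G links must come up via autonegotiation and are rejected.
	 */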
2321 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 2322 cap = speed_to_caps(speed);
b8ff05a9 2323
72aca4bf 2324 if (!(lc->supported & cap) ||
e8b39015
BH
2325 (speed == 1000) ||
2326 (speed == 10000) ||
72aca4bf 2327 (speed == 40000))
b8ff05a9
DM
2328 return -EINVAL;
2329 lc->requested_speed = cap;
2330 lc->advertising = 0;
2331 } else {
2332 cap = to_fw_linkcaps(cmd->advertising);
2333 if (!(lc->supported & cap))
2334 return -EINVAL;
2335 lc->requested_speed = 0;
2336 lc->advertising = cap | FW_PORT_CAP_ANEG;
2337 }
2338 lc->autoneg = cmd->autoneg;
2339
2340 if (netif_running(dev))
060e0c75
DM
2341 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2342 lc);
b8ff05a9
DM
2343 return 0;
2344}
2345
2346static void get_pauseparam(struct net_device *dev,
2347 struct ethtool_pauseparam *epause)
2348{
2349 struct port_info *p = netdev_priv(dev);
2350
2351 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2352 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2353 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2354}
2355
2356static int set_pauseparam(struct net_device *dev,
2357 struct ethtool_pauseparam *epause)
2358{
2359 struct port_info *p = netdev_priv(dev);
2360 struct link_config *lc = &p->link_cfg;
2361
2362 if (epause->autoneg == AUTONEG_DISABLE)
2363 lc->requested_fc = 0;
2364 else if (lc->supported & FW_PORT_CAP_ANEG)
2365 lc->requested_fc = PAUSE_AUTONEG;
2366 else
2367 return -EINVAL;
2368
2369 if (epause->rx_pause)
2370 lc->requested_fc |= PAUSE_RX;
2371 if (epause->tx_pause)
2372 lc->requested_fc |= PAUSE_TX;
2373 if (netif_running(dev))
060e0c75
DM
2374 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2375 lc);
b8ff05a9
DM
2376 return 0;
2377}
2378
b8ff05a9
DM
2379static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2380{
2381 const struct port_info *pi = netdev_priv(dev);
2382 const struct sge *s = &pi->adapter->sge;
2383
2384 e->rx_max_pending = MAX_RX_BUFFERS;
2385 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2386 e->rx_jumbo_max_pending = 0;
2387 e->tx_max_pending = MAX_TXQ_ENTRIES;
2388
2389 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2390 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2391 e->rx_jumbo_pending = 0;
2392 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2393}
2394
2395static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2396{
2397 int i;
2398 const struct port_info *pi = netdev_priv(dev);
2399 struct adapter *adapter = pi->adapter;
2400 struct sge *s = &adapter->sge;
2401
2402 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2403 e->tx_pending > MAX_TXQ_ENTRIES ||
2404 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2405 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2406 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2407 return -EINVAL;
2408
2409 if (adapter->flags & FULL_INIT_DONE)
2410 return -EBUSY;
2411
2412 for (i = 0; i < pi->nqsets; ++i) {
2413 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2414 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2415 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2416 }
2417 return 0;
2418}
2419
2420static int closest_timer(const struct sge *s, int time)
2421{
2422 int i, delta, match = 0, min_delta = INT_MAX;
2423
2424 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2425 delta = time - s->timer_val[i];
2426 if (delta < 0)
2427 delta = -delta;
2428 if (delta < min_delta) {
2429 min_delta = delta;
2430 match = i;
2431 }
2432 }
2433 return match;
2434}
2435
2436static int closest_thres(const struct sge *s, int thres)
2437{
2438 int i, delta, match = 0, min_delta = INT_MAX;
2439
2440 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2441 delta = thres - s->counter_val[i];
2442 if (delta < 0)
2443 delta = -delta;
2444 if (delta < min_delta) {
2445 min_delta = delta;
2446 match = i;
2447 }
2448 }
2449 return match;
2450}
2451
2452/*
2453 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2454 */
2455static unsigned int qtimer_val(const struct adapter *adap,
2456 const struct sge_rspq *q)
2457{
2458 unsigned int idx = q->intr_params >> 1;
2459
2460 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2461}
2462
2463/**
2464 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
2465 * @adap: the adapter
2466 * @q: the Rx queue
2467 * @us: the hold-off time in us, or 0 to disable timer
2468 * @cnt: the hold-off packet count, or 0 to disable counter
2469 *
2470 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2471 * one of the two needs to be enabled for the queue to generate interrupts.
2472 */
2473static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2474 unsigned int us, unsigned int cnt)
2475{
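	/* A timer of 0 with a count of 0 would mean the queue never
	 * interrupts; fall back to a holdoff count of one packet.
	 */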
2476 if ((us | cnt) == 0)
2477 cnt = 1;
2478
2479 if (cnt) {
2480 int err;
2481 u32 v, new_idx;
2482
2483 new_idx = closest_thres(&adap->sge, cnt);
2484 if (q->desc && q->pktcnt_idx != new_idx) {
2485 /* the queue has already been created, update it */
2486 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2487 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2488 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
2489 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2490 &new_idx);
b8ff05a9
DM
2491 if (err)
2492 return err;
2493 }
2494 q->pktcnt_idx = new_idx;
2495 }
2496
2497 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2498 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2499 return 0;
2500}
2501
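The packed encoding is the one qtimer_val() above decodes: the chosen timer index sits above bit 0 and QINTR_CNT_EN occupies bit 0. A worked example against a hypothetical timer table (the real values live in adap->sge.timer_val):

	/* timer_val[] = { 1, 5, 10, 50, 100, 200 }   (hypothetical, in us)
	 *
	 * set_rxq_intr_params(adap, q, 60, 8):
	 *   closest_timer() returns 3 (|60 - 50| = 10 is the minimum delta)
	 *   q->intr_params = QINTR_TIMER_IDX(3) | QINTR_CNT_EN
	 *
	 * qtimer_val() then recovers 50 us: (q->intr_params >> 1) == 3.
	 */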
2502static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2503{
2504 const struct port_info *pi = netdev_priv(dev);
2505 struct adapter *adap = pi->adapter;
d4fc9dc2
TLSC
2506 struct sge_rspq *q;
2507 int i;
2508 int r = 0;
2509
2510 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2511 q = &adap->sge.ethrxq[i].rspq;
2512 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2513 c->rx_max_coalesced_frames);
2514 if (r) {
2515 dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2516 break;
2517 }
2518 }
2519 return r;
b8ff05a9
DM
2520}
2521
2522static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2523{
2524 const struct port_info *pi = netdev_priv(dev);
2525 const struct adapter *adap = pi->adapter;
2526 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2527
2528 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2529 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2530 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2531 return 0;
2532}
2533
1478b3ee
DM
2534/**
2535 * eeprom_ptov - translate a physical EEPROM address to virtual
2536 * @phys_addr: the physical EEPROM address
2537 * @fn: the PCI function number
2538 * @sz: size of function-specific area
2539 *
2540 * Translate a physical EEPROM address to virtual. The first 1K is
2541 * accessed through virtual addresses starting at 31K, the rest is
2542 * accessed through virtual addresses starting at 0.
2543 *
2544 * The mapping is as follows:
2545 * [0..1K) -> [31K..32K)
2546 * [1K..1K+A) -> [31K-A..31K)
2547 * [1K+A..ES) -> [0..ES-A-1K)
2548 *
2549 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2550 */
1478b3ee 2551static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2552{
1478b3ee 2553 fn *= sz;
b8ff05a9
DM
2554 if (phys_addr < 1024)
2555 return phys_addr + (31 << 10);
1478b3ee
DM
2556 if (phys_addr < 1024 + fn)
2557 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2558 if (phys_addr < EEPROMSIZE)
1478b3ee 2559 return phys_addr - 1024 - fn;
b8ff05a9
DM
2560 return -EINVAL;
2561}
2562
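A self-contained user-space sketch of the same translation, handy for checking the mapping table in the comment above. EE_SIZE and EE_PFSIZE stand in for the driver's EEPROMSIZE and EEPROMPFSIZE and are assumed to be 32 KB and 1 KB here:

#include <assert.h>

#define EE_SIZE		32768	/* assumed EEPROMSIZE */
#define EE_PFSIZE	1024	/* assumed EEPROMPFSIZE */

static int ptov(unsigned int phys, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys < 1024)
		return phys + (31 << 10);	 /* [0..1K)    -> [31K..32K)   */
	if (phys < 1024 + fn)
		return 31744 - fn + phys - 1024; /* [1K..1K+A)  -> [31K-A..31K) */
	if (phys < EE_SIZE)
		return phys - 1024 - fn;	 /* [1K+A..ES)  -> [0..ES-A-1K) */
	return -1;
}

int main(void)
{
	/* function 1: A = 1K, so [1K..2K) maps to [30K..31K) and 2K maps to 0 */
	assert(ptov(0,    1, EE_PFSIZE) == 31744);
	assert(ptov(1024, 1, EE_PFSIZE) == 30720);
	assert(ptov(2048, 1, EE_PFSIZE) == 0);
	return 0;
}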
2563/*
2564 * The next two routines implement eeprom read/write from physical addresses.
b8ff05a9
DM
2565 */
2566static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2567{
1478b3ee 2568 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2569
2570 if (vaddr >= 0)
2571 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2572 return vaddr < 0 ? vaddr : 0;
2573}
2574
2575static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2576{
1478b3ee 2577 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2578
2579 if (vaddr >= 0)
2580 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2581 return vaddr < 0 ? vaddr : 0;
2582}
2583
2584#define EEPROM_MAGIC 0x38E2F10C
2585
2586static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2587 u8 *data)
2588{
2589 int i, err = 0;
2590 struct adapter *adapter = netdev2adap(dev);
2591
2592 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2593 if (!buf)
2594 return -ENOMEM;
2595
2596 e->magic = EEPROM_MAGIC;
2597 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2598 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2599
2600 if (!err)
2601 memcpy(data, buf + e->offset, e->len);
2602 kfree(buf);
2603 return err;
2604}
2605
2606static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2607 u8 *data)
2608{
2609 u8 *buf;
2610 int err = 0;
2611 u32 aligned_offset, aligned_len, *p;
2612 struct adapter *adapter = netdev2adap(dev);
2613
2614 if (eeprom->magic != EEPROM_MAGIC)
2615 return -EINVAL;
2616
2617 aligned_offset = eeprom->offset & ~3;
2618 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2619
1478b3ee
DM
2620 if (adapter->fn > 0) {
2621 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2622
2623 if (aligned_offset < start ||
2624 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2625 return -EPERM;
2626 }
2627
b8ff05a9
DM
2628 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2629 /*
2630 * RMW possibly needed for first or last words.
2631 */
2632 buf = kmalloc(aligned_len, GFP_KERNEL);
2633 if (!buf)
2634 return -ENOMEM;
2635 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2636 if (!err && aligned_len > 4)
2637 err = eeprom_rd_phys(adapter,
2638 aligned_offset + aligned_len - 4,
2639 (u32 *)&buf[aligned_len - 4]);
2640 if (err)
2641 goto out;
2642 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2643 } else
2644 buf = data;
2645
2646 err = t4_seeprom_wp(adapter, false);
2647 if (err)
2648 goto out;
2649
2650 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2651 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2652 aligned_offset += 4;
2653 }
2654
2655 if (!err)
2656 err = t4_seeprom_wp(adapter, true);
2657out:
2658 if (buf != data)
2659 kfree(buf);
2660 return err;
2661}
2662
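The read-modify-write path turns on the 4-byte alignment arithmetic above; a worked example with illustrative numbers:

	/* eeprom->offset = 6, eeprom->len = 5 (bytes 6..10):
	 *
	 *   aligned_offset = 6 & ~3                 = 4
	 *   aligned_len    = (5 + (6 & 3) + 3) & ~3 = (5 + 2 + 3) & ~3 = 8
	 *
	 * so the code reads the words at 4 and 8 (bytes 4..11), patches
	 * bytes 6..10 into the buffer at offset (6 & 3) = 2, and writes
	 * both words back.
	 */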
2663static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2664{
2665 int ret;
2666 const struct firmware *fw;
2667 struct adapter *adap = netdev2adap(netdev);
2668
2669 ef->data[sizeof(ef->data) - 1] = '\0';
2670 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2671 if (ret < 0)
2672 return ret;
2673
2674 ret = t4_load_fw(adap, fw->data, fw->size);
2675 release_firmware(fw);
2676 if (!ret)
2677 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2678 return ret;
2679}
2680
2681#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2682#define BCAST_CRC 0xa0ccc1a6
2683
2684static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2685{
2686 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2687 wol->wolopts = netdev2adap(dev)->wol;
2688 memset(&wol->sopass, 0, sizeof(wol->sopass));
2689}
2690
2691static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2692{
2693 int err = 0;
2694 struct port_info *pi = netdev_priv(dev);
2695
2696 if (wol->wolopts & ~WOL_SUPPORTED)
2697 return -EINVAL;
2698 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2699 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2700 if (wol->wolopts & WAKE_BCAST) {
2701 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2702 ~0ULL, 0, false);
2703 if (!err)
2704 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2705 ~6ULL, ~0ULL, BCAST_CRC, true);
2706 } else
2707 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2708 return err;
2709}
2710
c8f44aff 2711static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2712{
2ed28baa 2713 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2714 netdev_features_t changed = dev->features ^ features;
19ecae2c 2715 int err;
19ecae2c 2716
f646968f 2717 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2ed28baa 2718 return 0;
19ecae2c 2719
2ed28baa
MM
2720 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2721 -1, -1, -1,
f646968f 2722 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2ed28baa 2723 if (unlikely(err))
f646968f 2724 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
19ecae2c 2725 return err;
87b6cf51
DM
2726}
2727
7850f63f 2728static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2729{
2730 const struct port_info *pi = netdev_priv(dev);
671b0060 2731
7850f63f
BH
2732 return pi->rss_size;
2733}
2734
2735static int get_rss_table(struct net_device *dev, u32 *p)
2736{
2737 const struct port_info *pi = netdev_priv(dev);
2738 unsigned int n = pi->rss_size;
2739
671b0060 2740 while (n--)
7850f63f 2741 p[n] = pi->rss[n];
671b0060
DM
2742 return 0;
2743}
2744
7850f63f 2745static int set_rss_table(struct net_device *dev, const u32 *p)
671b0060
DM
2746{
2747 unsigned int i;
2748 struct port_info *pi = netdev_priv(dev);
2749
7850f63f
BH
2750 for (i = 0; i < pi->rss_size; i++)
2751 pi->rss[i] = p[i];
671b0060
DM
2752 if (pi->adapter->flags & FULL_INIT_DONE)
2753 return write_rss(pi, pi->rss);
2754 return 0;
2755}
2756
2757static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2758 u32 *rules)
671b0060 2759{
f796564a
DM
2760 const struct port_info *pi = netdev_priv(dev);
2761
671b0060 2762 switch (info->cmd) {
f796564a
DM
2763 case ETHTOOL_GRXFH: {
2764 unsigned int v = pi->rss_mode;
2765
2766 info->data = 0;
2767 switch (info->flow_type) {
2768 case TCP_V4_FLOW:
2769 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2770 info->data = RXH_IP_SRC | RXH_IP_DST |
2771 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2772 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2773 info->data = RXH_IP_SRC | RXH_IP_DST;
2774 break;
2775 case UDP_V4_FLOW:
2776 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2777 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2778 info->data = RXH_IP_SRC | RXH_IP_DST |
2779 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2780 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2781 info->data = RXH_IP_SRC | RXH_IP_DST;
2782 break;
2783 case SCTP_V4_FLOW:
2784 case AH_ESP_V4_FLOW:
2785 case IPV4_FLOW:
2786 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2787 info->data = RXH_IP_SRC | RXH_IP_DST;
2788 break;
2789 case TCP_V6_FLOW:
2790 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2791 info->data = RXH_IP_SRC | RXH_IP_DST |
2792 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2793 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2794 info->data = RXH_IP_SRC | RXH_IP_DST;
2795 break;
2796 case UDP_V6_FLOW:
2797 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2798 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2799 info->data = RXH_IP_SRC | RXH_IP_DST |
2800 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2801 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2802 info->data = RXH_IP_SRC | RXH_IP_DST;
2803 break;
2804 case SCTP_V6_FLOW:
2805 case AH_ESP_V6_FLOW:
2806 case IPV6_FLOW:
2807 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2808 info->data = RXH_IP_SRC | RXH_IP_DST;
2809 break;
2810 }
2811 return 0;
2812 }
671b0060 2813 case ETHTOOL_GRXRINGS:
f796564a 2814 info->data = pi->nqsets;
671b0060
DM
2815 return 0;
2816 }
2817 return -EOPNOTSUPP;
2818}
2819
9b07be4b 2820static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
2821 .get_settings = get_settings,
2822 .set_settings = set_settings,
2823 .get_drvinfo = get_drvinfo,
2824 .get_msglevel = get_msglevel,
2825 .set_msglevel = set_msglevel,
2826 .get_ringparam = get_sge_param,
2827 .set_ringparam = set_sge_param,
2828 .get_coalesce = get_coalesce,
2829 .set_coalesce = set_coalesce,
2830 .get_eeprom_len = get_eeprom_len,
2831 .get_eeprom = get_eeprom,
2832 .set_eeprom = set_eeprom,
2833 .get_pauseparam = get_pauseparam,
2834 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
2835 .get_link = ethtool_op_get_link,
2836 .get_strings = get_strings,
c5e06360 2837 .set_phys_id = identify_port,
b8ff05a9
DM
2838 .nway_reset = restart_autoneg,
2839 .get_sset_count = get_sset_count,
2840 .get_ethtool_stats = get_stats,
2841 .get_regs_len = get_regs_len,
2842 .get_regs = get_regs,
2843 .get_wol = get_wol,
2844 .set_wol = set_wol,
671b0060 2845 .get_rxnfc = get_rxnfc,
7850f63f 2846 .get_rxfh_indir_size = get_rss_table_size,
671b0060
DM
2847 .get_rxfh_indir = get_rss_table,
2848 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
2849 .flash_device = set_flash,
2850};
2851
2852/*
2853 * debugfs support
2854 */
b8ff05a9
DM
2855static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2856 loff_t *ppos)
2857{
2858 loff_t pos = *ppos;
496ad9aa 2859 loff_t avail = file_inode(file)->i_size;
b8ff05a9
DM
2860 unsigned int mem = (uintptr_t)file->private_data & 3;
2861 struct adapter *adap = file->private_data - mem;
2862
2863 if (pos < 0)
2864 return -EINVAL;
2865 if (pos >= avail)
2866 return 0;
2867 if (count > avail - pos)
2868 count = avail - pos;
2869
2870 while (count) {
2871 size_t len;
2872 int ret, ofst;
2873 __be32 data[16];
2874
19dd37ba
SR
2875 if ((mem == MEM_MC) || (mem == MEM_MC1))
2876 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
b8ff05a9
DM
2877 else
2878 ret = t4_edc_read(adap, mem, pos, data, NULL);
2879 if (ret)
2880 return ret;
2881
2882 ofst = pos % sizeof(data);
2883 len = min(count, sizeof(data) - ofst);
2884 if (copy_to_user(buf, (u8 *)data + ofst, len))
2885 return -EFAULT;
2886
2887 buf += len;
2888 pos += len;
2889 count -= len;
2890 }
2891 count = pos - *ppos;
2892 *ppos = pos;
2893 return count;
2894}
2895
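mem_read() unpacks two values from one pointer: add_debugfs_mem() below registers each file with `(void *)adap + idx` as private data, folding the memory index (at most 3, so two bits) into the low bits of the adapter pointer. A stand-alone sketch of the encode/decode pair, with hypothetical names:

#include <stdint.h>

struct adapter;	/* opaque here; only the pointer arithmetic matters */

/* idx must fit in the pointer's low two bits, i.e. the adapter must be
 * at least 4-byte aligned (the kernel allocator guarantees far more).
 */
static void *mem_encode(struct adapter *adap, unsigned int idx)
{
	return (void *)((uintptr_t)adap + idx);
}

static struct adapter *mem_decode(void *private_data, unsigned int *idx)
{
	*idx = (uintptr_t)private_data & 3;
	return (struct adapter *)((uintptr_t)private_data - *idx);
}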
2896static const struct file_operations mem_debugfs_fops = {
2897 .owner = THIS_MODULE,
234e3405 2898 .open = simple_open,
b8ff05a9 2899 .read = mem_read,
6038f373 2900 .llseek = default_llseek,
b8ff05a9
DM
2901};
2902
91744948 2903static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 2904 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
2905{
2906 struct dentry *de;
2907
2908 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2909 (void *)adap + idx, &mem_debugfs_fops);
2910 if (de && de->d_inode)
2911 de->d_inode->i_size = size_mb << 20;
2912}
2913
91744948 2914static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
2915{
2916 int i;
19dd37ba 2917 u32 size;
b8ff05a9
DM
2918
2919 if (IS_ERR_OR_NULL(adap->debugfs_root))
2920 return -1;
2921
2922 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
19dd37ba
SR
2923 if (i & EDRAM0_ENABLE) {
2924 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2925 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2926 }
2927 if (i & EDRAM1_ENABLE) {
2928 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2929 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2930 }
d14807dd 2931 if (is_t4(adap->params.chip)) {
19dd37ba
SR
2932 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2933 if (i & EXT_MEM_ENABLE)
2934 add_debugfs_mem(adap, "mc", MEM_MC,
2935 EXT_MEM_SIZE_GET(size));
2936 } else {
2937 if (i & EXT_MEM_ENABLE) {
2938 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2939 add_debugfs_mem(adap, "mc0", MEM_MC0,
2940 EXT_MEM_SIZE_GET(size));
2941 }
2942 if (i & EXT_MEM1_ENABLE) {
2943 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2944 add_debugfs_mem(adap, "mc1", MEM_MC1,
2945 EXT_MEM_SIZE_GET(size));
2946 }
2947 }
b8ff05a9
DM
2948 if (adap->l2t)
2949 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2950 &t4_l2t_fops);
2951 return 0;
2952}
2953
2954/*
2955 * upper-layer driver support
2956 */
2957
2958/*
2959 * Allocate an active-open TID and set it to the supplied value.
2960 */
2961int cxgb4_alloc_atid(struct tid_info *t, void *data)
2962{
2963 int atid = -1;
2964
2965 spin_lock_bh(&t->atid_lock);
2966 if (t->afree) {
2967 union aopen_entry *p = t->afree;
2968
f2b7e78d 2969 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
2970 t->afree = p->next;
2971 p->data = data;
2972 t->atids_in_use++;
2973 }
2974 spin_unlock_bh(&t->atid_lock);
2975 return atid;
2976}
2977EXPORT_SYMBOL(cxgb4_alloc_atid);
2978
2979/*
2980 * Release an active-open TID.
2981 */
2982void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2983{
f2b7e78d 2984 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
2985
2986 spin_lock_bh(&t->atid_lock);
2987 p->next = t->afree;
2988 t->afree = p;
2989 t->atids_in_use--;
2990 spin_unlock_bh(&t->atid_lock);
2991}
2992EXPORT_SYMBOL(cxgb4_free_atid);
2993
2994/*
2995 * Allocate a server TID and set it to the supplied value.
2996 */
2997int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2998{
2999 int stid;
3000
3001 spin_lock_bh(&t->stid_lock);
3002 if (family == PF_INET) {
3003 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3004 if (stid < t->nstids)
3005 __set_bit(stid, t->stid_bmap);
3006 else
3007 stid = -1;
3008 } else {
3009 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3010 if (stid < 0)
3011 stid = -1;
3012 }
3013 if (stid >= 0) {
3014 t->stid_tab[stid].data = data;
3015 stid += t->stid_base;
15f63b74
KS
3016 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3017 * This is equivalent to 4 TIDs. With CLIP enabled it
3018 * needs 2 TIDs.
3019 */
3020 if (family == PF_INET)
3021 t->stids_in_use++;
3022 else
3023 t->stids_in_use += 4;
b8ff05a9
DM
3024 }
3025 spin_unlock_bh(&t->stid_lock);
3026 return stid;
3027}
3028EXPORT_SYMBOL(cxgb4_alloc_stid);
3029
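For the IPv6 case, bitmap_find_free_region(bmap, nbits, 2) hands back an order-2 region: 1 << 2 = 4 contiguous bits aligned on a multiple of 4, which is what the `stids_in_use += 4` accounting above reflects. A rough user-space equivalent of that allocation rule (byte-per-bit for clarity; the kernel operates on unsigned long words):

static int find_free_region_order2(unsigned char *map, int nbits)
{
	int base;

	for (base = 0; base + 4 <= nbits; base += 4) {
		if (!map[base] && !map[base + 1] &&
		    !map[base + 2] && !map[base + 3]) {
			map[base] = map[base + 1] = 1;
			map[base + 2] = map[base + 3] = 1;
			return base;	/* always a multiple of 4 */
		}
	}
	return -1;
}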
dca4faeb
VP
3030/* Allocate a server filter TID and set it to the supplied value.
3031 */
3032int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3033{
3034 int stid;
3035
3036 spin_lock_bh(&t->stid_lock);
3037 if (family == PF_INET) {
3038 stid = find_next_zero_bit(t->stid_bmap,
3039 t->nstids + t->nsftids, t->nstids);
3040 if (stid < (t->nstids + t->nsftids))
3041 __set_bit(stid, t->stid_bmap);
3042 else
3043 stid = -1;
3044 } else {
3045 stid = -1;
3046 }
3047 if (stid >= 0) {
3048 t->stid_tab[stid].data = data;
470c60c4
KS
3049 stid -= t->nstids;
3050 stid += t->sftid_base;
dca4faeb
VP
3051 t->stids_in_use++;
3052 }
3053 spin_unlock_bh(&t->stid_lock);
3054 return stid;
3055}
3056EXPORT_SYMBOL(cxgb4_alloc_sftid);
3057
3058/* Release a server TID.
b8ff05a9
DM
3059 */
3060void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3061{
470c60c4
KS
3062 /* Is it a server filter TID? */
3063 if (t->nsftids && (stid >= t->sftid_base)) {
3064 stid -= t->sftid_base;
3065 stid += t->nstids;
3066 } else {
3067 stid -= t->stid_base;
3068 }
3069
b8ff05a9
DM
3070 spin_lock_bh(&t->stid_lock);
3071 if (family == PF_INET)
3072 __clear_bit(stid, t->stid_bmap);
3073 else
3074 bitmap_release_region(t->stid_bmap, stid, 2);
3075 t->stid_tab[stid].data = NULL;
15f63b74
KS
3076 if (family == PF_INET)
3077 t->stids_in_use--;
3078 else
3079 t->stids_in_use -= 4;
b8ff05a9
DM
3080 spin_unlock_bh(&t->stid_lock);
3081}
3082EXPORT_SYMBOL(cxgb4_free_stid);
3083
3084/*
3085 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3086 */
3087static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3088 unsigned int tid)
3089{
3090 struct cpl_tid_release *req;
3091
3092 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3093 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3094 INIT_TP_WR(req, tid);
3095 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3096}
3097
3098/*
3099 * Queue a TID release request and if necessary schedule a work queue to
3100 * process it.
3101 */
31b9c19b 3102static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3103 unsigned int tid)
b8ff05a9
DM
3104{
3105 void **p = &t->tid_tab[tid];
3106 struct adapter *adap = container_of(t, struct adapter, tids);
3107
3108 spin_lock_bh(&adap->tid_release_lock);
3109 *p = adap->tid_release_head;
3110 /* Low 2 bits encode the Tx channel number */
3111 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3112 if (!adap->tid_release_task_busy) {
3113 adap->tid_release_task_busy = true;
3069ee9b 3114 queue_work(workq, &adap->tid_release_task);
b8ff05a9
DM
3115 }
3116 spin_unlock_bh(&adap->tid_release_lock);
3117}
b8ff05a9
DM
3118
3119/*
3120 * Process the list of pending TID release requests.
3121 */
3122static void process_tid_release_list(struct work_struct *work)
3123{
3124 struct sk_buff *skb;
3125 struct adapter *adap;
3126
3127 adap = container_of(work, struct adapter, tid_release_task);
3128
3129 spin_lock_bh(&adap->tid_release_lock);
3130 while (adap->tid_release_head) {
3131 void **p = adap->tid_release_head;
3132 unsigned int chan = (uintptr_t)p & 3;
3133 p = (void *)p - chan;
3134
3135 adap->tid_release_head = *p;
3136 *p = NULL;
3137 spin_unlock_bh(&adap->tid_release_lock);
3138
3139 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3140 GFP_KERNEL)))
3141 schedule_timeout_uninterruptible(1);
3142
3143 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3144 t4_ofld_send(adap, skb);
3145 spin_lock_bh(&adap->tid_release_lock);
3146 }
3147 adap->tid_release_task_busy = false;
3148 spin_unlock_bh(&adap->tid_release_lock);
3149}
3150
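The deferred-release list threads through the tid_tab slots themselves: each queued slot stores the previous head with the Tx channel (0..3) tagged into the pointer's low two bits, the same low-bit trick mem_read() uses above; the worker strips the tag before dereferencing. In isolation the round trip looks like this (hypothetical names):

#include <stdint.h>

/* chan must be 0..3 and p must be at least 4-byte aligned, which the
 * void ** slots of tid_tab are by construction.
 */
static void **tag_chan(void **p, unsigned int chan)
{
	return (void **)((uintptr_t)p | chan);
}

static void **untag_chan(void **tagged, unsigned int *chan)
{
	*chan = (uintptr_t)tagged & 3;
	return (void **)((uintptr_t)tagged - *chan);
}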
3151/*
3152 * Release a TID and inform HW. If we are unable to allocate the release
3153 * message we defer to a work queue.
3154 */
3155void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3156{
3157 void *old;
3158 struct sk_buff *skb;
3159 struct adapter *adap = container_of(t, struct adapter, tids);
3160
3161 old = t->tid_tab[tid];
3162 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3163 if (likely(skb)) {
3164 t->tid_tab[tid] = NULL;
3165 mk_tid_release(skb, chan, tid);
3166 t4_ofld_send(adap, skb);
3167 } else
3168 cxgb4_queue_tid_release(t, chan, tid);
3169 if (old)
3170 atomic_dec(&t->tids_in_use);
3171}
3172EXPORT_SYMBOL(cxgb4_remove_tid);
3173
3174/*
3175 * Allocate and initialize the TID tables. Returns 0 on success.
3176 */
3177static int tid_init(struct tid_info *t)
3178{
3179 size_t size;
f2b7e78d 3180 unsigned int stid_bmap_size;
b8ff05a9 3181 unsigned int natids = t->natids;
b6f8eaec 3182 struct adapter *adap = container_of(t, struct adapter, tids);
b8ff05a9 3183
dca4faeb 3184 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
3185 size = t->ntids * sizeof(*t->tid_tab) +
3186 natids * sizeof(*t->atid_tab) +
b8ff05a9 3187 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 3188 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 3189 stid_bmap_size * sizeof(long) +
dca4faeb
VP
3190 t->nftids * sizeof(*t->ftid_tab) +
3191 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 3192
b8ff05a9
DM
3193 t->tid_tab = t4_alloc_mem(size);
3194 if (!t->tid_tab)
3195 return -ENOMEM;
3196
3197 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3198 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 3199 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 3200 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
3201 spin_lock_init(&t->stid_lock);
3202 spin_lock_init(&t->atid_lock);
3203
3204 t->stids_in_use = 0;
3205 t->afree = NULL;
3206 t->atids_in_use = 0;
3207 atomic_set(&t->tids_in_use, 0);
3208
 3209	/* Set up the free list for atid_tab and clear the stid bitmap. */
3210 if (natids) {
3211 while (--natids)
3212 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3213 t->afree = t->atid_tab;
3214 }
dca4faeb 3215 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b6f8eaec
KS
3216 /* Reserve stid 0 for T4/T5 adapters */
3217 if (!t->stid_base &&
3218 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3219 __set_bit(0, t->stid_bmap);
3220
b8ff05a9
DM
3221 return 0;
3222}
3223
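tid_init() sizes one t4_alloc_mem() block for every table and then carves it in declaration order, so each table starts where the previous one ends. The pattern reduced to two tables (a sketch with made-up types, not the driver's):

#include <stdlib.h>

struct two_tabs {
	void **tid_tab;
	unsigned long *stid_bmap;
};

/* One allocation carved in order: the bitmap simply starts at
 * &tid_tab[ntids]. Safe here because both element types have pointer
 * alignment; tid_init() above relies on the same property.
 */
static int two_tabs_init(struct two_tabs *t, size_t ntids, size_t nwords)
{
	size_t size = ntids * sizeof(*t->tid_tab) +
		      nwords * sizeof(*t->stid_bmap);

	t->tid_tab = calloc(1, size);
	if (!t->tid_tab)
		return -1;
	t->stid_bmap = (unsigned long *)&t->tid_tab[ntids];
	return 0;
}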
01bcca68
VP
3224static int cxgb4_clip_get(const struct net_device *dev,
3225 const struct in6_addr *lip)
3226{
3227 struct adapter *adap;
3228 struct fw_clip_cmd c;
3229
3230 adap = netdev2adap(dev);
3231 memset(&c, 0, sizeof(c));
3232 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3233 FW_CMD_REQUEST | FW_CMD_WRITE);
3234 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
12f2a479
JP
3235 c.ip_hi = *(__be64 *)(lip->s6_addr);
3236 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
01bcca68
VP
3237 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3238}
3239
3240static int cxgb4_clip_release(const struct net_device *dev,
3241 const struct in6_addr *lip)
3242{
3243 struct adapter *adap;
3244 struct fw_clip_cmd c;
3245
3246 adap = netdev2adap(dev);
3247 memset(&c, 0, sizeof(c));
3248 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3249 FW_CMD_REQUEST | FW_CMD_READ);
3250 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
12f2a479
JP
3251 c.ip_hi = *(__be64 *)(lip->s6_addr);
3252 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
01bcca68
VP
3253 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3254}
3255
b8ff05a9
DM
3256/**
3257 * cxgb4_create_server - create an IP server
3258 * @dev: the device
3259 * @stid: the server TID
3260 * @sip: local IP address to bind server to
3261 * @sport: the server's TCP port
3262 * @queue: queue to direct messages from this server to
3263 *
3264 * Create an IP server for the given port and address.
3265 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3266 */
3267int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
3268 __be32 sip, __be16 sport, __be16 vlan,
3269 unsigned int queue)
b8ff05a9
DM
3270{
3271 unsigned int chan;
3272 struct sk_buff *skb;
3273 struct adapter *adap;
3274 struct cpl_pass_open_req *req;
80f40c1f 3275 int ret;
b8ff05a9
DM
3276
3277 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3278 if (!skb)
3279 return -ENOMEM;
3280
3281 adap = netdev2adap(dev);
3282 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3283 INIT_TP_WR(req, 0);
3284 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3285 req->local_port = sport;
3286 req->peer_port = htons(0);
3287 req->local_ip = sip;
3288 req->peer_ip = htonl(0);
e46dab4d 3289 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
3290 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3291 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3292 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
80f40c1f
VP
3293 ret = t4_mgmt_tx(adap, skb);
3294 return net_xmit_eval(ret);
b8ff05a9
DM
3295}
3296EXPORT_SYMBOL(cxgb4_create_server);
3297
80f40c1f
VP
 3298/**
 * cxgb4_create_server6 - create an IPv6 server
3299 * @dev: the device
3300 * @stid: the server TID
3301 * @sip: local IPv6 address to bind server to
3302 * @sport: the server's TCP port
3303 * @queue: queue to direct messages from this server to
3304 *
3305 * Create an IPv6 server for the given port and address.
3306 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3307 */
3308int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3309 const struct in6_addr *sip, __be16 sport,
3310 unsigned int queue)
3311{
3312 unsigned int chan;
3313 struct sk_buff *skb;
3314 struct adapter *adap;
3315 struct cpl_pass_open_req6 *req;
3316 int ret;
3317
3318 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3319 if (!skb)
3320 return -ENOMEM;
3321
3322 adap = netdev2adap(dev);
3323 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3324 INIT_TP_WR(req, 0);
3325 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3326 req->local_port = sport;
3327 req->peer_port = htons(0);
3328 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3329 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3330 req->peer_ip_hi = cpu_to_be64(0);
3331 req->peer_ip_lo = cpu_to_be64(0);
3332 chan = rxq_to_chan(&adap->sge, queue);
3333 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3334 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3335 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3336 ret = t4_mgmt_tx(adap, skb);
3337 return net_xmit_eval(ret);
3338}
3339EXPORT_SYMBOL(cxgb4_create_server6);
3340
3341int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3342 unsigned int queue, bool ipv6)
3343{
3344 struct sk_buff *skb;
3345 struct adapter *adap;
3346 struct cpl_close_listsvr_req *req;
3347 int ret;
3348
3349 adap = netdev2adap(dev);
3350
3351 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3352 if (!skb)
3353 return -ENOMEM;
3354
3355 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3356 INIT_TP_WR(req, 0);
3357 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3358 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3359 LISTSVR_IPV6(0)) | QUEUENO(queue));
3360 ret = t4_mgmt_tx(adap, skb);
3361 return net_xmit_eval(ret);
3362}
3363EXPORT_SYMBOL(cxgb4_remove_server);
3364
b8ff05a9
DM
3365/**
3366 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3367 * @mtus: the HW MTU table
3368 * @mtu: the target MTU
3369 * @idx: index of selected entry in the MTU table
3370 *
3371 * Returns the index and the value in the HW MTU table that is closest to
3372 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3373 * table, in which case that smallest available value is selected.
3374 */
3375unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3376 unsigned int *idx)
3377{
3378 unsigned int i = 0;
3379
3380 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3381 ++i;
3382 if (idx)
3383 *idx = i;
3384 return mtus[i];
3385}
3386EXPORT_SYMBOL(cxgb4_best_mtu);
3387
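A worked example against a hypothetical ascending table (the real 16-entry table is programmed into the hardware elsewhere):

	/* mtus[] = { 88, 256, 512, 576, 808, 1024, 1280, 1488,
	 *	      1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600 }
	 *
	 * cxgb4_best_mtu(mtus, 1460, &idx) stops at 1280 (idx == 6),
	 * since 1488 would exceed the target. cxgb4_best_mtu(mtus, 50,
	 * &idx) returns 88 (idx == 0): when the target is below every
	 * entry, the smallest available value is selected.
	 */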
3388/**
3389 * cxgb4_port_chan - get the HW channel of a port
3390 * @dev: the net device for the port
3391 *
3392 * Return the HW Tx channel of the given port.
3393 */
3394unsigned int cxgb4_port_chan(const struct net_device *dev)
3395{
3396 return netdev2pinfo(dev)->tx_chan;
3397}
3398EXPORT_SYMBOL(cxgb4_port_chan);
3399
881806bc
VP
3400unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3401{
3402 struct adapter *adap = netdev2adap(dev);
2cc301d2 3403 u32 v1, v2, lp_count, hp_count;
881806bc 3404
2cc301d2
SR
3405 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3406 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3407 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3408 lp_count = G_LP_COUNT(v1);
3409 hp_count = G_HP_COUNT(v1);
3410 } else {
3411 lp_count = G_LP_COUNT_T5(v1);
3412 hp_count = G_HP_COUNT_T5(v2);
3413 }
3414 return lpfifo ? lp_count : hp_count;
881806bc
VP
3415}
3416EXPORT_SYMBOL(cxgb4_dbfifo_count);
3417
b8ff05a9
DM
3418/**
3419 * cxgb4_port_viid - get the VI id of a port
3420 * @dev: the net device for the port
3421 *
3422 * Return the VI id of the given port.
3423 */
3424unsigned int cxgb4_port_viid(const struct net_device *dev)
3425{
3426 return netdev2pinfo(dev)->viid;
3427}
3428EXPORT_SYMBOL(cxgb4_port_viid);
3429
3430/**
3431 * cxgb4_port_idx - get the index of a port
3432 * @dev: the net device for the port
3433 *
3434 * Return the index of the given port.
3435 */
3436unsigned int cxgb4_port_idx(const struct net_device *dev)
3437{
3438 return netdev2pinfo(dev)->port_id;
3439}
3440EXPORT_SYMBOL(cxgb4_port_idx);
3441
b8ff05a9
DM
3442void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3443 struct tp_tcp_stats *v6)
3444{
3445 struct adapter *adap = pci_get_drvdata(pdev);
3446
3447 spin_lock(&adap->stats_lock);
3448 t4_tp_get_tcp_stats(adap, v4, v6);
3449 spin_unlock(&adap->stats_lock);
3450}
3451EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3452
3453void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3454 const unsigned int *pgsz_order)
3455{
3456 struct adapter *adap = netdev2adap(dev);
3457
3458 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3459 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3460 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3461 HPZ3(pgsz_order[3]));
3462}
3463EXPORT_SYMBOL(cxgb4_iscsi_init);
3464
3069ee9b
VP
3465int cxgb4_flush_eq_cache(struct net_device *dev)
3466{
3467 struct adapter *adap = netdev2adap(dev);
3468 int ret;
3469
3470 ret = t4_fwaddrspace_write(adap, adap->mbox,
3471 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3472 return ret;
3473}
3474EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3475
3476static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3477{
3478 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3479 __be64 indices;
3480 int ret;
3481
3482 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3483 if (!ret) {
404d9e3f
VP
3484 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3485 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
3486 }
3487 return ret;
3488}
3489
3490int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3491 u16 size)
3492{
3493 struct adapter *adap = netdev2adap(dev);
3494 u16 hw_pidx, hw_cidx;
3495 int ret;
3496
3497 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3498 if (ret)
3499 goto out;
3500
3501 if (pidx != hw_pidx) {
3502 u16 delta;
3503
3504 if (pidx >= hw_pidx)
3505 delta = pidx - hw_pidx;
3506 else
3507 delta = size - hw_pidx + pidx;
3508 wmb();
840f3000
VP
3509 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3510 QID(qid) | PIDX(delta));
3069ee9b
VP
3511 }
3512out:
3513 return ret;
3514}
3515EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3516
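The delta computation accounts for producer-index wraparound: when the software pidx is numerically behind the hardware's copy, it has wrapped past the end of the ring. Worked numbers:

	/* size = 1024 entries:
	 *
	 *   hw_pidx = 100,  pidx = 164  ->  delta = 164 - 100        = 64
	 *   hw_pidx = 1000, pidx = 24   ->  delta = 1024 - 1000 + 24 = 48
	 *
	 * Either way the doorbell write advances hardware by exactly the
	 * number of descriptors software produced since the last sync.
	 */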
3cbdb928
VP
3517void cxgb4_disable_db_coalescing(struct net_device *dev)
3518{
3519 struct adapter *adap;
3520
3521 adap = netdev2adap(dev);
3522 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3523 F_NOCOALESCE);
3524}
3525EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3526
3527void cxgb4_enable_db_coalescing(struct net_device *dev)
3528{
3529 struct adapter *adap;
3530
3531 adap = netdev2adap(dev);
3532 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3533}
3534EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3535
b8ff05a9
DM
3536static struct pci_driver cxgb4_driver;
3537
3538static void check_neigh_update(struct neighbour *neigh)
3539{
3540 const struct device *parent;
3541 const struct net_device *netdev = neigh->dev;
3542
3543 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3544 netdev = vlan_dev_real_dev(netdev);
3545 parent = netdev->dev.parent;
3546 if (parent && parent->driver == &cxgb4_driver.driver)
3547 t4_l2t_update(dev_get_drvdata(parent), neigh);
3548}
3549
3550static int netevent_cb(struct notifier_block *nb, unsigned long event,
3551 void *data)
3552{
3553 switch (event) {
3554 case NETEVENT_NEIGH_UPDATE:
3555 check_neigh_update(data);
3556 break;
b8ff05a9
DM
3557 case NETEVENT_REDIRECT:
3558 default:
3559 break;
3560 }
3561 return 0;
3562}
3563
3564static bool netevent_registered;
3565static struct notifier_block cxgb4_netevent_nb = {
3566 .notifier_call = netevent_cb
3567};
3568
3069ee9b
VP
3569static void drain_db_fifo(struct adapter *adap, int usecs)
3570{
2cc301d2 3571 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
3572
3573 do {
2cc301d2
SR
3574 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3575 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3576 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3577 lp_count = G_LP_COUNT(v1);
3578 hp_count = G_HP_COUNT(v1);
3579 } else {
3580 lp_count = G_LP_COUNT_T5(v1);
3581 hp_count = G_HP_COUNT_T5(v2);
3582 }
3583
3584 if (lp_count == 0 && hp_count == 0)
3585 break;
3069ee9b
VP
3586 set_current_state(TASK_UNINTERRUPTIBLE);
3587 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
3588 } while (1);
3589}
3590
3591static void disable_txq_db(struct sge_txq *q)
3592{
05eb2389
SW
3593 unsigned long flags;
3594
3595 spin_lock_irqsave(&q->db_lock, flags);
3069ee9b 3596 q->db_disabled = 1;
05eb2389 3597 spin_unlock_irqrestore(&q->db_lock, flags);
3069ee9b
VP
3598}
3599
05eb2389 3600static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3069ee9b
VP
3601{
3602 spin_lock_irq(&q->db_lock);
05eb2389
SW
3603 if (q->db_pidx_inc) {
3604 /* Make sure that all writes to the TX descriptors
3605 * are committed before we tell HW about them.
3606 */
3607 wmb();
3608 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3609 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3610 q->db_pidx_inc = 0;
3611 }
3069ee9b
VP
3612 q->db_disabled = 0;
3613 spin_unlock_irq(&q->db_lock);
3614}
3615
3616static void disable_dbs(struct adapter *adap)
3617{
3618 int i;
3619
3620 for_each_ethrxq(&adap->sge, i)
3621 disable_txq_db(&adap->sge.ethtxq[i].q);
3622 for_each_ofldrxq(&adap->sge, i)
3623 disable_txq_db(&adap->sge.ofldtxq[i].q);
3624 for_each_port(adap, i)
3625 disable_txq_db(&adap->sge.ctrlq[i].q);
3626}
3627
3628static void enable_dbs(struct adapter *adap)
3629{
3630 int i;
3631
3632 for_each_ethrxq(&adap->sge, i)
05eb2389 3633 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3069ee9b 3634 for_each_ofldrxq(&adap->sge, i)
05eb2389 3635 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3069ee9b 3636 for_each_port(adap, i)
05eb2389
SW
3637 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3638}
3639
3640static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3641{
3642 if (adap->uld_handle[CXGB4_ULD_RDMA])
3643 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3644 cmd);
3645}
3646
3647static void process_db_full(struct work_struct *work)
3648{
3649 struct adapter *adap;
3650
3651 adap = container_of(work, struct adapter, db_full_task);
3652
3653 drain_db_fifo(adap, dbfifo_drain_delay);
3654 enable_dbs(adap);
3655 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3656 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3657 DBFIFO_HP_INT | DBFIFO_LP_INT,
3658 DBFIFO_HP_INT | DBFIFO_LP_INT);
3069ee9b
VP
3659}
3660
3661static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3662{
3663 u16 hw_pidx, hw_cidx;
3664 int ret;
3665
05eb2389 3666 spin_lock_irq(&q->db_lock);
3069ee9b
VP
3667 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3668 if (ret)
3669 goto out;
3670 if (q->db_pidx != hw_pidx) {
3671 u16 delta;
3672
3673 if (q->db_pidx >= hw_pidx)
3674 delta = q->db_pidx - hw_pidx;
3675 else
3676 delta = q->size - hw_pidx + q->db_pidx;
3677 wmb();
840f3000
VP
3678 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3679 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
3680 }
3681out:
3682 q->db_disabled = 0;
05eb2389
SW
3683 q->db_pidx_inc = 0;
3684 spin_unlock_irq(&q->db_lock);
3069ee9b
VP
3685 if (ret)
3686 CH_WARN(adap, "DB drop recovery failed.\n");
3687}
3688static void recover_all_queues(struct adapter *adap)
3689{
3690 int i;
3691
3692 for_each_ethrxq(&adap->sge, i)
3693 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3694 for_each_ofldrxq(&adap->sge, i)
3695 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3696 for_each_port(adap, i)
3697 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3698}
3699
881806bc
VP
3700static void process_db_drop(struct work_struct *work)
3701{
3702 struct adapter *adap;
881806bc 3703
3069ee9b 3704 adap = container_of(work, struct adapter, db_drop_task);
881806bc 3705
d14807dd 3706 if (is_t4(adap->params.chip)) {
05eb2389 3707 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 3708 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
05eb2389 3709 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 3710 recover_all_queues(adap);
05eb2389 3711 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 3712 enable_dbs(adap);
05eb2389 3713 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2cc301d2
SR
3714 } else {
3715 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3716 u16 qid = (dropped_db >> 15) & 0x1ffff;
3717 u16 pidx_inc = dropped_db & 0x1fff;
3718 unsigned int s_qpp;
3719 unsigned short udb_density;
3720 unsigned long qpshift;
3721 int page;
3722 u32 udb;
3723
3724 dev_warn(adap->pdev_dev,
3725 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3726 dropped_db, qid,
3727 (dropped_db >> 14) & 1,
3728 (dropped_db >> 13) & 1,
3729 pidx_inc);
3730
3731 drain_db_fifo(adap, 1);
3732
3733 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3734 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3735 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3736 qpshift = PAGE_SHIFT - ilog2(udb_density);
3737 udb = qid << qpshift;
3738 udb &= PAGE_MASK;
3739 page = udb / PAGE_SIZE;
3740 udb += (qid - (page * udb_density)) * 128;
3741
3742 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3743
3744 /* Re-enable BAR2 WC */
3745 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3746 }
3747
3748	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3749}
3750
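/* Called when the SGE raises its doorbell-FIFO high-watermark interrupt.
 * On T4 we stop software doorbells, mask the FIFO-threshold interrupts and
 * defer the drain/re-enable work to process_db_full().
 */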
3751void t4_db_full(struct adapter *adap)
3752{
3753	if (is_t4(adap->params.chip)) {
3754 disable_dbs(adap);
3755 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3756 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3757 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3758 queue_work(workq, &adap->db_full_task);
3759 }
3760}
3761
3762void t4_db_dropped(struct adapter *adap)
3763{
3764 if (is_t4(adap->params.chip)) {
3765 disable_dbs(adap);
3766 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3767 }
3768 queue_work(workq, &adap->db_drop_task);
3769}
3770
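/* Populate a cxgb4_lld_info with this adapter's resources and hand it to the
 * ULD's ->add() method; on success the returned handle is recorded and, if
 * the adapter is already fully initialized, the ULD is told it is UP.
 */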
3771static void uld_attach(struct adapter *adap, unsigned int uld)
3772{
3773 void *handle;
3774 struct cxgb4_lld_info lli;
3775	unsigned short i;
3776
3777 lli.pdev = adap->pdev;
3778 lli.l2t = adap->l2t;
3779 lli.tids = &adap->tids;
3780 lli.ports = adap->port;
3781 lli.vr = &adap->vres;
3782 lli.mtus = adap->params.mtus;
3783 if (uld == CXGB4_ULD_RDMA) {
3784 lli.rxq_ids = adap->sge.rdma_rxq;
3785 lli.nrxq = adap->sge.rdmaqs;
3786 } else if (uld == CXGB4_ULD_ISCSI) {
3787 lli.rxq_ids = adap->sge.ofld_rxq;
3788 lli.nrxq = adap->sge.ofldqsets;
3789 }
3790 lli.ntxq = adap->sge.ofldqsets;
3791 lli.nchan = adap->params.nports;
3792 lli.nports = adap->params.nports;
3793 lli.wr_cred = adap->params.ofldq_wr_cred;
3794	lli.adapter_type = adap->params.chip;
3795 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3796 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3797 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3798 (adap->fn * 4));
3799	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3800 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3801 (adap->fn * 4));
3802	lli.filt_mode = adap->params.tp.vlan_pri_map;
3803 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3804 for (i = 0; i < NCHAN; i++)
3805 lli.tx_modq[i] = i;
3806 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3807 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3808 lli.fw_vers = adap->params.fw_vers;
3809	lli.dbfifo_int_thresh = dbfifo_int_thresh;
3810 lli.sge_pktshift = adap->sge.pktshift;
3811 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3812	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3813
3814 handle = ulds[uld].add(&lli);
3815 if (IS_ERR(handle)) {
3816 dev_warn(adap->pdev_dev,
3817 "could not attach to the %s driver, error %ld\n",
3818 uld_str[uld], PTR_ERR(handle));
3819 return;
3820 }
3821
3822 adap->uld_handle[uld] = handle;
3823
3824 if (!netevent_registered) {
3825 register_netevent_notifier(&cxgb4_netevent_nb);
3826 netevent_registered = true;
3827 }
3828
3829 if (adap->flags & FULL_INIT_DONE)
3830 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3831}
3832
3833static void attach_ulds(struct adapter *adap)
3834{
3835 unsigned int i;
3836
3837 spin_lock(&adap_rcu_lock);
3838 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3839 spin_unlock(&adap_rcu_lock);
3840
3841 mutex_lock(&uld_mutex);
3842 list_add_tail(&adap->list_node, &adapter_list);
3843 for (i = 0; i < CXGB4_ULD_MAX; i++)
3844 if (ulds[i].add)
3845 uld_attach(adap, i);
3846 mutex_unlock(&uld_mutex);
3847}
3848
3849static void detach_ulds(struct adapter *adap)
3850{
3851 unsigned int i;
3852
3853 mutex_lock(&uld_mutex);
3854 list_del(&adap->list_node);
3855 for (i = 0; i < CXGB4_ULD_MAX; i++)
3856 if (adap->uld_handle[i]) {
3857 ulds[i].state_change(adap->uld_handle[i],
3858 CXGB4_STATE_DETACH);
3859 adap->uld_handle[i] = NULL;
3860 }
3861 if (netevent_registered && list_empty(&adapter_list)) {
3862 unregister_netevent_notifier(&cxgb4_netevent_nb);
3863 netevent_registered = false;
3864 }
3865 mutex_unlock(&uld_mutex);
3866
3867 spin_lock(&adap_rcu_lock);
3868 list_del_rcu(&adap->rcu_node);
3869 spin_unlock(&adap_rcu_lock);
3870}
3871
3872static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3873{
3874 unsigned int i;
3875
3876 mutex_lock(&uld_mutex);
3877 for (i = 0; i < CXGB4_ULD_MAX; i++)
3878 if (adap->uld_handle[i])
3879 ulds[i].state_change(adap->uld_handle[i], new_state);
3880 mutex_unlock(&uld_mutex);
3881}
3882
3883/**
3884 * cxgb4_register_uld - register an upper-layer driver
3885 * @type: the ULD type
3886 * @p: the ULD methods
3887 *
3888 * Registers an upper-layer driver with this driver and notifies the ULD
3889 * about any presently available devices that support its type. Returns
3890 * %-EBUSY if a ULD of the same type is already registered.
3891 */
3892int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3893{
3894 int ret = 0;
3895 struct adapter *adap;
3896
3897 if (type >= CXGB4_ULD_MAX)
3898 return -EINVAL;
3899 mutex_lock(&uld_mutex);
3900 if (ulds[type].add) {
3901 ret = -EBUSY;
3902 goto out;
3903 }
3904 ulds[type] = *p;
3905 list_for_each_entry(adap, &adapter_list, list_node)
3906 uld_attach(adap, type);
3907out: mutex_unlock(&uld_mutex);
3908 return ret;
3909}
3910EXPORT_SYMBOL(cxgb4_register_uld);
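/* A minimal sketch of how an upper-layer driver might register itself
 * (hypothetical callback names; only the ->add(), ->state_change() and
 * ->control() methods exercised by this file are shown):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *		.control	= my_uld_control,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */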
3911
3912/**
3913 * cxgb4_unregister_uld - unregister an upper-layer driver
3914 * @type: the ULD type
3915 *
3916 * Unregisters an existing upper-layer driver.
3917 */
3918int cxgb4_unregister_uld(enum cxgb4_uld type)
3919{
3920 struct adapter *adap;
3921
3922 if (type >= CXGB4_ULD_MAX)
3923 return -EINVAL;
3924 mutex_lock(&uld_mutex);
3925 list_for_each_entry(adap, &adapter_list, list_node)
3926 adap->uld_handle[type] = NULL;
3927 ulds[type].add = NULL;
3928 mutex_unlock(&uld_mutex);
3929 return 0;
3930}
3931EXPORT_SYMBOL(cxgb4_unregister_uld);
3932
3933/* Check whether the netdev on which the event occurred belongs to us.
3934 * Return success (1) if it belongs, otherwise failure (0).
3935 */
3936static int cxgb4_netdev(struct net_device *netdev)
3937{
3938 struct adapter *adap;
3939 int i;
3940
3941 spin_lock(&adap_rcu_lock);
3942 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3943 for (i = 0; i < MAX_NPORTS; i++)
3944 if (adap->port[i] == netdev) {
3945 spin_unlock(&adap_rcu_lock);
3946 return 1;
3947 }
3948 spin_unlock(&adap_rcu_lock);
3949 return 0;
3950}
3951
3952static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3953 unsigned long event)
3954{
3955 int ret = NOTIFY_DONE;
3956
3957 rcu_read_lock();
3958 if (cxgb4_netdev(event_dev)) {
3959 switch (event) {
3960 case NETDEV_UP:
3961 ret = cxgb4_clip_get(event_dev,
3962 (const struct in6_addr *)ifa->addr.s6_addr);
3963 if (ret < 0) {
3964 rcu_read_unlock();
3965 return ret;
3966 }
3967 ret = NOTIFY_OK;
3968 break;
3969 case NETDEV_DOWN:
3970 cxgb4_clip_release(event_dev,
3971 (const struct in6_addr *)ifa->addr.s6_addr);
3972 ret = NOTIFY_OK;
3973 break;
3974 default:
3975 break;
3976 }
3977 }
3978 rcu_read_unlock();
3979 return ret;
3980}
3981
3982static int cxgb4_inet6addr_handler(struct notifier_block *this,
3983 unsigned long event, void *data)
3984{
3985 struct inet6_ifaddr *ifa = data;
3986 struct net_device *event_dev;
3987 int ret = NOTIFY_DONE;
3988	struct bonding *bond = netdev_priv(ifa->idev->dev);
3989	struct list_head *iter;
3990 struct slave *slave;
3991 struct pci_dev *first_pdev = NULL;
3992
3993 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3994 event_dev = vlan_dev_real_dev(ifa->idev->dev);
3995 ret = clip_add(event_dev, ifa, event);
3996 } else if (ifa->idev->dev->flags & IFF_MASTER) {
3997 /* It is possible that two different adapters are bonded in one
3998 * bond. We need to find such different adapters and add clip
3999 * in all of them only once.
4000 */
4001 read_lock(&bond->lock);
4002		bond_for_each_slave(bond, slave, iter) {
4003 if (!first_pdev) {
4004 ret = clip_add(slave->dev, ifa, event);
4005				/* Initialize first_pdev only if clip_add
4006				 * succeeded, since that means it is our device
4007				 */
4008 if (ret == NOTIFY_OK)
4009 first_pdev = to_pci_dev(
4010 slave->dev->dev.parent);
4011 } else if (first_pdev !=
4012 to_pci_dev(slave->dev->dev.parent))
4013 ret = clip_add(slave->dev, ifa, event);
4014 }
4015 read_unlock(&bond->lock);
4016 } else
4017 ret = clip_add(ifa->idev->dev, ifa, event);
4018
4019 return ret;
4020}
4021
4022static struct notifier_block cxgb4_inet6addr_notifier = {
4023 .notifier_call = cxgb4_inet6addr_handler
4024};
4025
4026/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4027 * a physical device.
4028 * The physical device reference is needed to send the actual CLIP command.
4029 */
4030static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4031{
4032 struct inet6_dev *idev = NULL;
4033 struct inet6_ifaddr *ifa;
4034 int ret = 0;
4035
4036 idev = __in6_dev_get(root_dev);
4037 if (!idev)
4038 return ret;
4039
4040 read_lock_bh(&idev->lock);
4041 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4042 ret = cxgb4_clip_get(dev,
4043 (const struct in6_addr *)ifa->addr.s6_addr);
4044 if (ret < 0)
4045 break;
4046 }
4047 read_unlock_bh(&idev->lock);
4048
4049 return ret;
4050}
4051
4052static int update_root_dev_clip(struct net_device *dev)
4053{
4054 struct net_device *root_dev = NULL;
4055 int i, ret = 0;
4056
4057 /* First populate the real net device's IPv6 addresses */
4058 ret = update_dev_clip(dev, dev);
4059 if (ret)
4060 return ret;
4061
4062 /* Parse all bond and vlan devices layered on top of the physical dev */
4063 for (i = 0; i < VLAN_N_VID; i++) {
4064 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4065 if (!root_dev)
4066 continue;
4067
4068 ret = update_dev_clip(root_dev, dev);
4069 if (ret)
4070 break;
4071 }
4072 return ret;
4073}
4074
4075static void update_clip(const struct adapter *adap)
4076{
4077 int i;
4078 struct net_device *dev;
4079 int ret;
4080
4081 rcu_read_lock();
4082
4083 for (i = 0; i < MAX_NPORTS; i++) {
4084 dev = adap->port[i];
4085 ret = 0;
4086
4087 if (dev)
4088 ret = update_root_dev_clip(dev);
4089
4090 if (ret < 0)
4091 break;
4092 }
4093 rcu_read_unlock();
4094}
4095
4096/**
4097 * cxgb_up - enable the adapter
4098 * @adap: adapter being enabled
4099 *
4100 * Called when the first port is enabled, this function performs the
4101 * actions necessary to make an adapter operational, such as completing
4102 * the initialization of HW modules, and enabling interrupts.
4103 *
4104 * Must be called with the rtnl lock held.
4105 */
4106static int cxgb_up(struct adapter *adap)
4107{
4108	int err;
4109
4110 err = setup_sge_queues(adap);
4111 if (err)
4112 goto out;
4113 err = setup_rss(adap);
4114 if (err)
4115 goto freeq;
4116
4117 if (adap->flags & USING_MSIX) {
4118		name_msix_vecs(adap);
4119 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4120 adap->msix_info[0].desc, adap);
4121 if (err)
4122 goto irq_err;
4123
4124 err = request_msix_queue_irqs(adap);
4125 if (err) {
4126 free_irq(adap->msix_info[0].vec, adap);
4127 goto irq_err;
4128 }
4129 } else {
4130 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4131 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4132				  adap->port[0]->name, adap);
4133 if (err)
4134 goto irq_err;
4135 }
4136 enable_rx(adap);
4137 t4_sge_start(adap);
4138 t4_intr_enable(adap);
4139	adap->flags |= FULL_INIT_DONE;
4140	notify_ulds(adap, CXGB4_STATE_UP);
4141	update_clip(adap);
4142 out:
4143 return err;
4144 irq_err:
4145 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4146 freeq:
4147 t4_free_sge_resources(adap);
4148 goto out;
4149}
4150
4151static void cxgb_down(struct adapter *adapter)
4152{
4153 t4_intr_disable(adapter);
4154 cancel_work_sync(&adapter->tid_release_task);
4155 cancel_work_sync(&adapter->db_full_task);
4156 cancel_work_sync(&adapter->db_drop_task);
4157	adapter->tid_release_task_busy = false;
4158	adapter->tid_release_head = NULL;
4159
4160 if (adapter->flags & USING_MSIX) {
4161 free_msix_queue_irqs(adapter);
4162 free_irq(adapter->msix_info[0].vec, adapter);
4163 } else
4164 free_irq(adapter->pdev->irq, adapter);
4165 quiesce_rx(adapter);
4166 t4_sge_stop(adapter);
4167 t4_free_sge_resources(adapter);
4168 adapter->flags &= ~FULL_INIT_DONE;
4169}
4170
4171/*
4172 * net_device operations
4173 */
4174static int cxgb_open(struct net_device *dev)
4175{
4176 int err;
4177 struct port_info *pi = netdev_priv(dev);
4178 struct adapter *adapter = pi->adapter;
4179
4180 netif_carrier_off(dev);
4181
4182 if (!(adapter->flags & FULL_INIT_DONE)) {
4183 err = cxgb_up(adapter);
4184 if (err < 0)
4185 return err;
4186 }
4187
4188 err = link_start(dev);
4189 if (!err)
4190 netif_tx_start_all_queues(dev);
4191 return err;
4192}
4193
4194static int cxgb_close(struct net_device *dev)
4195{
4196 struct port_info *pi = netdev_priv(dev);
4197 struct adapter *adapter = pi->adapter;
4198
4199 netif_tx_stop_all_queues(dev);
4200 netif_carrier_off(dev);
4201	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4202}
4203
4204/* Return an error number if the indicated filter isn't writable ...
4205 */
4206static int writable_filter(struct filter_entry *f)
4207{
4208 if (f->locked)
4209 return -EPERM;
4210 if (f->pending)
4211 return -EBUSY;
4212
4213 return 0;
4214}
4215
4216/* Delete the filter at the specified index (if valid).  This checks for all
4217 * the common problems with doing this like the filter being locked, currently
4218 * pending in another operation, etc.
4219 */
4220static int delete_filter(struct adapter *adapter, unsigned int fidx)
4221{
4222 struct filter_entry *f;
4223 int ret;
4224
4225	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4226 return -EINVAL;
4227
4228 f = &adapter->tids.ftid_tab[fidx];
4229 ret = writable_filter(f);
4230 if (ret)
4231 return ret;
4232 if (f->valid)
4233 return del_filter_wr(adapter, fidx);
4234
4235 return 0;
4236}
4237
4238 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4239 __be32 sip, __be16 sport, __be16 vlan,
4240 unsigned int queue, unsigned char port, unsigned char mask)
4241{
4242 int ret;
4243 struct filter_entry *f;
4244 struct adapter *adap;
4245 int i;
4246 u8 *val;
4247
4248 adap = netdev2adap(dev);
4249
4250	/* Adjust stid to correct filter index */
4251	stid -= adap->tids.sftid_base;
4252 stid += adap->tids.nftids;
4253
4254 /* Check to make sure the filter requested is writable ...
4255 */
4256 f = &adap->tids.ftid_tab[stid];
4257 ret = writable_filter(f);
4258 if (ret)
4259 return ret;
4260
4261 /* Clear out any old resources being used by the filter before
4262 * we start constructing the new filter.
4263 */
4264 if (f->valid)
4265 clear_filter(adap, f);
4266
4267 /* Clear out filter specifications */
4268 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4269 f->fs.val.lport = cpu_to_be16(sport);
4270 f->fs.mask.lport = ~0;
4271 val = (u8 *)&sip;
4272	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4273 for (i = 0; i < 4; i++) {
4274 f->fs.val.lip[i] = val[i];
4275 f->fs.mask.lip[i] = ~0;
4276 }
4277		if (adap->params.tp.vlan_pri_map & F_PORT) {
4278 f->fs.val.iport = port;
4279 f->fs.mask.iport = mask;
4280 }
4281 }
4282
4283	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4284 f->fs.val.proto = IPPROTO_TCP;
4285 f->fs.mask.proto = ~0;
4286 }
4287
4288 f->fs.dirsteer = 1;
4289 f->fs.iq = queue;
4290 /* Mark filter as locked */
4291 f->locked = 1;
4292 f->fs.rpttid = 1;
4293
4294 ret = set_filter_wr(adap, stid);
4295 if (ret) {
4296 clear_filter(adap, f);
4297 return ret;
4298 }
4299
4300 return 0;
4301}
4302EXPORT_SYMBOL(cxgb4_create_server_filter);
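/* A minimal sketch of a caller steering an IPv4 server's SYN packets to a
 * specific ingress queue (all identifiers and values hypothetical):
 *
 *	err = cxgb4_create_server_filter(dev, stid, sip, htons(80),
 *					 0, rspq_id, port_id, ~0);
 *
 * Passing a zero sip leaves the local-IP match wild, per the check above.
 */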
4303
4304int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4305 unsigned int queue, bool ipv6)
4306{
4307 int ret;
4308 struct filter_entry *f;
4309 struct adapter *adap;
4310
4311 adap = netdev2adap(dev);
4312
4313 /* Adjust stid to correct filter index */
4314	stid -= adap->tids.sftid_base;
4315 stid += adap->tids.nftids;
4316
4317 f = &adap->tids.ftid_tab[stid];
4318 /* Unlock the filter */
4319 f->locked = 0;
4320
4321 ret = delete_filter(adap, stid);
4322 if (ret)
4323 return ret;
4324
4325 return 0;
4326}
4327EXPORT_SYMBOL(cxgb4_remove_server_filter);
4328
4329static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4330 struct rtnl_link_stats64 *ns)
4331{
4332 struct port_stats stats;
4333 struct port_info *p = netdev_priv(dev);
4334 struct adapter *adapter = p->adapter;
4335
4336 /* Block retrieving statistics during EEH error
4337 * recovery. Otherwise, the recovery might fail
4338 * and the PCI device will be removed permanently
4339 */
4340	spin_lock(&adapter->stats_lock);
4341 if (!netif_device_present(dev)) {
4342 spin_unlock(&adapter->stats_lock);
4343 return ns;
4344 }
4345 t4_get_port_stats(adapter, p->tx_chan, &stats);
4346 spin_unlock(&adapter->stats_lock);
4347
4348 ns->tx_bytes = stats.tx_octets;
4349 ns->tx_packets = stats.tx_frames;
4350 ns->rx_bytes = stats.rx_octets;
4351 ns->rx_packets = stats.rx_frames;
4352 ns->multicast = stats.rx_mcast_frames;
4353
4354 /* detailed rx_errors */
4355 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4356 stats.rx_runt;
4357 ns->rx_over_errors = 0;
4358 ns->rx_crc_errors = stats.rx_fcs_err;
4359 ns->rx_frame_errors = stats.rx_symbol_err;
4360 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4361 stats.rx_ovflow2 + stats.rx_ovflow3 +
4362 stats.rx_trunc0 + stats.rx_trunc1 +
4363 stats.rx_trunc2 + stats.rx_trunc3;
4364 ns->rx_missed_errors = 0;
4365
4366 /* detailed tx_errors */
4367 ns->tx_aborted_errors = 0;
4368 ns->tx_carrier_errors = 0;
4369 ns->tx_fifo_errors = 0;
4370 ns->tx_heartbeat_errors = 0;
4371 ns->tx_window_errors = 0;
4372
4373 ns->tx_errors = stats.tx_error_frames;
4374 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4375 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4376 return ns;
4377}
4378
4379static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4380{
4381	unsigned int mbox;
4382 int ret = 0, prtad, devad;
4383 struct port_info *pi = netdev_priv(dev);
4384 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4385
4386 switch (cmd) {
4387 case SIOCGMIIPHY:
4388 if (pi->mdio_addr < 0)
4389 return -EOPNOTSUPP;
4390 data->phy_id = pi->mdio_addr;
4391 break;
4392 case SIOCGMIIREG:
4393 case SIOCSMIIREG:
4394 if (mdio_phy_id_is_c45(data->phy_id)) {
4395 prtad = mdio_phy_id_prtad(data->phy_id);
4396 devad = mdio_phy_id_devad(data->phy_id);
4397 } else if (data->phy_id < 32) {
4398 prtad = data->phy_id;
4399 devad = 0;
4400 data->reg_num &= 0x1f;
4401 } else
4402 return -EINVAL;
4403
4404		mbox = pi->adapter->fn;
4405		if (cmd == SIOCGMIIREG)
4406			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4407 data->reg_num, &data->val_out);
4408 else
4409			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4410 data->reg_num, data->val_in);
4411 break;
4412 default:
4413 return -EOPNOTSUPP;
4414 }
4415 return ret;
4416}
4417
4418static void cxgb_set_rxmode(struct net_device *dev)
4419{
4420 /* unfortunately we can't return errors to the stack */
4421 set_rxmode(dev, -1, false);
4422}
4423
4424static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4425{
4426 int ret;
4427 struct port_info *pi = netdev_priv(dev);
4428
4429 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4430 return -EINVAL;
4431 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4432 -1, -1, -1, true);
4433 if (!ret)
4434 dev->mtu = new_mtu;
4435 return ret;
4436}
4437
4438static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4439{
4440 int ret;
4441 struct sockaddr *addr = p;
4442 struct port_info *pi = netdev_priv(dev);
4443
4444 if (!is_valid_ether_addr(addr->sa_data))
4445		return -EADDRNOTAVAIL;
4446
4447 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4448 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
4449 if (ret < 0)
4450 return ret;
4451
4452 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4453 pi->xact_addr_filt = ret;
4454 return 0;
4455}
4456
4457#ifdef CONFIG_NET_POLL_CONTROLLER
4458static void cxgb_netpoll(struct net_device *dev)
4459{
4460 struct port_info *pi = netdev_priv(dev);
4461 struct adapter *adap = pi->adapter;
4462
4463 if (adap->flags & USING_MSIX) {
4464 int i;
4465 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4466
4467 for (i = pi->nqsets; i; i--, rx++)
4468 t4_sge_intr_msix(0, &rx->rspq);
4469 } else
4470 t4_intr_handler(adap)(0, adap);
4471}
4472#endif
4473
4474static const struct net_device_ops cxgb4_netdev_ops = {
4475 .ndo_open = cxgb_open,
4476 .ndo_stop = cxgb_close,
4477 .ndo_start_xmit = t4_eth_xmit,
4478	.ndo_get_stats64      = cxgb_get_stats,
4479 .ndo_set_rx_mode = cxgb_set_rxmode,
4480 .ndo_set_mac_address = cxgb_set_mac_addr,
4481	.ndo_set_features     = cxgb_set_features,
4482 .ndo_validate_addr = eth_validate_addr,
4483 .ndo_do_ioctl = cxgb_ioctl,
4484 .ndo_change_mtu = cxgb_change_mtu,
4485#ifdef CONFIG_NET_POLL_CONTROLLER
4486 .ndo_poll_controller = cxgb_netpoll,
4487#endif
4488};
4489
4490void t4_fatal_err(struct adapter *adap)
4491{
4492 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4493 t4_intr_disable(adap);
4494 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4495}
4496
4497static void setup_memwin(struct adapter *adap)
4498{
4499	u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4500
4501 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4502	if (is_t4(adap->params.chip)) {
4503 mem_win0_base = bar0 + MEMWIN0_BASE;
4504 mem_win1_base = bar0 + MEMWIN1_BASE;
4505 mem_win2_base = bar0 + MEMWIN2_BASE;
4506 } else {
4507 /* For T5, only relative offset inside the PCIe BAR is passed */
4508 mem_win0_base = MEMWIN0_BASE;
4509 mem_win1_base = MEMWIN1_BASE_T5;
4510 mem_win2_base = MEMWIN2_BASE_T5;
4511 }
4512	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4513		     mem_win0_base | BIR(0) |
4514 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4515 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4516		     mem_win1_base | BIR(0) |
4517 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4518 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4519		     mem_win2_base | BIR(0) |
4520		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4521}
4522
4523static void setup_memwin_rdma(struct adapter *adap)
4524{
4525 if (adap->vres.ocq.size) {
4526 unsigned int start, sz_kb;
4527
4528 start = pci_resource_start(adap->pdev, 2) +
4529 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4530 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4531 t4_write_reg(adap,
4532 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4533 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4534 t4_write_reg(adap,
4535 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4536 adap->vres.ocq.start);
4537 t4_read_reg(adap,
4538 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4539 }
4540}
4541
4542static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4543{
4544 u32 v;
4545 int ret;
4546
4547 /* get device capabilities */
4548 memset(c, 0, sizeof(*c));
4549 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4550 FW_CMD_REQUEST | FW_CMD_READ);
4551	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4552	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4553 if (ret < 0)
4554 return ret;
4555
4556 /* select capabilities we'll be using */
4557 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4558 if (!vf_acls)
4559 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4560 else
4561 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4562 } else if (vf_acls) {
4563 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4564 return ret;
4565 }
4566 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4567 FW_CMD_REQUEST | FW_CMD_WRITE);
4568	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4569 if (ret < 0)
4570 return ret;
4571
4572	ret = t4_config_glbl_rss(adap, adap->fn,
4573 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4574 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4575 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4576 if (ret < 0)
4577 return ret;
4578
4579 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4580 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4581 if (ret < 0)
4582 return ret;
4583
4584 t4_sge_init(adap);
4585
4586 /* tweak some settings */
4587 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4588 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4589 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4590 v = t4_read_reg(adap, TP_PIO_DATA);
4591 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4592
4593 /* first 4 Tx modulation queues point to consecutive Tx channels */
4594 adap->params.tp.tx_modq_map = 0xE4;
4595 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4596 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4597
4598 /* associate each Tx modulation queue with consecutive Tx channels */
4599 v = 0x84218421;
4600 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4601 &v, 1, A_TP_TX_SCHED_HDR);
4602 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4603 &v, 1, A_TP_TX_SCHED_FIFO);
4604 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4605 &v, 1, A_TP_TX_SCHED_PCMD);
4606
4607#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4608 if (is_offload(adap)) {
4609 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4610 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4611 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4612 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4613 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4614 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4615 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4616 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4617 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4618 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4619 }
4620
4621 /* get basic stuff going */
4622 return t4_early_init(adap, adap->fn);
4623}
4624
4625/*
4626 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4627 */
4628#define MAX_ATIDS 8192U
4629
4630/*
4631 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4632 *
4633 * If the firmware we're dealing with has Configuration File support, then
4634 * we use that to perform all configuration
4635 */
4636
4637/*
4638 * Tweak configuration based on module parameters, etc. Most of these have
4639 * defaults assigned to them by Firmware Configuration Files (if we're using
4640 * them) but need to be explicitly set if we're using hard-coded
4641 * initialization. But even in the case of using Firmware Configuration
4642 * Files, we'd like to expose the ability to change these via module
4643 * parameters so these are essentially common tweaks/settings for
4644 * Configuration Files and hard-coded initialization ...
4645 */
4646static int adap_init0_tweaks(struct adapter *adapter)
4647{
4648 /*
4649 * Fix up various Host-Dependent Parameters like Page Size, Cache
4650 * Line Size, etc. The firmware default is for a 4KB Page Size and
4651 * 64B Cache Line Size ...
4652 */
4653 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4654
4655 /*
4656 * Process module parameters which affect early initialization.
4657 */
4658 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4659 dev_err(&adapter->pdev->dev,
4660 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4661 rx_dma_offset);
4662 rx_dma_offset = 2;
4663 }
4664 t4_set_reg_field(adapter, SGE_CONTROL,
4665 PKTSHIFT_MASK,
4666 PKTSHIFT(rx_dma_offset));
4667
4668 /*
4669 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4670 * adds the pseudo header itself.
4671 */
4672 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4673 CSUM_HAS_PSEUDO_HDR, 0);
4674
4675 return 0;
4676}
4677
4678/*
4679 * Attempt to initialize the adapter via a Firmware Configuration File.
4680 */
4681static int adap_init0_config(struct adapter *adapter, int reset)
4682{
4683 struct fw_caps_config_cmd caps_cmd;
4684 const struct firmware *cf;
4685 unsigned long mtype = 0, maddr = 0;
4686 u32 finiver, finicsum, cfcsum;
4687 int ret;
4688 int config_issued = 0;
4689	char *fw_config_file, fw_config_file_path[256];
4690	char *config_name = NULL;
4691
4692 /*
4693 * Reset device if necessary.
4694 */
4695 if (reset) {
4696 ret = t4_fw_reset(adapter, adapter->mbox,
4697 PIORSTMODE | PIORST);
4698 if (ret < 0)
4699 goto bye;
4700 }
4701
4702 /*
4703 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4704 * then use that. Otherwise, use the configuration file stored
4705 * in the adapter flash ...
4706 */
4707	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4708	case CHELSIO_T4:
4709		fw_config_file = FW4_CFNAME;
4710 break;
4711 case CHELSIO_T5:
4712 fw_config_file = FW5_CFNAME;
4713 break;
4714 default:
4715 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4716 adapter->pdev->device);
4717 ret = -EINVAL;
4718 goto bye;
4719 }
4720
4721 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4722	if (ret < 0) {
4723		config_name = "On FLASH";
4724 mtype = FW_MEMTYPE_CF_FLASH;
4725 maddr = t4_flash_cfg_addr(adapter);
4726 } else {
4727 u32 params[7], val[7];
4728
4729 sprintf(fw_config_file_path,
4730 "/lib/firmware/%s", fw_config_file);
4731 config_name = fw_config_file_path;
4732
4733 if (cf->size >= FLASH_CFG_MAX_SIZE)
4734 ret = -ENOMEM;
4735 else {
4736 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4737 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4738 ret = t4_query_params(adapter, adapter->mbox,
4739 adapter->fn, 0, 1, params, val);
4740 if (ret == 0) {
4741 /*
4742 * For t4_memory_write() below addresses and
4743 * sizes have to be in terms of multiples of 4
4744 * bytes. So, if the Configuration File isn't
4745 * a multiple of 4 bytes in length we'll have
4746 * to write that out separately since we can't
4747 * guarantee that the bytes following the
4748 * residual byte in the buffer returned by
4749 * request_firmware() are zeroed out ...
4750 */
4751 size_t resid = cf->size & 0x3;
4752 size_t size = cf->size & ~0x3;
4753 __be32 *data = (__be32 *)cf->data;
4754
4755 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4756 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4757
4758 ret = t4_memory_write(adapter, mtype, maddr,
4759 size, data);
4760 if (ret == 0 && resid != 0) {
4761 union {
4762 __be32 word;
4763 char buf[4];
4764 } last;
4765 int i;
4766
4767 last.word = data[size >> 2];
4768 for (i = resid; i < 4; i++)
4769 last.buf[i] = 0;
4770 ret = t4_memory_write(adapter, mtype,
4771 maddr + size,
4772 4, &last.word);
4773 }
4774 }
4775 }
4776
4777 release_firmware(cf);
4778 if (ret)
4779 goto bye;
4780 }
4781
4782 /*
4783 * Issue a Capability Configuration command to the firmware to get it
4784 * to parse the Configuration File. We don't use t4_fw_config_file()
4785 * because we want the ability to modify various features after we've
4786 * processed the configuration file ...
4787 */
4788 memset(&caps_cmd, 0, sizeof(caps_cmd));
4789 caps_cmd.op_to_write =
4790 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4791 FW_CMD_REQUEST |
4792 FW_CMD_READ);
4793	caps_cmd.cfvalid_to_len16 =
4794 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4795 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4796 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4797 FW_LEN16(caps_cmd));
4798 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4799 &caps_cmd);
4800
4801 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4802 * Configuration File in FLASH), our last gasp effort is to use the
4803 * Firmware Configuration File which is embedded in the firmware. A
4804 * very few early versions of the firmware didn't have one embedded
4805 * but we can ignore those.
4806 */
4807 if (ret == -ENOENT) {
4808 memset(&caps_cmd, 0, sizeof(caps_cmd));
4809 caps_cmd.op_to_write =
4810 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4811 FW_CMD_REQUEST |
4812 FW_CMD_READ);
4813 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4814 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4815 sizeof(caps_cmd), &caps_cmd);
4816 config_name = "Firmware Default";
4817 }
4818
4819 config_issued = 1;
4820 if (ret < 0)
4821 goto bye;
4822
4823 finiver = ntohl(caps_cmd.finiver);
4824 finicsum = ntohl(caps_cmd.finicsum);
4825 cfcsum = ntohl(caps_cmd.cfcsum);
4826 if (finicsum != cfcsum)
4827 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4828 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4829 finicsum, cfcsum);
4830
4831 /*
4832 * And now tell the firmware to use the configuration we just loaded.
4833 */
4834 caps_cmd.op_to_write =
4835 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4836 FW_CMD_REQUEST |
4837 FW_CMD_WRITE);
4838	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4839 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4840 NULL);
4841 if (ret < 0)
4842 goto bye;
4843
4844 /*
4845 * Tweak configuration based on system architecture, module
4846 * parameters, etc.
4847 */
4848 ret = adap_init0_tweaks(adapter);
4849 if (ret < 0)
4850 goto bye;
4851
4852 /*
4853 * And finally tell the firmware to initialize itself using the
4854 * parameters from the Configuration File.
4855 */
4856 ret = t4_fw_initialize(adapter, adapter->mbox);
4857 if (ret < 0)
4858 goto bye;
4859
4860 /*
4861 * Return successfully and note that we're operating with parameters
4862 * not supplied by the driver, rather than from hard-wired
4863	 * initialization constants buried in the driver.
4864 */
4865 adapter->flags |= USING_SOFT_PARAMS;
4866 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4867 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4868 config_name, finiver, cfcsum);
4869 return 0;
4870
4871 /*
4872 * Something bad happened. Return the error ... (If the "error"
4873 * is that there's no Configuration File on the adapter we don't
4874 * want to issue a warning since this is fairly common.)
4875 */
4876bye:
4877 if (config_issued && ret != -ENOENT)
4878 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4879 config_name, -ret);
4880 return ret;
4881}
4882
4883/*
4884 * Attempt to initialize the adapter via hard-coded, driver supplied
4885 * parameters ...
4886 */
4887static int adap_init0_no_config(struct adapter *adapter, int reset)
4888{
4889 struct sge *s = &adapter->sge;
4890 struct fw_caps_config_cmd caps_cmd;
4891 u32 v;
4892 int i, ret;
4893
4894 /*
4895 * Reset device if necessary
4896 */
4897 if (reset) {
4898 ret = t4_fw_reset(adapter, adapter->mbox,
4899 PIORSTMODE | PIORST);
4900 if (ret < 0)
4901 goto bye;
4902 }
4903
4904 /*
4905 * Get device capabilities and select which we'll be using.
4906 */
4907 memset(&caps_cmd, 0, sizeof(caps_cmd));
4908 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4909 FW_CMD_REQUEST | FW_CMD_READ);
4910	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4911 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4912 &caps_cmd);
4913 if (ret < 0)
4914 goto bye;
4915
4916 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4917 if (!vf_acls)
4918 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4919 else
4920 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4921 } else if (vf_acls) {
4922 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4923 goto bye;
4924 }
4925 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4926 FW_CMD_REQUEST | FW_CMD_WRITE);
4927 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4928 NULL);
4929 if (ret < 0)
4930 goto bye;
4931
4932 /*
4933 * Tweak configuration based on system architecture, module
4934 * parameters, etc.
4935 */
4936 ret = adap_init0_tweaks(adapter);
4937 if (ret < 0)
4938 goto bye;
4939
4940 /*
4941 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4942 * mode which maps each Virtual Interface to its own section of
4943 * the RSS Table and we turn on all map and hash enables ...
4944 */
4945 adapter->flags |= RSS_TNLALLLOOKUP;
4946 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4947 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4948 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4949 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4950 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4951 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4952 if (ret < 0)
4953 goto bye;
4954
4955 /*
4956 * Set up our own fundamental resource provisioning ...
4957 */
4958 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4959 PFRES_NEQ, PFRES_NETHCTRL,
4960 PFRES_NIQFLINT, PFRES_NIQ,
4961 PFRES_TC, PFRES_NVI,
4962 FW_PFVF_CMD_CMASK_MASK,
4963 pfvfres_pmask(adapter, adapter->fn, 0),
4964 PFRES_NEXACTF,
4965 PFRES_R_CAPS, PFRES_WX_CAPS);
4966 if (ret < 0)
4967 goto bye;
4968
4969 /*
4970 * Perform low level SGE initialization. We need to do this before we
4971 * send the firmware the INITIALIZE command because that will cause
4972 * any other PF Drivers which are waiting for the Master
4973 * Initialization to proceed forward.
4974 */
4975 for (i = 0; i < SGE_NTIMERS - 1; i++)
4976 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4977 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4978 s->counter_val[0] = 1;
4979 for (i = 1; i < SGE_NCOUNTERS; i++)
4980 s->counter_val[i] = min(intr_cnt[i - 1],
4981 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4982 t4_sge_init(adapter);
4983
4984#ifdef CONFIG_PCI_IOV
4985 /*
4986 * Provision resource limits for Virtual Functions. We currently
4987 * grant them all the same static resource limits except for the Port
4988 * Access Rights Mask which we're assigning based on the PF. All of
4989 * the static provisioning stuff for both the PF and VF really needs
4990 * to be managed in a persistent manner for each device which the
4991 * firmware controls.
4992 */
4993 {
4994 int pf, vf;
4995
4996		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4997 if (num_vf[pf] <= 0)
4998 continue;
4999
5000 /* VF numbering starts at 1! */
5001 for (vf = 1; vf <= num_vf[pf]; vf++) {
5002 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5003 pf, vf,
5004 VFRES_NEQ, VFRES_NETHCTRL,
5005 VFRES_NIQFLINT, VFRES_NIQ,
5006 VFRES_TC, VFRES_NVI,
5007						  FW_PFVF_CMD_CMASK_MASK,
5008 pfvfres_pmask(
5009 adapter, pf, vf),
5010 VFRES_NEXACTF,
5011 VFRES_R_CAPS, VFRES_WX_CAPS);
5012 if (ret < 0)
5013 dev_warn(adapter->pdev_dev,
5014 "failed to "\
5015 "provision pf/vf=%d/%d; "
5016 "err=%d\n", pf, vf, ret);
5017 }
5018 }
5019 }
5020#endif
5021
5022 /*
5023 * Set up the default filter mode. Later we'll want to implement this
5024 * via a firmware command, etc. ... This needs to be done before the
5025	 * firmware initialization command ... If the selected set of fields
5026 * isn't equal to the default value, we'll need to make sure that the
5027 * field selections will fit in the 36-bit budget.
5028 */
5029 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5030		int j, bits = 0;
5031
5032 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5033 switch (tp_vlan_pri_map & (1 << j)) {
5034 case 0:
5035 /* compressed filter field not enabled */
5036 break;
5037 case FCOE_MASK:
5038 bits += 1;
5039 break;
5040 case PORT_MASK:
5041 bits += 3;
5042 break;
5043 case VNIC_ID_MASK:
5044 bits += 17;
5045 break;
5046 case VLAN_MASK:
5047 bits += 17;
5048 break;
5049 case TOS_MASK:
5050 bits += 8;
5051 break;
5052 case PROTOCOL_MASK:
5053 bits += 8;
5054 break;
5055 case ETHERTYPE_MASK:
5056 bits += 16;
5057 break;
5058 case MACMATCH_MASK:
5059 bits += 9;
5060 break;
5061 case MPSHITTYPE_MASK:
5062 bits += 3;
5063 break;
5064 case FRAGMENTATION_MASK:
5065 bits += 1;
5066 break;
5067 }
5068
5069 if (bits > 36) {
5070 dev_err(adapter->pdev_dev,
5071 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5072 " using %#x\n", tp_vlan_pri_map, bits,
5073 TP_VLAN_PRI_MAP_DEFAULT);
5074 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5075 }
5076 }
5077 v = tp_vlan_pri_map;
5078 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5079 &v, 1, TP_VLAN_PRI_MAP);
5080
5081 /*
5082	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5083 * to support any of the compressed filter fields above. Newer
5084 * versions of the firmware do this automatically but it doesn't hurt
5085 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5086 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5087 * since the firmware automatically turns this on and off when we have
5088 * a non-zero number of filters active (since it does have a
5089 * performance impact).
5090 */
5091 if (tp_vlan_pri_map)
5092 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5093 FIVETUPLELOOKUP_MASK,
5094 FIVETUPLELOOKUP_MASK);
5095
5096 /*
5097 * Tweak some settings.
5098 */
5099 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5100 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5101 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5102 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5103
5104 /*
5105 * Get basic stuff going by issuing the Firmware Initialize command.
5106 * Note that this _must_ be after all PFVF commands ...
5107 */
5108 ret = t4_fw_initialize(adapter, adapter->mbox);
5109 if (ret < 0)
5110 goto bye;
5111
5112 /*
5113 * Return successfully!
5114 */
5115 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5116 "driver parameters\n");
5117 return 0;
5118
5119 /*
5120 * Something bad happened. Return the error ...
5121 */
5122bye:
5123 return ret;
5124}
5125
5126static struct fw_info fw_info_array[] = {
5127 {
5128 .chip = CHELSIO_T4,
5129 .fs_name = FW4_CFNAME,
5130 .fw_mod_name = FW4_FNAME,
5131 .fw_hdr = {
5132 .chip = FW_HDR_CHIP_T4,
5133 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5134 .intfver_nic = FW_INTFVER(T4, NIC),
5135 .intfver_vnic = FW_INTFVER(T4, VNIC),
5136 .intfver_ri = FW_INTFVER(T4, RI),
5137 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5138 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5139 },
5140 }, {
5141 .chip = CHELSIO_T5,
5142 .fs_name = FW5_CFNAME,
5143 .fw_mod_name = FW5_FNAME,
5144 .fw_hdr = {
5145 .chip = FW_HDR_CHIP_T5,
5146 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5147 .intfver_nic = FW_INTFVER(T5, NIC),
5148 .intfver_vnic = FW_INTFVER(T5, VNIC),
5149 .intfver_ri = FW_INTFVER(T5, RI),
5150 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5151 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5152 },
5153 }
5154};
5155
5156static struct fw_info *find_fw_info(int chip)
5157{
5158 int i;
5159
5160 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5161 if (fw_info_array[i].chip == chip)
5162 return &fw_info_array[i];
5163 }
5164 return NULL;
5165}
5166
5167/*
5168 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5169 */
5170static int adap_init0(struct adapter *adap)
5171{
5172 int ret;
5173 u32 v, port_vec;
5174 enum dev_state state;
5175 u32 params[7], val[7];
5176	struct fw_caps_config_cmd caps_cmd;
5177	int reset = 1;
5178
5179 /*
5180 * Contact FW, advertising Master capability (and potentially forcing
5181 * ourselves as the Master PF if our module parameter force_init is
5182 * set).
5183 */
5184 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5185 force_init ? MASTER_MUST : MASTER_MAY,
5186 &state);
5187 if (ret < 0) {
5188 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5189 ret);
5190 return ret;
5191 }
5192 if (ret == adap->mbox)
5193 adap->flags |= MASTER_PF;
5194 if (force_init && state == DEV_STATE_INIT)
5195 state = DEV_STATE_UNINIT;
5196
5197 /*
5198 * If we're the Master PF Driver and the device is uninitialized,
5199 * then let's consider upgrading the firmware ... (We always want
5200 * to check the firmware version number in order to A. get it for
5201 * later reporting and B. to warn if the currently loaded firmware
5202 * is excessively mismatched relative to the driver.)
5203 */
5204 t4_get_fw_version(adap, &adap->params.fw_vers);
5205 t4_get_tp_version(adap, &adap->params.tp_vers);
5206	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5207 struct fw_info *fw_info;
5208 struct fw_hdr *card_fw;
5209 const struct firmware *fw;
5210 const u8 *fw_data = NULL;
5211 unsigned int fw_size = 0;
5212
5213 /* This is the firmware whose headers the driver was compiled
5214 * against
5215 */
5216 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5217 if (fw_info == NULL) {
5218 dev_err(adap->pdev_dev,
5219 "unable to get firmware info for chip %d.\n",
5220 CHELSIO_CHIP_VERSION(adap->params.chip));
5221 return -EINVAL;
5222		}
5223
5224 /* allocate memory to read the header of the firmware on the
5225 * card
5226 */
5227 card_fw = t4_alloc_mem(sizeof(*card_fw));
5228
5229		/* Get FW from /lib/firmware/ */
5230 ret = request_firmware(&fw, fw_info->fw_mod_name,
5231 adap->pdev_dev);
5232 if (ret < 0) {
5233 dev_err(adap->pdev_dev,
5234 "unable to load firmware image %s, error %d\n",
5235 fw_info->fw_mod_name, ret);
5236 } else {
5237 fw_data = fw->data;
5238 fw_size = fw->size;
5239 }
5240
5241 /* upgrade FW logic */
5242 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5243 state, &reset);
5244
5245 /* Cleaning up */
5246 if (fw != NULL)
5247 release_firmware(fw);
5248 t4_free_mem(card_fw);
5249
5250		if (ret < 0)
5251			goto bye;
5252	}
5253
5254 /*
5255 * Grab VPD parameters. This should be done after we establish a
5256 * connection to the firmware since some of the VPD parameters
5257 * (notably the Core Clock frequency) are retrieved via requests to
5258 * the firmware. On the other hand, we need these fairly early on
5259 * so we do this right after getting ahold of the firmware.
5260 */
5261 ret = get_vpd_params(adap, &adap->params.vpd);
5262 if (ret < 0)
5263 goto bye;
5264
5265	/*
5266 * Find out what ports are available to us. Note that we need to do
5267 * this before calling adap_init0_no_config() since it needs nports
5268 * and portvec ...
5269 */
5270 v =
5271 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5272 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5273 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5274 if (ret < 0)
5275 goto bye;
5276
5277 adap->params.nports = hweight32(port_vec);
5278 adap->params.portvec = port_vec;
5279
5280 /*
5281 * If the firmware is initialized already (and we're not forcing a
5282 * master initialization), note that we're living with existing
5283 * adapter parameters. Otherwise, it's time to try initializing the
5284 * adapter ...
5285 */
5286 if (state == DEV_STATE_INIT) {
5287 dev_info(adap->pdev_dev, "Coming up as %s: "\
5288 "Adapter already initialized\n",
5289 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5290 adap->flags |= USING_SOFT_PARAMS;
5291 } else {
5292 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5293 "Initializing adapter\n");
5294
5295 /*
5296 * If the firmware doesn't support Configuration
5297		 * Files, warn the user and exit.
5298 */
5299 if (ret < 0)
5300			dev_warn(adap->pdev_dev, "Firmware doesn't support "
5301				 "configuration file.\n");
5302 if (force_old_init)
5303 ret = adap_init0_no_config(adap, reset);
5304 else {
5305 /*
5306 * Find out whether we're dealing with a version of
5307 * the firmware which has configuration file support.
5308			 */
5309 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5310 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5311 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5312 params, val);
5313
5314 /*
5315 * If the firmware doesn't support Configuration
5316 * Files, use the old Driver-based, hard-wired
5317 * initialization. Otherwise, try using the
5318 * Configuration File support and fall back to the
5319 * Driver-based initialization if there's no
5320 * Configuration File found.
5321 */
5322 if (ret < 0)
5323 ret = adap_init0_no_config(adap, reset);
5324 else {
5325 /*
5326 * The firmware provides us with a memory
5327 * buffer where we can load a Configuration
5328 * File from the host if we want to override
5329 * the Configuration File in flash.
5330 */
5331
5332 ret = adap_init0_config(adap, reset);
5333 if (ret == -ENOENT) {
5334 dev_info(adap->pdev_dev,
5335 "No Configuration File present "
5336					 "on adapter. Using hard-wired "
5337 "configuration parameters.\n");
5338 ret = adap_init0_no_config(adap, reset);
5339 }
5340 }
5341 }
5342 if (ret < 0) {
5343 dev_err(adap->pdev_dev,
5344 "could not initialize adapter, error %d\n",
5345 -ret);
5346 goto bye;
5347 }
5348 }
5349
5350 /*
5351 * If we're living with non-hard-coded parameters (either from a
5352 * Firmware Configuration File or values programmed by a different PF
5353 * Driver), give the SGE code a chance to pull in anything that it
5354 * needs ... Note that this must be called after we retrieve our VPD
5355 * parameters in order to know how to convert core ticks to seconds.
5356 */
5357 if (adap->flags & USING_SOFT_PARAMS) {
5358 ret = t4_sge_init(adap);
5359 if (ret < 0)
5360 goto bye;
5361 }
5362
5363 if (is_bypass_device(adap->pdev->device))
5364 adap->params.bypass = 1;
5365
5366 /*
5367 * Grab some of our basic fundamental operating parameters.
5368 */
5369#define FW_PARAM_DEV(param) \
5370 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5371 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5372
5373#define FW_PARAM_PFVF(param) \
5374 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5375 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5376 FW_PARAMS_PARAM_Y(0) | \
5377 FW_PARAMS_PARAM_Z(0)
5378
5379	params[0] = FW_PARAM_PFVF(EQ_START);
5380 params[1] = FW_PARAM_PFVF(L2T_START);
5381 params[2] = FW_PARAM_PFVF(L2T_END);
5382 params[3] = FW_PARAM_PFVF(FILTER_START);
5383 params[4] = FW_PARAM_PFVF(FILTER_END);
5384	params[5] = FW_PARAM_PFVF(IQFLINT_START);
5385	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5386 if (ret < 0)
5387 goto bye;
5388 adap->sge.egr_start = val[0];
5389 adap->l2t_start = val[1];
5390 adap->l2t_end = val[2];
5391 adap->tids.ftid_base = val[3];
5392 adap->tids.nftids = val[4] - val[3] + 1;
5393	adap->sge.ingr_start = val[5];
5394
5395 /* query params related to active filter region */
5396 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5397 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5398 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5399	/* If an active filter region is provisioned, we enable establishing
5400	 * offload connections through firmware work requests
5401 */
5402 if ((val[0] != val[1]) && (ret >= 0)) {
5403 adap->flags |= FW_OFLD_CONN;
5404 adap->tids.aftid_base = val[0];
5405 adap->tids.aftid_end = val[1];
5406 }
5407
5408 /* If we're running on newer firmware, let it know that we're
5409 * prepared to deal with encapsulated CPL messages. Older
5410 * firmware won't understand this and we'll just get
5411 * unencapsulated messages ...
5412 */
5413 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5414 val[0] = 1;
5415 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5416
5417 /*
5418 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5419 * capability. Earlier versions of the firmware didn't have the
5420 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5421 * permission to use ULPTX MEMWRITE DSGL.
5422 */
5423 if (is_t4(adap->params.chip)) {
5424 adap->params.ulptx_memwrite_dsgl = false;
5425 } else {
5426 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5427 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5428 1, params, val);
5429 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5430 }
5431
5432 /*
5433 * Get device capabilities so we can determine what resources we need
5434 * to manage.
5435 */
5436 memset(&caps_cmd, 0, sizeof(caps_cmd));
5437	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5438				     FW_CMD_REQUEST | FW_CMD_READ);
5439	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5440 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5441 &caps_cmd);
5442 if (ret < 0)
5443 goto bye;
5444
5445	if (caps_cmd.ofldcaps) {
5446 /* query offload-related parameters */
5447 params[0] = FW_PARAM_DEV(NTID);
5448 params[1] = FW_PARAM_PFVF(SERVER_START);
5449 params[2] = FW_PARAM_PFVF(SERVER_END);
5450 params[3] = FW_PARAM_PFVF(TDDP_START);
5451 params[4] = FW_PARAM_PFVF(TDDP_END);
5452 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5453 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5454 params, val);
5455 if (ret < 0)
5456 goto bye;
5457 adap->tids.ntids = val[0];
5458 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5459 adap->tids.stid_base = val[1];
5460 adap->tids.nstids = val[2] - val[1] + 1;
5461 /*
5462		 * Set up the server filter region.  Divide the available
5463		 * filter region into two parts: regular filters get 1/3rd
5464		 * and server filters get 2/3rds.  This is only enabled if
5465		 * the workaround path is enabled.
5466		 * 1. Regular filters.
5467		 * 2. Server filters: these are special filters which are
5468		 *    used to redirect SYN packets to the offload queue.
5469 */
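		/* Example (hypothetical sizing, for illustration only): with
		 * nftids == 3072 the split below leaves 1024 regular filter
		 * IDs at ftid_base and 2048 server filter IDs at sftid_base.
		 */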
5470 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5471 adap->tids.sftid_base = adap->tids.ftid_base +
5472 DIV_ROUND_UP(adap->tids.nftids, 3);
5473 adap->tids.nsftids = adap->tids.nftids -
5474 DIV_ROUND_UP(adap->tids.nftids, 3);
5475 adap->tids.nftids = adap->tids.sftid_base -
5476 adap->tids.ftid_base;
5477 }
5478 adap->vres.ddp.start = val[3];
5479 adap->vres.ddp.size = val[4] - val[3] + 1;
5480 adap->params.ofldq_wr_cred = val[5];
5481
5482 adap->params.offload = 1;
5483 }
5484	if (caps_cmd.rdmacaps) {
5485 params[0] = FW_PARAM_PFVF(STAG_START);
5486 params[1] = FW_PARAM_PFVF(STAG_END);
5487 params[2] = FW_PARAM_PFVF(RQ_START);
5488 params[3] = FW_PARAM_PFVF(RQ_END);
5489 params[4] = FW_PARAM_PFVF(PBL_START);
5490 params[5] = FW_PARAM_PFVF(PBL_END);
5491 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5492 params, val);
5493 if (ret < 0)
5494 goto bye;
5495 adap->vres.stag.start = val[0];
5496 adap->vres.stag.size = val[1] - val[0] + 1;
5497 adap->vres.rq.start = val[2];
5498 adap->vres.rq.size = val[3] - val[2] + 1;
5499 adap->vres.pbl.start = val[4];
5500 adap->vres.pbl.size = val[5] - val[4] + 1;
5501
5502 params[0] = FW_PARAM_PFVF(SQRQ_START);
5503 params[1] = FW_PARAM_PFVF(SQRQ_END);
5504 params[2] = FW_PARAM_PFVF(CQ_START);
5505 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5506 params[4] = FW_PARAM_PFVF(OCQ_START);
5507 params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
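	/*
	 * Note: each resource range above is reported by the firmware as an
	 * inclusive [start, end] pair, hence the "end - start + 1" size
	 * computations.  E.g. (hypothetical values) start = 0x1000 and
	 * end = 0x1fff describe 0x1000 usable entries.
	 */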
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with
	 * EIO, the firmware is either not operating within its spec or
	 * something catastrophic happened to the HW/FW, so stop issuing
	 * commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
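/*
 * In outline, the PCI core drives these callbacks in order during EEH
 * recovery: .error_detected first (quiesce the device), then .slot_reset
 * once the link/slot has been reset, and finally .resume when traffic may
 * be restarted.
 */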

static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
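/*
 * Example use of the helper above: init_rspq(&q, 6, 0, 512, 64) selects
 * holdoff timer index 6, enables the packet-count threshold with counter
 * index 0 (any pkt_cnt_idx < SGE_NCOUNTERS counts as valid), and sizes the
 * queue at 512 entries of 64 bytes each.
 */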

/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings
 * can be modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
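/*
 * Worked example for cfg_queues() (illustrative numbers): on a 2-port
 * adapter where both ports are 10G, q10g = (MAX_ETH_QSETS - 0) / 2, capped
 * by netif_get_num_default_rss_queues() (typically min(8, nr_cpus)); with
 * the cap at 8, each port gets 8 queue sets and s->ethqsets ends up at 16.
 */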

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
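/*
 * Example: two ports holding 8 queue sets each (16 total) reduced with
 * n = 10 lose one queue set per port per pass until ethqsets reaches 10,
 * leaving a 5/5 split; first_qset is then recomputed so each port's
 * queue-set range stays contiguous.
 */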

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}
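/*
 * Worked example for the vector accounting above (illustrative numbers):
 * a 2-port offload-capable adapter with max_ethqsets = 16, ofldqsets = 8
 * and rdmaqs = 2 asks for want = 16 + 2 + 2 + 8 = 28 vectors but accepts
 * as few as need = 2 + 2 + 2*2 = 8; anything granted beyond the minima
 * goes to the NIC queues first.
 */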

#undef EXTRA_VECS
5844
91744948 5845static int init_rss(struct adapter *adap)
671b0060
DM
5846{
5847 unsigned int i, j;
5848
5849 for_each_port(adap, i) {
5850 struct port_info *pi = adap2pinfo(adap, i);
5851
5852 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5853 if (!pi->rss)
5854 return -ENOMEM;
5855 for (j = 0; j < pi->rss_size; j++)
278bc429 5856 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
5857 }
5858 return 0;
5859}
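/*
 * ethtool_rxfh_indir_default(j, n) is simply j % n, so the loop above
 * fills each port's RSS indirection table round-robin across its nqsets
 * queues: 0, 1, ..., nqsets - 1, 0, 1, ...
 */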

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
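/*
 * Sample (hypothetical) console output from the two netdev_info() calls:
 *   eth0: Chelsio T580-CR rev 1 10G/40GBASE-SR RNIC PCIe x8 MSI-X
 *   eth0: S/N: PT01234567, P/N: 110-1171-50
 */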

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128 B in size.  Write coalescing is only
		 * enabled when the SGE_EGRESS_QUEUES_PER_PAGE_PF register
		 * value for the queue is less than the number of segments
		 * that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
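		/*
		 * With the common 4 KB page size num_seg = 4096 / 128 = 32,
		 * so up to 32 egress queues may share a page before write
		 * coalescing must be refused.
		 */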
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
		return ret;
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return 0;
}

static void __exit cxgb4_cleanup_module(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);