/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_dcb.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give the PF access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
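                /*
                 * (Worked example, added for illustration: if portvec = 0xa,
                 * then portvec & (portvec - 1) = 0x8, so the first pmask is
                 * 0xa ^ 0x8 = 0x2, the lowest set bit.)
                 */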
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static const struct pci_device_id cxgb4_pci_tbl[] = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x400d, -1),
        CH_DEVICE(0x400e, -1),
        CH_DEVICE(0x4080, -1),
        CH_DEVICE(0x4081, -1),
        CH_DEVICE(0x4082, -1),
        CH_DEVICE(0x4083, -1),
        CH_DEVICE(0x4084, -1),
        CH_DEVICE(0x4085, -1),
        CH_DEVICE(0x4086, -1),
        CH_DEVICE(0x4087, -1),
        CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x4480, 4),
        CH_DEVICE(0x4481, 4),
        CH_DEVICE(0x4482, 4),
        CH_DEVICE(0x4483, 4),
        CH_DEVICE(0x4484, 4),
        CH_DEVICE(0x4485, 4),
        CH_DEVICE(0x4486, 4),
        CH_DEVICE(0x4487, 4),
        CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5014, 4),
        CH_DEVICE(0x5015, 4),
        CH_DEVICE(0x5080, 4),
        CH_DEVICE(0x5081, 4),
        CH_DEVICE(0x5082, 4),
        CH_DEVICE(0x5083, 4),
        CH_DEVICE(0x5084, 4),
        CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5086, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        CH_DEVICE(0x5414, 4),
        CH_DEVICE(0x5415, 4),
        CH_DEVICE(0x5480, 4),
        CH_DEVICE(0x5481, 4),
        CH_DEVICE(0x5482, 4),
        CH_DEVICE(0x5483, 4),
        CH_DEVICE(0x5484, 4),
        CH_DEVICE(0x5485, 4),
        CH_DEVICE(0x5486, 4),
        { 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
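/*
 * (Illustrative invocation, not from the original source: loading the module
 * with "modprobe cxgb4 msi=1" restricts the driver to MSI or INTx even on
 * platforms where MSI-X is available.)
 */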

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
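/*
 * (With the standard 14-byte Ethernet header, the 2-byte pad places the IP
 * header at offset 16, i.e. on a 4-byte boundary; this is the same idea as
 * the generic NET_IP_ALIGN.)
 */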

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
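/*
 * (Arithmetic check on the default selection above, added for illustration:
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, which fits the 36-bit compressed-header
 * budget with 3 bits to spare.)
 */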
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
                                            &name, &value);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

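        /*
         * (Note, added for illustration: addresses are handed to the firmware
         * in batches of up to ARRAY_SIZE(addr) == 7 entries, which appears to
         * be the most a single t4_alloc_mac_filt() mailbox command can carry.)
         */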
        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
            (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_GET(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 * write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
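                /*
                 * (Non-MSI-X case: the negative msi_idx below appears to
                 * encode the forwarded-interrupt queue's absolute ID so that
                 * t4_sge_alloc_rxq() can tell it apart from a real MSI-X
                 * vector index.)
                 */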
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_rdmaciq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmaciq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_ciq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL :
                                MPS_T5_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                         f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
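        /* (Explanatory note: __GFP_NOFAIL makes the allocator retry
         * indefinitely instead of returning NULL, which is why there is no
         * NULL check on skb here.)
         */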
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
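                        /* (e.g. a frame with VLAN PCP 5, TCI bits 15:13,
                         * would map to TX Queue 5 here)
                         */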
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK ",
        "TxFramesOK ",
        "TxBroadcastFrames ",
        "TxMulticastFrames ",
        "TxUnicastFrames ",
        "TxErrorFrames ",

        "TxFrames64 ",
        "TxFrames65To127 ",
        "TxFrames128To255 ",
        "TxFrames256To511 ",
        "TxFrames512To1023 ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax ",

        "TxFramesDropped ",
        "TxPauseFrames ",
        "TxPPP0Frames ",
        "TxPPP1Frames ",
        "TxPPP2Frames ",
        "TxPPP3Frames ",
        "TxPPP4Frames ",
        "TxPPP5Frames ",
        "TxPPP6Frames ",
        "TxPPP7Frames ",

        "RxOctetsOK ",
        "RxFramesOK ",
        "RxBroadcastFrames ",
        "RxMulticastFrames ",
        "RxUnicastFrames ",

        "RxFramesTooLong ",
        "RxJabberErrors ",
        "RxFCSErrors ",
        "RxLengthErrors ",
        "RxSymbolErrors ",
        "RxRuntFrames ",

        "RxFrames64 ",
        "RxFrames65To127 ",
        "RxFrames128To255 ",
        "RxFrames256To511 ",
        "RxFrames512To1023 ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax ",

        "RxPauseFrames ",
        "RxPPP0Frames ",
        "RxPPP1Frames ",
        "RxPPP2Frames ",
        "RxPPP3Frames ",
        "RxPPP4Frames ",
        "RxPPP5Frames ",
        "RxPPP6Frames ",
        "RxPPP7Frames ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc ",
        "RxBG1FramesTrunc ",
        "RxBG2FramesTrunc ",
        "RxBG3FramesTrunc ",

        "TSO ",
        "TxCsumOffload ",
        "RxCsumGood ",
        "VLANextractions ",
        "VLANinsertions ",
        "GROpackets ",
        "GROmerged ",
        "WriteCoalSuccess ",
        "WriteCoalFail ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                data++;
                *data = val2;
                data++;
        } else {
                memset(data, 0, 2 * sizeof(u64));
                data += 2;
        }
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->params.chip) |
                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
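
/*
 * (Worked example, assuming CHELSIO_CHIP_VERSION() yields 0x4 for a T4 part:
 * a revision-2 T4 would report 0x4 | (2 << 10) | (1 << 16) = 0x10804.)
 */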

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11110,
                0x11118, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        static const unsigned int t5_reg_ranges[] = {
                0x1008, 0x1148,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
                0x3060, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
                0x35ec, 0x35ec,
                0x3600, 0x5624,
                0x56cc, 0x575c,
                0x580c, 0x5814,
                0x5890, 0x58bc,
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
                0x5b9c, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
                0x77c0, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
                0x8dc0, 0x8de0,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f84,
                0x8fc0, 0x90f8,
                0x9400, 0x9470,
                0x9600, 0x96f4,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0x11088,
                0x1109c, 0x11110,
                0x11118, 0x1117c,
                0x11190, 0x11204,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x19290,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
                0x19c00, 0x19c60,
                0x19c94, 0x19e10,
                0x19e50, 0x19f34,
                0x19f40, 0x19f50,
                0x19f90, 0x19fe4,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e008, 0x1e00c,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e290,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
2003 0x1e300, 0x1e384,
2004 0x1e3c0, 0x1e3c8,
2005 0x1e408, 0x1e40c,
2006 0x1e440, 0x1e44c,
2007 0x1e684, 0x1e690,
2008 0x1e6c0, 0x1e6c0,
2009 0x1e6e0, 0x1e6e0,
2010 0x1e700, 0x1e784,
2011 0x1e7c0, 0x1e7c8,
2012 0x1e808, 0x1e80c,
2013 0x1e840, 0x1e84c,
2014 0x1ea84, 0x1ea90,
2015 0x1eac0, 0x1eac0,
2016 0x1eae0, 0x1eae0,
2017 0x1eb00, 0x1eb84,
2018 0x1ebc0, 0x1ebc8,
2019 0x1ec08, 0x1ec0c,
2020 0x1ec40, 0x1ec4c,
2021 0x1ee84, 0x1ee90,
2022 0x1eec0, 0x1eec0,
2023 0x1eee0, 0x1eee0,
2024 0x1ef00, 0x1ef84,
2025 0x1efc0, 0x1efc8,
2026 0x1f008, 0x1f00c,
2027 0x1f040, 0x1f04c,
2028 0x1f284, 0x1f290,
2029 0x1f2c0, 0x1f2c0,
2030 0x1f2e0, 0x1f2e0,
2031 0x1f300, 0x1f384,
2032 0x1f3c0, 0x1f3c8,
2033 0x1f408, 0x1f40c,
2034 0x1f440, 0x1f44c,
2035 0x1f684, 0x1f690,
2036 0x1f6c0, 0x1f6c0,
2037 0x1f6e0, 0x1f6e0,
2038 0x1f700, 0x1f784,
2039 0x1f7c0, 0x1f7c8,
2040 0x1f808, 0x1f80c,
2041 0x1f840, 0x1f84c,
2042 0x1fa84, 0x1fa90,
2043 0x1fac0, 0x1fac0,
2044 0x1fae0, 0x1fae0,
2045 0x1fb00, 0x1fb84,
2046 0x1fbc0, 0x1fbc8,
2047 0x1fc08, 0x1fc0c,
2048 0x1fc40, 0x1fc4c,
2049 0x1fe84, 0x1fe90,
2050 0x1fec0, 0x1fec0,
2051 0x1fee0, 0x1fee0,
2052 0x1ff00, 0x1ff84,
2053 0x1ffc0, 0x1ffc8,
2054 0x30000, 0x30030,
2055 0x30100, 0x30144,
2056 0x30190, 0x301d0,
2057 0x30200, 0x30318,
2058 0x30400, 0x3052c,
2059 0x30540, 0x3061c,
2060 0x30800, 0x30834,
2061 0x308c0, 0x30908,
2062 0x30910, 0x309ac,
2063 0x30a00, 0x30a04,
2064 0x30a0c, 0x30a2c,
2065 0x30a44, 0x30a50,
2066 0x30a74, 0x30c24,
2067 0x30d08, 0x30d14,
2068 0x30d1c, 0x30d20,
2069 0x30d3c, 0x30d50,
2070 0x31200, 0x3120c,
2071 0x31220, 0x31220,
2072 0x31240, 0x31240,
2073 0x31600, 0x31600,
2074 0x31608, 0x3160c,
2075 0x31a00, 0x31a1c,
2076 0x31e04, 0x31e20,
2077 0x31e38, 0x31e3c,
2078 0x31e80, 0x31e80,
2079 0x31e88, 0x31ea8,
2080 0x31eb0, 0x31eb4,
2081 0x31ec8, 0x31ed4,
2082 0x31fb8, 0x32004,
2083 0x32208, 0x3223c,
2084 0x32600, 0x32630,
2085 0x32a00, 0x32abc,
2086 0x32b00, 0x32b70,
2087 0x33000, 0x33048,
2088 0x33060, 0x3309c,
2089 0x330f0, 0x33148,
2090 0x33160, 0x3319c,
2091 0x331f0, 0x332e4,
2092 0x332f8, 0x333e4,
2093 0x333f8, 0x33448,
2094 0x33460, 0x3349c,
2095 0x334f0, 0x33548,
2096 0x33560, 0x3359c,
2097 0x335f0, 0x336e4,
2098 0x336f8, 0x337e4,
2099 0x337f8, 0x337fc,
2100 0x33814, 0x33814,
2101 0x3382c, 0x3382c,
2102 0x33880, 0x3388c,
2103 0x338e8, 0x338ec,
2104 0x33900, 0x33948,
2105 0x33960, 0x3399c,
2106 0x339f0, 0x33ae4,
2107 0x33af8, 0x33b10,
2108 0x33b28, 0x33b28,
2109 0x33b3c, 0x33b50,
2110 0x33bf0, 0x33c10,
2111 0x33c28, 0x33c28,
2112 0x33c3c, 0x33c50,
2113 0x33cf0, 0x33cfc,
2114 0x34000, 0x34030,
2115 0x34100, 0x34144,
2116 0x34190, 0x341d0,
2117 0x34200, 0x34318,
2118 0x34400, 0x3452c,
2119 0x34540, 0x3461c,
2120 0x34800, 0x34834,
2121 0x348c0, 0x34908,
2122 0x34910, 0x349ac,
2123 0x34a00, 0x34a04,
2124 0x34a0c, 0x34a2c,
2125 0x34a44, 0x34a50,
2126 0x34a74, 0x34c24,
2127 0x34d08, 0x34d14,
2128 0x34d1c, 0x34d20,
2129 0x34d3c, 0x34d50,
2130 0x35200, 0x3520c,
2131 0x35220, 0x35220,
2132 0x35240, 0x35240,
2133 0x35600, 0x35600,
2134 0x35608, 0x3560c,
2135 0x35a00, 0x35a1c,
2136 0x35e04, 0x35e20,
2137 0x35e38, 0x35e3c,
2138 0x35e80, 0x35e80,
2139 0x35e88, 0x35ea8,
2140 0x35eb0, 0x35eb4,
2141 0x35ec8, 0x35ed4,
2142 0x35fb8, 0x36004,
2143 0x36208, 0x3623c,
2144 0x36600, 0x36630,
2145 0x36a00, 0x36abc,
2146 0x36b00, 0x36b70,
2147 0x37000, 0x37048,
2148 0x37060, 0x3709c,
2149 0x370f0, 0x37148,
2150 0x37160, 0x3719c,
2151 0x371f0, 0x372e4,
2152 0x372f8, 0x373e4,
2153 0x373f8, 0x37448,
2154 0x37460, 0x3749c,
2155 0x374f0, 0x37548,
2156 0x37560, 0x3759c,
2157 0x375f0, 0x376e4,
2158 0x376f8, 0x377e4,
2159 0x377f8, 0x377fc,
2160 0x37814, 0x37814,
2161 0x3782c, 0x3782c,
2162 0x37880, 0x3788c,
2163 0x378e8, 0x378ec,
2164 0x37900, 0x37948,
2165 0x37960, 0x3799c,
2166 0x379f0, 0x37ae4,
2167 0x37af8, 0x37b10,
2168 0x37b28, 0x37b28,
2169 0x37b3c, 0x37b50,
2170 0x37bf0, 0x37c10,
2171 0x37c28, 0x37c28,
2172 0x37c3c, 0x37c50,
2173 0x37cf0, 0x37cfc,
2174 0x38000, 0x38030,
2175 0x38100, 0x38144,
2176 0x38190, 0x381d0,
2177 0x38200, 0x38318,
2178 0x38400, 0x3852c,
2179 0x38540, 0x3861c,
2180 0x38800, 0x38834,
2181 0x388c0, 0x38908,
2182 0x38910, 0x389ac,
2183 0x38a00, 0x38a04,
2184 0x38a0c, 0x38a2c,
2185 0x38a44, 0x38a50,
2186 0x38a74, 0x38c24,
2187 0x38d08, 0x38d14,
2188 0x38d1c, 0x38d20,
2189 0x38d3c, 0x38d50,
2190 0x39200, 0x3920c,
2191 0x39220, 0x39220,
2192 0x39240, 0x39240,
2193 0x39600, 0x39600,
2194 0x39608, 0x3960c,
2195 0x39a00, 0x39a1c,
2196 0x39e04, 0x39e20,
2197 0x39e38, 0x39e3c,
2198 0x39e80, 0x39e80,
2199 0x39e88, 0x39ea8,
2200 0x39eb0, 0x39eb4,
2201 0x39ec8, 0x39ed4,
2202 0x39fb8, 0x3a004,
2203 0x3a208, 0x3a23c,
2204 0x3a600, 0x3a630,
2205 0x3aa00, 0x3aabc,
2206 0x3ab00, 0x3ab70,
2207 0x3b000, 0x3b048,
2208 0x3b060, 0x3b09c,
2209 0x3b0f0, 0x3b148,
2210 0x3b160, 0x3b19c,
2211 0x3b1f0, 0x3b2e4,
2212 0x3b2f8, 0x3b3e4,
2213 0x3b3f8, 0x3b448,
2214 0x3b460, 0x3b49c,
2215 0x3b4f0, 0x3b548,
2216 0x3b560, 0x3b59c,
2217 0x3b5f0, 0x3b6e4,
2218 0x3b6f8, 0x3b7e4,
2219 0x3b7f8, 0x3b7fc,
2220 0x3b814, 0x3b814,
2221 0x3b82c, 0x3b82c,
2222 0x3b880, 0x3b88c,
2223 0x3b8e8, 0x3b8ec,
2224 0x3b900, 0x3b948,
2225 0x3b960, 0x3b99c,
2226 0x3b9f0, 0x3bae4,
2227 0x3baf8, 0x3bb10,
2228 0x3bb28, 0x3bb28,
2229 0x3bb3c, 0x3bb50,
2230 0x3bbf0, 0x3bc10,
2231 0x3bc28, 0x3bc28,
2232 0x3bc3c, 0x3bc50,
2233 0x3bcf0, 0x3bcfc,
2234 0x3c000, 0x3c030,
2235 0x3c100, 0x3c144,
2236 0x3c190, 0x3c1d0,
2237 0x3c200, 0x3c318,
2238 0x3c400, 0x3c52c,
2239 0x3c540, 0x3c61c,
2240 0x3c800, 0x3c834,
2241 0x3c8c0, 0x3c908,
2242 0x3c910, 0x3c9ac,
2243 0x3ca00, 0x3ca04,
2244 0x3ca0c, 0x3ca2c,
2245 0x3ca44, 0x3ca50,
2246 0x3ca74, 0x3cc24,
2247 0x3cd08, 0x3cd14,
2248 0x3cd1c, 0x3cd20,
2249 0x3cd3c, 0x3cd50,
2250 0x3d200, 0x3d20c,
2251 0x3d220, 0x3d220,
2252 0x3d240, 0x3d240,
2253 0x3d600, 0x3d600,
2254 0x3d608, 0x3d60c,
2255 0x3da00, 0x3da1c,
2256 0x3de04, 0x3de20,
2257 0x3de38, 0x3de3c,
2258 0x3de80, 0x3de80,
2259 0x3de88, 0x3dea8,
2260 0x3deb0, 0x3deb4,
2261 0x3dec8, 0x3ded4,
2262 0x3dfb8, 0x3e004,
2263 0x3e208, 0x3e23c,
2264 0x3e600, 0x3e630,
2265 0x3ea00, 0x3eabc,
2266 0x3eb00, 0x3eb70,
2267 0x3f000, 0x3f048,
2268 0x3f060, 0x3f09c,
2269 0x3f0f0, 0x3f148,
2270 0x3f160, 0x3f19c,
2271 0x3f1f0, 0x3f2e4,
2272 0x3f2f8, 0x3f3e4,
2273 0x3f3f8, 0x3f448,
2274 0x3f460, 0x3f49c,
2275 0x3f4f0, 0x3f548,
2276 0x3f560, 0x3f59c,
2277 0x3f5f0, 0x3f6e4,
2278 0x3f6f8, 0x3f7e4,
2279 0x3f7f8, 0x3f7fc,
2280 0x3f814, 0x3f814,
2281 0x3f82c, 0x3f82c,
2282 0x3f880, 0x3f88c,
2283 0x3f8e8, 0x3f8ec,
2284 0x3f900, 0x3f948,
2285 0x3f960, 0x3f99c,
2286 0x3f9f0, 0x3fae4,
2287 0x3faf8, 0x3fb10,
2288 0x3fb28, 0x3fb28,
2289 0x3fb3c, 0x3fb50,
2290 0x3fbf0, 0x3fc10,
2291 0x3fc28, 0x3fc28,
2292 0x3fc3c, 0x3fc50,
2293 0x3fcf0, 0x3fcfc,
2294 0x40000, 0x4000c,
2295 0x40040, 0x40068,
2296 0x40080, 0x40144,
2297 0x40180, 0x4018c,
2298 0x40200, 0x40298,
2299 0x402ac, 0x4033c,
2300 0x403f8, 0x403fc,
2301 0x41304, 0x413c4,
2302 0x41400, 0x4141c,
2303 0x41480, 0x414d0,
2304 0x44000, 0x44078,
2305 0x440c0, 0x44278,
2306 0x442c0, 0x44478,
2307 0x444c0, 0x44678,
2308 0x446c0, 0x44878,
2309 0x448c0, 0x449fc,
2310 0x45000, 0x45068,
2311 0x45080, 0x45084,
2312 0x450a0, 0x450b0,
2313 0x45200, 0x45268,
2314 0x45280, 0x45284,
2315 0x452a0, 0x452b0,
2316 0x460c0, 0x460e4,
2317 0x47000, 0x4708c,
2318 0x47200, 0x47250,
2319 0x47400, 0x47420,
2320 0x47600, 0x47618,
2321 0x47800, 0x47814,
2322 0x48000, 0x4800c,
2323 0x48040, 0x48068,
2324 0x48080, 0x48144,
2325 0x48180, 0x4818c,
2326 0x48200, 0x48298,
2327 0x482ac, 0x4833c,
2328 0x483f8, 0x483fc,
2329 0x49304, 0x493c4,
2330 0x49400, 0x4941c,
2331 0x49480, 0x494d0,
2332 0x4c000, 0x4c078,
2333 0x4c0c0, 0x4c278,
2334 0x4c2c0, 0x4c478,
2335 0x4c4c0, 0x4c678,
2336 0x4c6c0, 0x4c878,
2337 0x4c8c0, 0x4c9fc,
2338 0x4d000, 0x4d068,
2339 0x4d080, 0x4d084,
2340 0x4d0a0, 0x4d0b0,
2341 0x4d200, 0x4d268,
2342 0x4d280, 0x4d284,
2343 0x4d2a0, 0x4d2b0,
2344 0x4e0c0, 0x4e0e4,
2345 0x4f000, 0x4f08c,
2346 0x4f200, 0x4f250,
2347 0x4f400, 0x4f420,
2348 0x4f600, 0x4f618,
2349 0x4f800, 0x4f814,
2350 0x50000, 0x500cc,
2351 0x50400, 0x50400,
2352 0x50800, 0x508cc,
2353 0x50c00, 0x50c00,
2354 0x51000, 0x5101c,
2355 0x51300, 0x51308,
2356 };
2357
2358 int i;
2359 struct adapter *ap = netdev2adap(dev);
2360 	const unsigned int *reg_ranges;
2361 int arr_size = 0, buf_size = 0;
2362
2363 if (is_t4(ap->params.chip)) {
2364 reg_ranges = &t4_reg_ranges[0];
2365 arr_size = ARRAY_SIZE(t4_reg_ranges);
2366 buf_size = T4_REGMAP_SIZE;
2367 } else {
2368 reg_ranges = &t5_reg_ranges[0];
2369 arr_size = ARRAY_SIZE(t5_reg_ranges);
2370 buf_size = T5_REGMAP_SIZE;
2371 }
2372
2373 regs->version = mk_adap_vers(ap);
2374
2375 memset(buf, 0, buf_size);
2376 for (i = 0; i < arr_size; i += 2)
2377 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2378 }
2379
2380 static int restart_autoneg(struct net_device *dev)
2381 {
2382 struct port_info *p = netdev_priv(dev);
2383
2384 if (!netif_running(dev))
2385 return -EAGAIN;
2386 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2387 return -EINVAL;
2388 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2389 return 0;
2390 }
2391
2392 static int identify_port(struct net_device *dev,
2393 enum ethtool_phys_id_state state)
2394 {
2395 unsigned int val;
2396 struct adapter *adap = netdev2adap(dev);
2397
2398 if (state == ETHTOOL_ID_ACTIVE)
2399 val = 0xffff;
2400 else if (state == ETHTOOL_ID_INACTIVE)
2401 val = 0;
2402 else
2403 return -EINVAL;
2404
2405 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2406 }
2407
2408 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2409 {
2410 unsigned int v = 0;
2411
2412 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2413 type == FW_PORT_TYPE_BT_XAUI) {
2414 v |= SUPPORTED_TP;
2415 if (caps & FW_PORT_CAP_SPEED_100M)
2416 v |= SUPPORTED_100baseT_Full;
2417 if (caps & FW_PORT_CAP_SPEED_1G)
2418 v |= SUPPORTED_1000baseT_Full;
2419 if (caps & FW_PORT_CAP_SPEED_10G)
2420 v |= SUPPORTED_10000baseT_Full;
2421 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2422 v |= SUPPORTED_Backplane;
2423 if (caps & FW_PORT_CAP_SPEED_1G)
2424 v |= SUPPORTED_1000baseKX_Full;
2425 if (caps & FW_PORT_CAP_SPEED_10G)
2426 v |= SUPPORTED_10000baseKX4_Full;
2427 } else if (type == FW_PORT_TYPE_KR)
2428 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2429 else if (type == FW_PORT_TYPE_BP_AP)
2430 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2431 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2432 else if (type == FW_PORT_TYPE_BP4_AP)
2433 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2434 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2435 SUPPORTED_10000baseKX4_Full;
2436 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2437 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2438 v |= SUPPORTED_FIBRE;
2439 else if (type == FW_PORT_TYPE_BP40_BA)
2440 v |= SUPPORTED_40000baseSR4_Full;
2441
2442 if (caps & FW_PORT_CAP_ANEG)
2443 v |= SUPPORTED_Autoneg;
2444 return v;
2445 }
2446
2447 static unsigned int to_fw_linkcaps(unsigned int caps)
2448 {
2449 unsigned int v = 0;
2450
2451 if (caps & ADVERTISED_100baseT_Full)
2452 v |= FW_PORT_CAP_SPEED_100M;
2453 if (caps & ADVERTISED_1000baseT_Full)
2454 v |= FW_PORT_CAP_SPEED_1G;
2455 if (caps & ADVERTISED_10000baseT_Full)
2456 v |= FW_PORT_CAP_SPEED_10G;
2457 if (caps & ADVERTISED_40000baseSR4_Full)
2458 v |= FW_PORT_CAP_SPEED_40G;
2459 return v;
2460 }
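/*
 * Illustrative only: these two helpers are plain bitmask translations.
 * For example,
 *
 *	to_fw_linkcaps(ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full)
 *		== (FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G)
 *
 * and any advertised bit without a firmware equivalent is simply dropped.
 */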
2461
2462 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2463 {
2464 const struct port_info *p = netdev_priv(dev);
2465
2466 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2467 p->port_type == FW_PORT_TYPE_BT_XFI ||
2468 p->port_type == FW_PORT_TYPE_BT_XAUI)
2469 cmd->port = PORT_TP;
2470 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2471 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2472 cmd->port = PORT_FIBRE;
2473 else if (p->port_type == FW_PORT_TYPE_SFP ||
2474 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2475 p->port_type == FW_PORT_TYPE_QSFP) {
2476 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2477 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2478 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2479 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2480 cmd->port = PORT_FIBRE;
2481 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2482 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2483 cmd->port = PORT_DA;
2484 else
2485 cmd->port = PORT_OTHER;
2486 } else
2487 cmd->port = PORT_OTHER;
2488
2489 if (p->mdio_addr >= 0) {
2490 cmd->phy_address = p->mdio_addr;
2491 cmd->transceiver = XCVR_EXTERNAL;
2492 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2493 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2494 } else {
2495 cmd->phy_address = 0; /* not really, but no better option */
2496 cmd->transceiver = XCVR_INTERNAL;
2497 cmd->mdio_support = 0;
2498 }
2499
2500 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2501 cmd->advertising = from_fw_linkcaps(p->port_type,
2502 p->link_cfg.advertising);
2503 ethtool_cmd_speed_set(cmd,
2504 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2505 cmd->duplex = DUPLEX_FULL;
2506 cmd->autoneg = p->link_cfg.autoneg;
2507 cmd->maxtxpkt = 0;
2508 cmd->maxrxpkt = 0;
2509 return 0;
2510 }
2511
2512 static unsigned int speed_to_caps(int speed)
2513 {
2514 if (speed == 100)
2515 return FW_PORT_CAP_SPEED_100M;
2516 if (speed == 1000)
2517 return FW_PORT_CAP_SPEED_1G;
2518 if (speed == 10000)
2519 return FW_PORT_CAP_SPEED_10G;
2520 if (speed == 40000)
2521 return FW_PORT_CAP_SPEED_40G;
2522 return 0;
2523 }
2524
2525 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2526 {
2527 unsigned int cap;
2528 struct port_info *p = netdev_priv(dev);
2529 struct link_config *lc = &p->link_cfg;
2530 u32 speed = ethtool_cmd_speed(cmd);
2531
2532 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2533 return -EINVAL;
2534
2535 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2536 /*
2537 * PHY offers a single speed. See if that's what's
2538 * being requested.
2539 */
2540 if (cmd->autoneg == AUTONEG_DISABLE &&
2541 (lc->supported & speed_to_caps(speed)))
2542 return 0;
2543 return -EINVAL;
2544 }
2545
2546 if (cmd->autoneg == AUTONEG_DISABLE) {
2547 cap = speed_to_caps(speed);
2548
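		/* In practice only 100 Mb/s can be forced here: the test
		 * below rejects 1G/10G/40G outright, since those links
		 * always train through autonegotiation.
		 */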
2549 if (!(lc->supported & cap) ||
2550 (speed == 1000) ||
2551 (speed == 10000) ||
2552 (speed == 40000))
2553 return -EINVAL;
2554 lc->requested_speed = cap;
2555 lc->advertising = 0;
2556 } else {
2557 cap = to_fw_linkcaps(cmd->advertising);
2558 if (!(lc->supported & cap))
2559 return -EINVAL;
2560 lc->requested_speed = 0;
2561 lc->advertising = cap | FW_PORT_CAP_ANEG;
2562 }
2563 lc->autoneg = cmd->autoneg;
2564
2565 if (netif_running(dev))
2566 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2567 lc);
2568 return 0;
2569 }
2570
2571 static void get_pauseparam(struct net_device *dev,
2572 struct ethtool_pauseparam *epause)
2573 {
2574 struct port_info *p = netdev_priv(dev);
2575
2576 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2577 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2578 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2579 }
2580
2581 static int set_pauseparam(struct net_device *dev,
2582 struct ethtool_pauseparam *epause)
2583 {
2584 struct port_info *p = netdev_priv(dev);
2585 struct link_config *lc = &p->link_cfg;
2586
2587 if (epause->autoneg == AUTONEG_DISABLE)
2588 lc->requested_fc = 0;
2589 else if (lc->supported & FW_PORT_CAP_ANEG)
2590 lc->requested_fc = PAUSE_AUTONEG;
2591 else
2592 return -EINVAL;
2593
2594 if (epause->rx_pause)
2595 lc->requested_fc |= PAUSE_RX;
2596 if (epause->tx_pause)
2597 lc->requested_fc |= PAUSE_TX;
2598 if (netif_running(dev))
2599 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2600 lc);
2601 return 0;
2602 }
2603
2604 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2605 {
2606 const struct port_info *pi = netdev_priv(dev);
2607 const struct sge *s = &pi->adapter->sge;
2608
2609 e->rx_max_pending = MAX_RX_BUFFERS;
2610 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2611 e->rx_jumbo_max_pending = 0;
2612 e->tx_max_pending = MAX_TXQ_ENTRIES;
2613
2614 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2615 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2616 e->rx_jumbo_pending = 0;
2617 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2618 }
2619
2620 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2621 {
2622 int i;
2623 const struct port_info *pi = netdev_priv(dev);
2624 struct adapter *adapter = pi->adapter;
2625 struct sge *s = &adapter->sge;
2626
2627 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2628 e->tx_pending > MAX_TXQ_ENTRIES ||
2629 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2630 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2631 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2632 return -EINVAL;
2633
2634 if (adapter->flags & FULL_INIT_DONE)
2635 return -EBUSY;
2636
2637 for (i = 0; i < pi->nqsets; ++i) {
2638 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2639 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2640 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2641 }
2642 return 0;
2643 }
2644
2645 static int closest_timer(const struct sge *s, int time)
2646 {
2647 int i, delta, match = 0, min_delta = INT_MAX;
2648
2649 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2650 delta = time - s->timer_val[i];
2651 if (delta < 0)
2652 delta = -delta;
2653 if (delta < min_delta) {
2654 min_delta = delta;
2655 match = i;
2656 }
2657 }
2658 return match;
2659 }
2660
2661 static int closest_thres(const struct sge *s, int thres)
2662 {
2663 int i, delta, match = 0, min_delta = INT_MAX;
2664
2665 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2666 delta = thres - s->counter_val[i];
2667 if (delta < 0)
2668 delta = -delta;
2669 if (delta < min_delta) {
2670 min_delta = delta;
2671 match = i;
2672 }
2673 }
2674 return match;
2675 }
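/*
 * Worked example (assuming, purely for illustration, a timer table of
 * {5, 10, 20, 50, 100, 200} us): closest_timer(s, 47) compares the
 * absolute deltas 42, 37, 27, 3, 53 and 153 and returns index 3, i.e.
 * the 50 us entry.  closest_thres() performs the same nearest-value
 * search over counter_val[].
 */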
2676
2677 /*
2678 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2679 */
2680 static unsigned int qtimer_val(const struct adapter *adap,
2681 const struct sge_rspq *q)
2682 {
2683 unsigned int idx = q->intr_params >> 1;
2684
2685 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2686 }
2687
2688 /**
2689 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2690 * @q: the Rx queue
2691 * @us: the hold-off time in us, or 0 to disable timer
2692 * @cnt: the hold-off packet count, or 0 to disable counter
2693 *
2694 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2695 * one of the two needs to be enabled for the queue to generate interrupts.
2696 */
2697 static int set_rspq_intr_params(struct sge_rspq *q,
2698 unsigned int us, unsigned int cnt)
2699 {
2700 struct adapter *adap = q->adap;
2701
2702 if ((us | cnt) == 0)
2703 cnt = 1;
2704
2705 if (cnt) {
2706 int err;
2707 u32 v, new_idx;
2708
2709 new_idx = closest_thres(&adap->sge, cnt);
2710 if (q->desc && q->pktcnt_idx != new_idx) {
2711 /* the queue has already been created, update it */
2712 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2713 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2714 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2715 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2716 &new_idx);
2717 if (err)
2718 return err;
2719 }
2720 q->pktcnt_idx = new_idx;
2721 }
2722
2723 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2724 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2725 return 0;
2726 }
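/*
 * A sketch of the resulting encoding (derived from the code above, not
 * from hardware documentation): bit 0 of intr_params is the packet-count
 * enable and the remaining bits hold the timer index, which is why
 * qtimer_val() recovers the index with intr_params >> 1.  For example,
 * set_rspq_intr_params(q, 0, 0) forces cnt = 1 so the queue still
 * interrupts (timer index 6 means "no timer" to qtimer_val()), while a
 * non-zero @us with @cnt == 0 yields a purely timer-driven queue with
 * QINTR_CNT_EN clear.
 */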
2727
2728 /**
2729 * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
2730 * @dev: the network device
2731 * @us: the hold-off time in us, or 0 to disable timer
2732 * @cnt: the hold-off packet count, or 0 to disable counter
2733 *
2734 * Set the RX interrupt hold-off parameters for a network device.
2735 */
2736 static int set_rx_intr_params(struct net_device *dev,
2737 unsigned int us, unsigned int cnt)
2738 {
2739 int i, err;
2740 struct port_info *pi = netdev_priv(dev);
2741 struct adapter *adap = pi->adapter;
2742 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2743
2744 for (i = 0; i < pi->nqsets; i++, q++) {
2745 err = set_rspq_intr_params(&q->rspq, us, cnt);
2746 if (err)
2747 return err;
2748 }
2749 return 0;
2750 }
2751
2752 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2753 {
2754 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2755 c->rx_max_coalesced_frames);
2756 }
2757
2758 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2759 {
2760 const struct port_info *pi = netdev_priv(dev);
2761 const struct adapter *adap = pi->adapter;
2762 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2763
2764 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2765 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2766 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2767 return 0;
2768 }
2769
2770 /**
2771 * eeprom_ptov - translate a physical EEPROM address to virtual
2772 * @phys_addr: the physical EEPROM address
2773 * @fn: the PCI function number
2774 * @sz: size of function-specific area
2775 *
2776 * Translate a physical EEPROM address to virtual. The first 1K is
2777 * accessed through virtual addresses starting at 31K, the rest is
2778 * accessed through virtual addresses starting at 0.
2779 *
2780 * The mapping is as follows:
2781 * [0..1K) -> [31K..32K)
2782 * [1K..1K+A) -> [31K-A..31K)
2783 * [1K+A..ES) -> [0..ES-A-1K)
2784 *
2785 * where A = @fn * @sz, and ES = EEPROM size.
2786 */
2787 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2788 {
2789 fn *= sz;
2790 if (phys_addr < 1024)
2791 return phys_addr + (31 << 10);
2792 if (phys_addr < 1024 + fn)
2793 return 31744 - fn + phys_addr - 1024;
2794 if (phys_addr < EEPROMSIZE)
2795 return phys_addr - 1024 - fn;
2796 return -EINVAL;
2797 }
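/*
 * Worked example of the mapping above, assuming @fn = 1 and @sz = 1024
 * for illustration (so A = 1K):
 *
 *	eeprom_ptov(0, 1, 1024)    -> 31744	(0  in [0..1K)  -> 31K)
 *	eeprom_ptov(1024, 1, 1024) -> 30720	(1K in [1K..2K) -> 30K)
 *	eeprom_ptov(2048, 1, 1024) -> 0		(2K in [2K..ES) -> 0)
 */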
2798
2799 /*
2800 * The next two routines implement eeprom read/write from physical addresses.
2801 */
2802 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2803 {
2804 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2805
2806 if (vaddr >= 0)
2807 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2808 return vaddr < 0 ? vaddr : 0;
2809 }
2810
2811 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2812 {
2813 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2814
2815 if (vaddr >= 0)
2816 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2817 return vaddr < 0 ? vaddr : 0;
2818 }
2819
2820 #define EEPROM_MAGIC 0x38E2F10C
2821
2822 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2823 u8 *data)
2824 {
2825 int i, err = 0;
2826 struct adapter *adapter = netdev2adap(dev);
2827
2828 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2829 if (!buf)
2830 return -ENOMEM;
2831
2832 e->magic = EEPROM_MAGIC;
2833 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2834 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2835
2836 if (!err)
2837 memcpy(data, buf + e->offset, e->len);
2838 kfree(buf);
2839 return err;
2840 }
2841
2842 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2843 u8 *data)
2844 {
2845 u8 *buf;
2846 int err = 0;
2847 u32 aligned_offset, aligned_len, *p;
2848 struct adapter *adapter = netdev2adap(dev);
2849
2850 if (eeprom->magic != EEPROM_MAGIC)
2851 return -EINVAL;
2852
2853 aligned_offset = eeprom->offset & ~3;
2854 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2855
2856 if (adapter->fn > 0) {
2857 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2858
2859 if (aligned_offset < start ||
2860 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2861 return -EPERM;
2862 }
2863
2864 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2865 /*
2866 * RMW possibly needed for first or last words.
2867 */
2868 buf = kmalloc(aligned_len, GFP_KERNEL);
2869 if (!buf)
2870 return -ENOMEM;
2871 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2872 if (!err && aligned_len > 4)
2873 err = eeprom_rd_phys(adapter,
2874 aligned_offset + aligned_len - 4,
2875 (u32 *)&buf[aligned_len - 4]);
2876 if (err)
2877 goto out;
2878 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2879 } else
2880 buf = data;
2881
2882 err = t4_seeprom_wp(adapter, false);
2883 if (err)
2884 goto out;
2885
2886 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2887 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2888 aligned_offset += 4;
2889 }
2890
2891 if (!err)
2892 err = t4_seeprom_wp(adapter, true);
2893 out:
2894 if (buf != data)
2895 kfree(buf);
2896 return err;
2897 }
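/*
 * The alignment arithmetic above, by example: a write of len 6 at
 * offset 5 gives aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3
 * = 8, i.e. the two 32-bit words covering bytes 4..11.  Those words are
 * read back first so the untouched bytes (4 and 11) survive the
 * read-modify-write.
 */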
2898
2899 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2900 {
2901 int ret;
2902 const struct firmware *fw;
2903 struct adapter *adap = netdev2adap(netdev);
2904
2905 ef->data[sizeof(ef->data) - 1] = '\0';
2906 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2907 if (ret < 0)
2908 return ret;
2909
2910 ret = t4_load_fw(adap, fw->data, fw->size);
2911 release_firmware(fw);
2912 if (!ret)
2913 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2914 return ret;
2915 }
2916
2917 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2918 #define BCAST_CRC 0xa0ccc1a6
2919
2920 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2921 {
2922 	wol->supported = WOL_SUPPORTED;
2923 wol->wolopts = netdev2adap(dev)->wol;
2924 memset(&wol->sopass, 0, sizeof(wol->sopass));
2925 }
2926
2927 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2928 {
2929 int err = 0;
2930 struct port_info *pi = netdev_priv(dev);
2931
2932 if (wol->wolopts & ~WOL_SUPPORTED)
2933 return -EINVAL;
2934 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2935 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2936 if (wol->wolopts & WAKE_BCAST) {
2937 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2938 ~0ULL, 0, false);
2939 if (!err)
2940 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2941 ~6ULL, ~0ULL, BCAST_CRC, true);
2942 } else
2943 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2944 return err;
2945 }
2946
2947 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2948 {
2949 const struct port_info *pi = netdev_priv(dev);
2950 netdev_features_t changed = dev->features ^ features;
2951 int err;
2952
2953 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2954 return 0;
2955
2956 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2957 -1, -1, -1,
2958 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2959 if (unlikely(err))
2960 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2961 return err;
2962 }
2963
2964 static u32 get_rss_table_size(struct net_device *dev)
2965 {
2966 const struct port_info *pi = netdev_priv(dev);
2967
2968 return pi->rss_size;
2969 }
2970
2971 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2972 {
2973 const struct port_info *pi = netdev_priv(dev);
2974 unsigned int n = pi->rss_size;
2975
2976 while (n--)
2977 p[n] = pi->rss[n];
2978 return 0;
2979 }
2980
2981 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2982 {
2983 unsigned int i;
2984 struct port_info *pi = netdev_priv(dev);
2985
2986 for (i = 0; i < pi->rss_size; i++)
2987 pi->rss[i] = p[i];
2988 if (pi->adapter->flags & FULL_INIT_DONE)
2989 return write_rss(pi, pi->rss);
2990 return 0;
2991 }
2992
2993 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2994 u32 *rules)
2995 {
2996 const struct port_info *pi = netdev_priv(dev);
2997
2998 switch (info->cmd) {
2999 case ETHTOOL_GRXFH: {
3000 unsigned int v = pi->rss_mode;
3001
3002 info->data = 0;
3003 switch (info->flow_type) {
3004 case TCP_V4_FLOW:
3005 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3006 info->data = RXH_IP_SRC | RXH_IP_DST |
3007 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3008 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3009 info->data = RXH_IP_SRC | RXH_IP_DST;
3010 break;
3011 case UDP_V4_FLOW:
3012 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3013 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3014 info->data = RXH_IP_SRC | RXH_IP_DST |
3015 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3016 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3017 info->data = RXH_IP_SRC | RXH_IP_DST;
3018 break;
3019 case SCTP_V4_FLOW:
3020 case AH_ESP_V4_FLOW:
3021 case IPV4_FLOW:
3022 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3023 info->data = RXH_IP_SRC | RXH_IP_DST;
3024 break;
3025 case TCP_V6_FLOW:
3026 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3027 info->data = RXH_IP_SRC | RXH_IP_DST |
3028 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3029 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3030 info->data = RXH_IP_SRC | RXH_IP_DST;
3031 break;
3032 case UDP_V6_FLOW:
3033 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3034 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3035 info->data = RXH_IP_SRC | RXH_IP_DST |
3036 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3037 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3038 info->data = RXH_IP_SRC | RXH_IP_DST;
3039 break;
3040 case SCTP_V6_FLOW:
3041 case AH_ESP_V6_FLOW:
3042 case IPV6_FLOW:
3043 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3044 info->data = RXH_IP_SRC | RXH_IP_DST;
3045 break;
3046 }
3047 return 0;
3048 }
3049 case ETHTOOL_GRXRINGS:
3050 info->data = pi->nqsets;
3051 return 0;
3052 }
3053 return -EOPNOTSUPP;
3054 }
3055
3056 static const struct ethtool_ops cxgb_ethtool_ops = {
3057 .get_settings = get_settings,
3058 .set_settings = set_settings,
3059 .get_drvinfo = get_drvinfo,
3060 .get_msglevel = get_msglevel,
3061 .set_msglevel = set_msglevel,
3062 .get_ringparam = get_sge_param,
3063 .set_ringparam = set_sge_param,
3064 .get_coalesce = get_coalesce,
3065 .set_coalesce = set_coalesce,
3066 .get_eeprom_len = get_eeprom_len,
3067 .get_eeprom = get_eeprom,
3068 .set_eeprom = set_eeprom,
3069 .get_pauseparam = get_pauseparam,
3070 .set_pauseparam = set_pauseparam,
3071 .get_link = ethtool_op_get_link,
3072 .get_strings = get_strings,
3073 .set_phys_id = identify_port,
3074 .nway_reset = restart_autoneg,
3075 .get_sset_count = get_sset_count,
3076 .get_ethtool_stats = get_stats,
3077 .get_regs_len = get_regs_len,
3078 .get_regs = get_regs,
3079 .get_wol = get_wol,
3080 .set_wol = set_wol,
3081 .get_rxnfc = get_rxnfc,
3082 .get_rxfh_indir_size = get_rss_table_size,
3083 .get_rxfh = get_rss_table,
3084 .set_rxfh = set_rss_table,
3085 .flash_device = set_flash,
3086 };
3087
3088 /*
3089 * debugfs support
3090 */
3091 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3092 loff_t *ppos)
3093 {
3094 loff_t pos = *ppos;
3095 loff_t avail = file_inode(file)->i_size;
3096 unsigned int mem = (uintptr_t)file->private_data & 3;
3097 struct adapter *adap = file->private_data - mem;
3098 __be32 *data;
3099 int ret;
3100
3101 if (pos < 0)
3102 return -EINVAL;
3103 if (pos >= avail)
3104 return 0;
3105 if (count > avail - pos)
3106 count = avail - pos;
3107
3108 data = t4_alloc_mem(count);
3109 if (!data)
3110 return -ENOMEM;
3111
3112 spin_lock(&adap->win0_lock);
3113 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3114 spin_unlock(&adap->win0_lock);
3115 if (ret) {
3116 t4_free_mem(data);
3117 return ret;
3118 }
3119 ret = copy_to_user(buf, data, count);
3120
3121 t4_free_mem(data);
3122 if (ret)
3123 return -EFAULT;
3124
3125 *ppos = pos + count;
3126 return count;
3127 }
3128
3129 static const struct file_operations mem_debugfs_fops = {
3130 .owner = THIS_MODULE,
3131 .open = simple_open,
3132 .read = mem_read,
3133 .llseek = default_llseek,
3134 };
3135
3136 static void add_debugfs_mem(struct adapter *adap, const char *name,
3137 unsigned int idx, unsigned int size_mb)
3138 {
3139 struct dentry *de;
3140
3141 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3142 (void *)adap + idx, &mem_debugfs_fops);
3143 if (de && de->d_inode)
3144 de->d_inode->i_size = size_mb << 20;
3145 }
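/*
 * Note on the private_data trick above: MEM_EDC0/EDC1/MC0/MC1 are small
 * integers and struct adapter is more than 4-byte aligned, so
 * "(void *)adap + idx" tags the adapter pointer with the memory type in
 * its low two bits.  mem_read() undoes this with "& 3" to recover both
 * the adapter and the target memory.
 */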
3146
3147 static int setup_debugfs(struct adapter *adap)
3148 {
3149 int i;
3150 u32 size;
3151
3152 if (IS_ERR_OR_NULL(adap->debugfs_root))
3153 return -1;
3154
3155 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3156 if (i & EDRAM0_ENABLE) {
3157 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3158 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3159 }
3160 if (i & EDRAM1_ENABLE) {
3161 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3162 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3163 }
3164 if (is_t4(adap->params.chip)) {
3165 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3166 if (i & EXT_MEM_ENABLE)
3167 add_debugfs_mem(adap, "mc", MEM_MC,
3168 EXT_MEM_SIZE_GET(size));
3169 } else {
3170 if (i & EXT_MEM_ENABLE) {
3171 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3172 add_debugfs_mem(adap, "mc0", MEM_MC0,
3173 EXT_MEM_SIZE_GET(size));
3174 }
3175 if (i & EXT_MEM1_ENABLE) {
3176 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3177 add_debugfs_mem(adap, "mc1", MEM_MC1,
3178 EXT_MEM_SIZE_GET(size));
3179 }
3180 }
3181 if (adap->l2t)
3182 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3183 &t4_l2t_fops);
3184 return 0;
3185 }
3186
3187 /*
3188 * upper-layer driver support
3189 */
3190
3191 /*
3192 * Allocate an active-open TID and set it to the supplied value.
3193 */
3194 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3195 {
3196 int atid = -1;
3197
3198 spin_lock_bh(&t->atid_lock);
3199 if (t->afree) {
3200 union aopen_entry *p = t->afree;
3201
3202 atid = (p - t->atid_tab) + t->atid_base;
3203 t->afree = p->next;
3204 p->data = data;
3205 t->atids_in_use++;
3206 }
3207 spin_unlock_bh(&t->atid_lock);
3208 return atid;
3209 }
3210 EXPORT_SYMBOL(cxgb4_alloc_atid);
3211
3212 /*
3213 * Release an active-open TID.
3214 */
3215 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3216 {
3217 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3218
3219 spin_lock_bh(&t->atid_lock);
3220 p->next = t->afree;
3221 t->afree = p;
3222 t->atids_in_use--;
3223 spin_unlock_bh(&t->atid_lock);
3224 }
3225 EXPORT_SYMBOL(cxgb4_free_atid);
3226
3227 /*
3228 * Allocate a server TID and set it to the supplied value.
3229 */
3230 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3231 {
3232 int stid;
3233
3234 spin_lock_bh(&t->stid_lock);
3235 if (family == PF_INET) {
3236 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3237 if (stid < t->nstids)
3238 __set_bit(stid, t->stid_bmap);
3239 else
3240 stid = -1;
3241 } else {
3242 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3243 if (stid < 0)
3244 stid = -1;
3245 }
3246 if (stid >= 0) {
3247 t->stid_tab[stid].data = data;
3248 stid += t->stid_base;
3249 /* An IPv6 address needs at most 520 bits, i.e. 16 TCAM
3250 * cells, which is equivalent to 4 TIDs. With CLIP
3251 * enabled it would need only 2 TIDs.
3252 */
3253 if (family == PF_INET)
3254 t->stids_in_use++;
3255 else
3256 t->stids_in_use += 4;
3257 }
3258 spin_unlock_bh(&t->stid_lock);
3259 return stid;
3260 }
3261 EXPORT_SYMBOL(cxgb4_alloc_stid);
3262
3263 /* Allocate a server filter TID and set it to the supplied value.
3264 */
3265 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3266 {
3267 int stid;
3268
3269 spin_lock_bh(&t->stid_lock);
3270 if (family == PF_INET) {
3271 stid = find_next_zero_bit(t->stid_bmap,
3272 t->nstids + t->nsftids, t->nstids);
3273 if (stid < (t->nstids + t->nsftids))
3274 __set_bit(stid, t->stid_bmap);
3275 else
3276 stid = -1;
3277 } else {
3278 stid = -1;
3279 }
3280 if (stid >= 0) {
3281 t->stid_tab[stid].data = data;
3282 stid -= t->nstids;
3283 stid += t->sftid_base;
3284 t->stids_in_use++;
3285 }
3286 spin_unlock_bh(&t->stid_lock);
3287 return stid;
3288 }
3289 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3290
3291 /* Release a server TID.
3292 */
3293 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3294 {
3295 /* Is it a server filter TID? */
3296 if (t->nsftids && (stid >= t->sftid_base)) {
3297 stid -= t->sftid_base;
3298 stid += t->nstids;
3299 } else {
3300 stid -= t->stid_base;
3301 }
3302
3303 spin_lock_bh(&t->stid_lock);
3304 if (family == PF_INET)
3305 __clear_bit(stid, t->stid_bmap);
3306 else
3307 bitmap_release_region(t->stid_bmap, stid, 2);
3308 t->stid_tab[stid].data = NULL;
3309 if (family == PF_INET)
3310 t->stids_in_use--;
3311 else
3312 t->stids_in_use -= 4;
3313 spin_unlock_bh(&t->stid_lock);
3314 }
3315 EXPORT_SYMBOL(cxgb4_free_stid);
3316
3317 /*
3318 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3319 */
3320 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3321 unsigned int tid)
3322 {
3323 struct cpl_tid_release *req;
3324
3325 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3326 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3327 INIT_TP_WR(req, tid);
3328 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3329 }
3330
3331 /*
3332 * Queue a TID release request and if necessary schedule a work queue to
3333 * process it.
3334 */
3335 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3336 unsigned int tid)
3337 {
3338 void **p = &t->tid_tab[tid];
3339 struct adapter *adap = container_of(t, struct adapter, tids);
3340
3341 spin_lock_bh(&adap->tid_release_lock);
3342 *p = adap->tid_release_head;
3343 /* Low 2 bits encode the Tx channel number */
3344 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3345 if (!adap->tid_release_task_busy) {
3346 adap->tid_release_task_busy = true;
3347 queue_work(adap->workq, &adap->tid_release_task);
3348 }
3349 spin_unlock_bh(&adap->tid_release_lock);
3350 }
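/*
 * The same low-bit tagging appears here: tid_tab[] entries are pointer
 * sized, hence at least 4-byte aligned, so the Tx channel (0-3) rides
 * in the low two bits of the list links.  process_tid_release_list()
 * below recovers it with "(uintptr_t)p & 3" before following the chain.
 */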
3351
3352 /*
3353 * Process the list of pending TID release requests.
3354 */
3355 static void process_tid_release_list(struct work_struct *work)
3356 {
3357 struct sk_buff *skb;
3358 struct adapter *adap;
3359
3360 adap = container_of(work, struct adapter, tid_release_task);
3361
3362 spin_lock_bh(&adap->tid_release_lock);
3363 while (adap->tid_release_head) {
3364 void **p = adap->tid_release_head;
3365 unsigned int chan = (uintptr_t)p & 3;
3366 p = (void *)p - chan;
3367
3368 adap->tid_release_head = *p;
3369 *p = NULL;
3370 spin_unlock_bh(&adap->tid_release_lock);
3371
3372 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3373 GFP_KERNEL)))
3374 schedule_timeout_uninterruptible(1);
3375
3376 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3377 t4_ofld_send(adap, skb);
3378 spin_lock_bh(&adap->tid_release_lock);
3379 }
3380 adap->tid_release_task_busy = false;
3381 spin_unlock_bh(&adap->tid_release_lock);
3382 }
3383
3384 /*
3385 * Release a TID and inform HW. If we are unable to allocate the release
3386 * message we defer to a work queue.
3387 */
3388 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3389 {
3390 void *old;
3391 struct sk_buff *skb;
3392 struct adapter *adap = container_of(t, struct adapter, tids);
3393
3394 old = t->tid_tab[tid];
3395 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3396 if (likely(skb)) {
3397 t->tid_tab[tid] = NULL;
3398 mk_tid_release(skb, chan, tid);
3399 t4_ofld_send(adap, skb);
3400 } else
3401 cxgb4_queue_tid_release(t, chan, tid);
3402 if (old)
3403 atomic_dec(&t->tids_in_use);
3404 }
3405 EXPORT_SYMBOL(cxgb4_remove_tid);
3406
3407 /*
3408 * Allocate and initialize the TID tables. Returns 0 on success.
3409 */
3410 static int tid_init(struct tid_info *t)
3411 {
3412 size_t size;
3413 unsigned int stid_bmap_size;
3414 unsigned int natids = t->natids;
3415 struct adapter *adap = container_of(t, struct adapter, tids);
3416
3417 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3418 size = t->ntids * sizeof(*t->tid_tab) +
3419 natids * sizeof(*t->atid_tab) +
3420 t->nstids * sizeof(*t->stid_tab) +
3421 t->nsftids * sizeof(*t->stid_tab) +
3422 stid_bmap_size * sizeof(long) +
3423 t->nftids * sizeof(*t->ftid_tab) +
3424 t->nsftids * sizeof(*t->ftid_tab);
3425
3426 t->tid_tab = t4_alloc_mem(size);
3427 if (!t->tid_tab)
3428 return -ENOMEM;
3429
3430 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3431 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3432 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3433 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3434 spin_lock_init(&t->stid_lock);
3435 spin_lock_init(&t->atid_lock);
3436
3437 t->stids_in_use = 0;
3438 t->afree = NULL;
3439 t->atids_in_use = 0;
3440 atomic_set(&t->tids_in_use, 0);
3441
3442 /* Setup the free list for atid_tab and clear the stid bitmap. */
3443 if (natids) {
3444 while (--natids)
3445 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3446 t->afree = t->atid_tab;
3447 }
3448 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3449 /* Reserve stid 0 for T4/T5 adapters */
3450 if (!t->stid_base &&
3451 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3452 __set_bit(0, t->stid_bmap);
3453
3454 return 0;
3455 }
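/*
 * For reference, the single t4_alloc_mem() call above carves one
 * contiguous block into, in order: tid_tab, atid_tab, stid_tab
 * (nstids + nsftids entries), stid_bmap and ftid_tab; freeing
 * t->tid_tab therefore releases every table at once.
 */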
3456
3457 int cxgb4_clip_get(const struct net_device *dev,
3458 const struct in6_addr *lip)
3459 {
3460 struct adapter *adap;
3461 struct fw_clip_cmd c;
3462
3463 adap = netdev2adap(dev);
3464 memset(&c, 0, sizeof(c));
3465 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3466 FW_CMD_REQUEST | FW_CMD_WRITE);
3467 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3468 c.ip_hi = *(__be64 *)(lip->s6_addr);
3469 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3470 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3471 }
3472 EXPORT_SYMBOL(cxgb4_clip_get);
3473
3474 int cxgb4_clip_release(const struct net_device *dev,
3475 const struct in6_addr *lip)
3476 {
3477 struct adapter *adap;
3478 struct fw_clip_cmd c;
3479
3480 adap = netdev2adap(dev);
3481 memset(&c, 0, sizeof(c));
3482 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3483 FW_CMD_REQUEST | FW_CMD_READ);
3484 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3485 c.ip_hi = *(__be64 *)(lip->s6_addr);
3486 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3487 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3488 }
3489 EXPORT_SYMBOL(cxgb4_clip_release);
3490
3491 /**
3492 * cxgb4_create_server - create an IP server
3493 * @dev: the device
3494 * @stid: the server TID
3495 * @sip: local IP address to bind server to
3496 * @sport: the server's TCP port
* @vlan: the VLAN header info (currently unused)
3497 * @queue: queue to direct messages from this server to
3498 *
3499 * Create an IP server for the given port and address.
3500 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3501 */
3502 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3503 __be32 sip, __be16 sport, __be16 vlan,
3504 unsigned int queue)
3505 {
3506 unsigned int chan;
3507 struct sk_buff *skb;
3508 struct adapter *adap;
3509 struct cpl_pass_open_req *req;
3510 int ret;
3511
3512 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3513 if (!skb)
3514 return -ENOMEM;
3515
3516 adap = netdev2adap(dev);
3517 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3518 INIT_TP_WR(req, 0);
3519 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3520 req->local_port = sport;
3521 req->peer_port = htons(0);
3522 req->local_ip = sip;
3523 req->peer_ip = htonl(0);
3524 chan = rxq_to_chan(&adap->sge, queue);
3525 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3526 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3527 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3528 ret = t4_mgmt_tx(adap, skb);
3529 return net_xmit_eval(ret);
3530 }
3531 EXPORT_SYMBOL(cxgb4_create_server);
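/*
 * Hypothetical caller sketch (illustrative, not part of this driver):
 * an upper-layer driver pairs this with the STID allocator above,
 * roughly:
 *
 *	stid = cxgb4_alloc_stid(tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(dev, stid, sip, sport, 0, rxq);
 *
 * where tids, ctx, sip, sport and rxq are the caller's own state.
 */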
3532
3533 /**
* cxgb4_create_server6 - create an IPv6 server
3534 * @dev: the device
3535 * @stid: the server TID
3536 * @sip: local IPv6 address to bind server to
3537 * @sport: the server's TCP port
3538 * @queue: queue to direct messages from this server to
3539 *
3540 * Create an IPv6 server for the given port and address.
3541 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3542 */
3543 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3544 const struct in6_addr *sip, __be16 sport,
3545 unsigned int queue)
3546 {
3547 unsigned int chan;
3548 struct sk_buff *skb;
3549 struct adapter *adap;
3550 struct cpl_pass_open_req6 *req;
3551 int ret;
3552
3553 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3554 if (!skb)
3555 return -ENOMEM;
3556
3557 adap = netdev2adap(dev);
3558 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3559 INIT_TP_WR(req, 0);
3560 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3561 req->local_port = sport;
3562 req->peer_port = htons(0);
3563 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3564 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3565 req->peer_ip_hi = cpu_to_be64(0);
3566 req->peer_ip_lo = cpu_to_be64(0);
3567 chan = rxq_to_chan(&adap->sge, queue);
3568 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3569 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3570 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3571 ret = t4_mgmt_tx(adap, skb);
3572 return net_xmit_eval(ret);
3573 }
3574 EXPORT_SYMBOL(cxgb4_create_server6);
3575
3576 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3577 unsigned int queue, bool ipv6)
3578 {
3579 struct sk_buff *skb;
3580 struct adapter *adap;
3581 struct cpl_close_listsvr_req *req;
3582 int ret;
3583
3584 adap = netdev2adap(dev);
3585
3586 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3587 if (!skb)
3588 return -ENOMEM;
3589
3590 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3591 INIT_TP_WR(req, 0);
3592 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3593 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3594 LISTSVR_IPV6(0)) | QUEUENO(queue));
3595 ret = t4_mgmt_tx(adap, skb);
3596 return net_xmit_eval(ret);
3597 }
3598 EXPORT_SYMBOL(cxgb4_remove_server);
3599
3600 /**
3601 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3602 * @mtus: the HW MTU table
3603 * @mtu: the target MTU
3604 * @idx: index of selected entry in the MTU table
3605 *
3606 * Returns the index and the value in the HW MTU table that is closest to
3607 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3608 * table, in which case that smallest available value is selected.
3609 */
3610 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3611 unsigned int *idx)
3612 {
3613 unsigned int i = 0;
3614
3615 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3616 ++i;
3617 if (idx)
3618 *idx = i;
3619 return mtus[i];
3620 }
3621 EXPORT_SYMBOL(cxgb4_best_mtu);
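/*
 * Worked example, assuming an MTU table containing {..., 1280, 1488,
 * 1500, 2002, ...}: cxgb4_best_mtu(mtus, 1400, &idx) stops at 1280,
 * since the next entry (1488) already exceeds 1400, while a request
 * for 1500 returns 1500 exactly.
 */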
3622
3623 /**
3624 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3625 * @mtus: the HW MTU table
3626 * @header_size: Header Size
3627 * @data_size_max: maximum Data Segment Size
3628 * @data_size_align: desired Data Segment Size Alignment (2^N)
3629 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3630 *
3631 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3632 * MTU Table based solely on a Maximum MTU parameter, we break that
3633 * parameter up into a Header Size and Maximum Data Segment Size, and
3634 * provide a desired Data Segment Size Alignment. If we find an MTU in
3635 * the Hardware MTU Table which will result in a Data Segment Size with
3636 * the requested alignment _and_ that MTU isn't "too far" from the
3637 * closest MTU, then we'll return that rather than the closest MTU.
3638 */
3639 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3640 unsigned short header_size,
3641 unsigned short data_size_max,
3642 unsigned short data_size_align,
3643 unsigned int *mtu_idxp)
3644 {
3645 unsigned short max_mtu = header_size + data_size_max;
3646 unsigned short data_size_align_mask = data_size_align - 1;
3647 int mtu_idx, aligned_mtu_idx;
3648
3649 /* Scan the MTU Table till we find an MTU which is larger than our
3650 * Maximum MTU or we reach the end of the table. Along the way,
3651 * record the last MTU found, if any, which will result in a Data
3652 * Segment Length matching the requested alignment.
3653 */
3654 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3655 unsigned short data_size = mtus[mtu_idx] - header_size;
3656
3657 /* If this MTU minus the Header Size would result in a
3658 * Data Segment Size of the desired alignment, remember it.
3659 */
3660 if ((data_size & data_size_align_mask) == 0)
3661 aligned_mtu_idx = mtu_idx;
3662
3663 /* If we're not at the end of the Hardware MTU Table and the
3664 * next element is larger than our Maximum MTU, drop out of
3665 * the loop.
3666 */
3667 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3668 break;
3669 }
3670
3671 /* If we fell out of the loop because we ran to the end of the table,
3672 * then we just have to use the last [largest] entry.
3673 */
3674 if (mtu_idx == NMTUS)
3675 mtu_idx--;
3676
3677 /* If we found an MTU which resulted in the requested Data Segment
3678 * Length alignment and that's "not far" from the largest MTU which is
3679 * less than or equal to the maximum MTU, then use that.
3680 */
3681 if (aligned_mtu_idx >= 0 &&
3682 mtu_idx - aligned_mtu_idx <= 1)
3683 mtu_idx = aligned_mtu_idx;
3684
3685 /* If the caller has passed in an MTU Index pointer, pass the
3686 * MTU Index back. Return the MTU value.
3687 */
3688 if (mtu_idxp)
3689 *mtu_idxp = mtu_idx;
3690 return mtus[mtu_idx];
3691 }
3692 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
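/*
 * Worked example with the same illustrative table: for header_size = 40,
 * data_size_max = 1460 (so max_mtu = 1500) and data_size_align = 8, the
 * closest MTU is 1500 but its 1460-byte payload is not 8-byte aligned,
 * whereas 1488 gives a 1448-byte payload that is.  Since 1488 is only
 * one table index away from 1500, it is returned in preference.
 */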
3693
3694 /**
3695 * cxgb4_port_chan - get the HW channel of a port
3696 * @dev: the net device for the port
3697 *
3698 * Return the HW Tx channel of the given port.
3699 */
3700 unsigned int cxgb4_port_chan(const struct net_device *dev)
3701 {
3702 return netdev2pinfo(dev)->tx_chan;
3703 }
3704 EXPORT_SYMBOL(cxgb4_port_chan);
3705
3706 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3707 {
3708 struct adapter *adap = netdev2adap(dev);
3709 u32 v1, v2, lp_count, hp_count;
3710
3711 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3712 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3713 if (is_t4(adap->params.chip)) {
3714 lp_count = G_LP_COUNT(v1);
3715 hp_count = G_HP_COUNT(v1);
3716 } else {
3717 lp_count = G_LP_COUNT_T5(v1);
3718 hp_count = G_HP_COUNT_T5(v2);
3719 }
3720 return lpfifo ? lp_count : hp_count;
3721 }
3722 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3723
3724 /**
3725 * cxgb4_port_viid - get the VI id of a port
3726 * @dev: the net device for the port
3727 *
3728 * Return the VI id of the given port.
3729 */
3730 unsigned int cxgb4_port_viid(const struct net_device *dev)
3731 {
3732 return netdev2pinfo(dev)->viid;
3733 }
3734 EXPORT_SYMBOL(cxgb4_port_viid);
3735
3736 /**
3737 * cxgb4_port_idx - get the index of a port
3738 * @dev: the net device for the port
3739 *
3740 * Return the index of the given port.
3741 */
3742 unsigned int cxgb4_port_idx(const struct net_device *dev)
3743 {
3744 return netdev2pinfo(dev)->port_id;
3745 }
3746 EXPORT_SYMBOL(cxgb4_port_idx);
3747
3748 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3749 struct tp_tcp_stats *v6)
3750 {
3751 struct adapter *adap = pci_get_drvdata(pdev);
3752
3753 spin_lock(&adap->stats_lock);
3754 t4_tp_get_tcp_stats(adap, v4, v6);
3755 spin_unlock(&adap->stats_lock);
3756 }
3757 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3758
3759 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3760 const unsigned int *pgsz_order)
3761 {
3762 struct adapter *adap = netdev2adap(dev);
3763
3764 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3765 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3766 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3767 HPZ3(pgsz_order[3]));
3768 }
3769 EXPORT_SYMBOL(cxgb4_iscsi_init);
3770
3771 int cxgb4_flush_eq_cache(struct net_device *dev)
3772 {
3773 struct adapter *adap = netdev2adap(dev);
3774 int ret;
3775
3776 ret = t4_fwaddrspace_write(adap, adap->mbox,
3777 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3778 return ret;
3779 }
3780 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3781
3782 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3783 {
3784 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3785 __be64 indices;
3786 int ret;
3787
3788 spin_lock(&adap->win0_lock);
3789 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3790 sizeof(indices), (__be32 *)&indices,
3791 T4_MEMORY_READ);
3792 spin_unlock(&adap->win0_lock);
3793 if (!ret) {
3794 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3795 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3796 }
3797 return ret;
3798 }
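/*
 * As read above, each egress-queue context occupies 24 bytes starting
 * at SGE_DBQ_CTXT_BADDR, and the big-endian 64-bit word at byte offset
 * 8 carries the hardware cidx in bits 40:25 and pidx in bits 24:9.
 */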
3799
3800 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3801 u16 size)
3802 {
3803 struct adapter *adap = netdev2adap(dev);
3804 u16 hw_pidx, hw_cidx;
3805 int ret;
3806
3807 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3808 if (ret)
3809 goto out;
3810
3811 if (pidx != hw_pidx) {
3812 u16 delta;
3813
3814 if (pidx >= hw_pidx)
3815 delta = pidx - hw_pidx;
3816 else
3817 delta = size - hw_pidx + pidx;
3818 wmb();
3819 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3820 QID(qid) | PIDX(delta));
3821 }
3822 out:
3823 return ret;
3824 }
3825 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3826
3827 void cxgb4_disable_db_coalescing(struct net_device *dev)
3828 {
3829 struct adapter *adap;
3830
3831 adap = netdev2adap(dev);
3832 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3833 F_NOCOALESCE);
3834 }
3835 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3836
3837 void cxgb4_enable_db_coalescing(struct net_device *dev)
3838 {
3839 struct adapter *adap;
3840
3841 adap = netdev2adap(dev);
3842 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3843 }
3844 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3845
3846 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3847 {
3848 struct adapter *adap;
3849 u32 offset, memtype, memaddr;
3850 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3851 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3852 int ret;
3853
3854 adap = netdev2adap(dev);
3855
3856 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3857
3858 /* Figure out where the offset lands in the Memory Type/Address scheme.
3859 * This code assumes that the memory is laid out starting at offset 0
3860 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3861 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3862 * MC0, and some have both MC0 and MC1.
3863 */
3864 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3865 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3866 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3867
3868 edc0_end = edc0_size;
3869 edc1_end = edc0_end + edc1_size;
3870 mc0_end = edc1_end + mc0_size;
3871
3872 if (offset < edc0_end) {
3873 memtype = MEM_EDC0;
3874 memaddr = offset;
3875 } else if (offset < edc1_end) {
3876 memtype = MEM_EDC1;
3877 memaddr = offset - edc0_end;
3878 } else {
3879 if (offset < mc0_end) {
3880 memtype = MEM_MC0;
3881 memaddr = offset - edc1_end;
3882 } else if (is_t4(adap->params.chip)) {
3883 /* T4 only has a single memory channel */
3884 goto err;
3885 } else {
3886 mc1_size = EXT_MEM_SIZE_GET(
3887 t4_read_reg(adap,
3888 MA_EXT_MEMORY1_BAR)) << 20;
3889 mc1_end = mc0_end + mc1_size;
3890 if (offset < mc1_end) {
3891 memtype = MEM_MC1;
3892 memaddr = offset - mc0_end;
3893 } else {
3894 /* offset beyond the end of any memory */
3895 goto err;
3896 }
3897 }
3898 }
3899
3900 spin_lock(&adap->win0_lock);
3901 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3902 spin_unlock(&adap->win0_lock);
3903 return ret;
3904
3905 err:
3906 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3907 stag, offset);
3908 return -EINVAL;
3909 }
3910 EXPORT_SYMBOL(cxgb4_read_tpte);
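/* Illustrative usage sketch (assumed caller code, not part of this file):
 * an RDMA ULD such as iw_cxgb4 would fetch the 32-byte TPTE for a stag as
 *
 *	__be32 tpte[8];
 *	int err = cxgb4_read_tpte(dev, stag, tpte);
 *
 * with a negative return meaning the stag's offset fell outside every
 * memory region probed above.
 */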
3911
3912 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3913 {
3914 u32 hi, lo;
3915 struct adapter *adap;
3916
3917 adap = netdev2adap(dev);
3918 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3919 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3920
3921 return ((u64)hi << 32) | (u64)lo;
3922 }
3923 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3924
3925 static struct pci_driver cxgb4_driver;
3926
3927 static void check_neigh_update(struct neighbour *neigh)
3928 {
3929 const struct device *parent;
3930 const struct net_device *netdev = neigh->dev;
3931
3932 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3933 netdev = vlan_dev_real_dev(netdev);
3934 parent = netdev->dev.parent;
3935 if (parent && parent->driver == &cxgb4_driver.driver)
3936 t4_l2t_update(dev_get_drvdata(parent), neigh);
3937 }
3938
3939 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3940 void *data)
3941 {
3942 switch (event) {
3943 case NETEVENT_NEIGH_UPDATE:
3944 check_neigh_update(data);
3945 break;
3946 case NETEVENT_REDIRECT:
3947 default:
3948 break;
3949 }
3950 return 0;
3951 }
3952
3953 static bool netevent_registered;
3954 static struct notifier_block cxgb4_netevent_nb = {
3955 .notifier_call = netevent_cb
3956 };
3957
3958 static void drain_db_fifo(struct adapter *adap, int usecs)
3959 {
3960 u32 v1, v2, lp_count, hp_count;
3961
3962 do {
3963 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3964 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3965 if (is_t4(adap->params.chip)) {
3966 lp_count = G_LP_COUNT(v1);
3967 hp_count = G_HP_COUNT(v1);
3968 } else {
3969 lp_count = G_LP_COUNT_T5(v1);
3970 hp_count = G_HP_COUNT_T5(v2);
3971 }
3972
3973 if (lp_count == 0 && hp_count == 0)
3974 break;
3975 set_current_state(TASK_UNINTERRUPTIBLE);
3976 schedule_timeout(usecs_to_jiffies(usecs));
3977 } while (1);
3978 }
3979
3980 static void disable_txq_db(struct sge_txq *q)
3981 {
3982 unsigned long flags;
3983
3984 spin_lock_irqsave(&q->db_lock, flags);
3985 q->db_disabled = 1;
3986 spin_unlock_irqrestore(&q->db_lock, flags);
3987 }
3988
3989 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3990 {
3991 spin_lock_irq(&q->db_lock);
3992 if (q->db_pidx_inc) {
3993 /* Make sure that all writes to the TX descriptors
3994 * are committed before we tell HW about them.
3995 */
3996 wmb();
3997 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3998 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3999 q->db_pidx_inc = 0;
4000 }
4001 q->db_disabled = 0;
4002 spin_unlock_irq(&q->db_lock);
4003 }
4004
4005 static void disable_dbs(struct adapter *adap)
4006 {
4007 int i;
4008
4009 for_each_ethrxq(&adap->sge, i)
4010 disable_txq_db(&adap->sge.ethtxq[i].q);
4011 for_each_ofldrxq(&adap->sge, i)
4012 disable_txq_db(&adap->sge.ofldtxq[i].q);
4013 for_each_port(adap, i)
4014 disable_txq_db(&adap->sge.ctrlq[i].q);
4015 }
4016
4017 static void enable_dbs(struct adapter *adap)
4018 {
4019 int i;
4020
4021 for_each_ethrxq(&adap->sge, i)
4022 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4023 for_each_ofldrxq(&adap->sge, i)
4024 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4025 for_each_port(adap, i)
4026 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4027 }
4028
4029 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4030 {
4031 if (adap->uld_handle[CXGB4_ULD_RDMA])
4032 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4033 cmd);
4034 }
4035
4036 static void process_db_full(struct work_struct *work)
4037 {
4038 struct adapter *adap;
4039
4040 adap = container_of(work, struct adapter, db_full_task);
4041
4042 drain_db_fifo(adap, dbfifo_drain_delay);
4043 enable_dbs(adap);
4044 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4045 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4046 DBFIFO_HP_INT | DBFIFO_LP_INT,
4047 DBFIFO_HP_INT | DBFIFO_LP_INT);
4048 }
4049
4050 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4051 {
4052 u16 hw_pidx, hw_cidx;
4053 int ret;
4054
4055 spin_lock_irq(&q->db_lock);
4056 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4057 if (ret)
4058 goto out;
4059 if (q->db_pidx != hw_pidx) {
4060 u16 delta;
4061
4062 if (q->db_pidx >= hw_pidx)
4063 delta = q->db_pidx - hw_pidx;
4064 else
4065 delta = q->size - hw_pidx + q->db_pidx;
4066 wmb();
4067 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4068 QID(q->cntxt_id) | PIDX(delta));
4069 }
4070 out:
4071 q->db_disabled = 0;
4072 q->db_pidx_inc = 0;
4073 spin_unlock_irq(&q->db_lock);
4074 if (ret)
4075 CH_WARN(adap, "DB drop recovery failed.\n");
4076 }
4077 static void recover_all_queues(struct adapter *adap)
4078 {
4079 int i;
4080
4081 for_each_ethrxq(&adap->sge, i)
4082 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4083 for_each_ofldrxq(&adap->sge, i)
4084 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4085 for_each_port(adap, i)
4086 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4087 }
4088
4089 static void process_db_drop(struct work_struct *work)
4090 {
4091 struct adapter *adap;
4092
4093 adap = container_of(work, struct adapter, db_drop_task);
4094
4095 if (is_t4(adap->params.chip)) {
4096 drain_db_fifo(adap, dbfifo_drain_delay);
4097 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4098 drain_db_fifo(adap, dbfifo_drain_delay);
4099 recover_all_queues(adap);
4100 drain_db_fifo(adap, dbfifo_drain_delay);
4101 enable_dbs(adap);
4102 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4103 } else {
4104 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4105 u16 qid = (dropped_db >> 15) & 0x1ffff;
4106 u16 pidx_inc = dropped_db & 0x1fff;
4107 unsigned int s_qpp;
4108 unsigned short udb_density;
4109 unsigned long qpshift;
4110 int page;
4111 u32 udb;
4112
4113 dev_warn(adap->pdev_dev,
4114 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4115 dropped_db, qid,
4116 (dropped_db >> 14) & 1,
4117 (dropped_db >> 13) & 1,
4118 pidx_inc);
4119
4120 drain_db_fifo(adap, 1);
4121
4122 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4123 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4124 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4125 qpshift = PAGE_SHIFT - ilog2(udb_density);
4126 udb = qid << qpshift;
4127 udb &= PAGE_MASK;
4128 page = udb / PAGE_SIZE;
4129 udb += (qid - (page * udb_density)) * 128;
4130
4131 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4132
4133 /* Re-enable BAR2 WC */
4134 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4135 }
4136
4137 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4138 }
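/* A worked example of the T5 doorbell-address recovery above (illustrative
 * only, assuming 4KB pages): with udb_density = 2 queues per page,
 * qpshift = 12 - 1 = 11.  For qid = 5, udb = 5 << 11 = 10240, masked to
 * page offset 8192 (page 2); queue 5 is the second queue on that page
 * (5 - 2 * 2 = 1), so 128 bytes are added, giving the queue's 128-byte
 * doorbell slot at BAR2 offset 8320.  The lost PIDX increment is then
 * replayed at offset 8 within that slot.
 */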
4139
4140 void t4_db_full(struct adapter *adap)
4141 {
4142 if (is_t4(adap->params.chip)) {
4143 disable_dbs(adap);
4144 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4145 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4146 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4147 queue_work(adap->workq, &adap->db_full_task);
4148 }
4149 }
4150
4151 void t4_db_dropped(struct adapter *adap)
4152 {
4153 if (is_t4(adap->params.chip)) {
4154 disable_dbs(adap);
4155 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4156 }
4157 queue_work(adap->workq, &adap->db_drop_task);
4158 }
4159
4160 static void uld_attach(struct adapter *adap, unsigned int uld)
4161 {
4162 void *handle;
4163 struct cxgb4_lld_info lli;
4164 unsigned short i;
4165
4166 lli.pdev = adap->pdev;
4167 lli.pf = adap->fn;
4168 lli.l2t = adap->l2t;
4169 lli.tids = &adap->tids;
4170 lli.ports = adap->port;
4171 lli.vr = &adap->vres;
4172 lli.mtus = adap->params.mtus;
4173 if (uld == CXGB4_ULD_RDMA) {
4174 lli.rxq_ids = adap->sge.rdma_rxq;
4175 lli.ciq_ids = adap->sge.rdma_ciq;
4176 lli.nrxq = adap->sge.rdmaqs;
4177 lli.nciq = adap->sge.rdmaciqs;
4178 } else if (uld == CXGB4_ULD_ISCSI) {
4179 lli.rxq_ids = adap->sge.ofld_rxq;
4180 lli.nrxq = adap->sge.ofldqsets;
4181 }
4182 lli.ntxq = adap->sge.ofldqsets;
4183 lli.nchan = adap->params.nports;
4184 lli.nports = adap->params.nports;
4185 lli.wr_cred = adap->params.ofldq_wr_cred;
4186 lli.adapter_type = adap->params.chip;
4187 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4188 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4189 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4190 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4191 (adap->fn * 4));
4192 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4193 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4194 (adap->fn * 4));
4195 lli.filt_mode = adap->params.tp.vlan_pri_map;
4196 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4197 for (i = 0; i < NCHAN; i++)
4198 lli.tx_modq[i] = i;
4199 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4200 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4201 lli.fw_vers = adap->params.fw_vers;
4202 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4203 lli.sge_ingpadboundary = adap->sge.fl_align;
4204 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4205 lli.sge_pktshift = adap->sge.pktshift;
4206 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4207 lli.max_ordird_qp = adap->params.max_ordird_qp;
4208 lli.max_ird_adapter = adap->params.max_ird_adapter;
4209 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4210
4211 handle = ulds[uld].add(&lli);
4212 if (IS_ERR(handle)) {
4213 dev_warn(adap->pdev_dev,
4214 "could not attach to the %s driver, error %ld\n",
4215 uld_str[uld], PTR_ERR(handle));
4216 return;
4217 }
4218
4219 adap->uld_handle[uld] = handle;
4220
4221 if (!netevent_registered) {
4222 register_netevent_notifier(&cxgb4_netevent_nb);
4223 netevent_registered = true;
4224 }
4225
4226 if (adap->flags & FULL_INIT_DONE)
4227 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4228 }
4229
4230 static void attach_ulds(struct adapter *adap)
4231 {
4232 unsigned int i;
4233
4234 spin_lock(&adap_rcu_lock);
4235 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4236 spin_unlock(&adap_rcu_lock);
4237
4238 mutex_lock(&uld_mutex);
4239 list_add_tail(&adap->list_node, &adapter_list);
4240 for (i = 0; i < CXGB4_ULD_MAX; i++)
4241 if (ulds[i].add)
4242 uld_attach(adap, i);
4243 mutex_unlock(&uld_mutex);
4244 }
4245
4246 static void detach_ulds(struct adapter *adap)
4247 {
4248 unsigned int i;
4249
4250 mutex_lock(&uld_mutex);
4251 list_del(&adap->list_node);
4252 for (i = 0; i < CXGB4_ULD_MAX; i++)
4253 if (adap->uld_handle[i]) {
4254 ulds[i].state_change(adap->uld_handle[i],
4255 CXGB4_STATE_DETACH);
4256 adap->uld_handle[i] = NULL;
4257 }
4258 if (netevent_registered && list_empty(&adapter_list)) {
4259 unregister_netevent_notifier(&cxgb4_netevent_nb);
4260 netevent_registered = false;
4261 }
4262 mutex_unlock(&uld_mutex);
4263
4264 spin_lock(&adap_rcu_lock);
4265 list_del_rcu(&adap->rcu_node);
4266 spin_unlock(&adap_rcu_lock);
4267 }
4268
4269 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4270 {
4271 unsigned int i;
4272
4273 mutex_lock(&uld_mutex);
4274 for (i = 0; i < CXGB4_ULD_MAX; i++)
4275 if (adap->uld_handle[i])
4276 ulds[i].state_change(adap->uld_handle[i], new_state);
4277 mutex_unlock(&uld_mutex);
4278 }
4279
4280 /**
4281 * cxgb4_register_uld - register an upper-layer driver
4282 * @type: the ULD type
4283 * @p: the ULD methods
4284 *
4285 * Registers an upper-layer driver with this driver and notifies the ULD
4286 * about any presently available devices that support its type. Returns
4287 * %-EBUSY if a ULD of the same type is already registered.
4288 */
4289 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4290 {
4291 int ret = 0;
4292 struct adapter *adap;
4293
4294 if (type >= CXGB4_ULD_MAX)
4295 return -EINVAL;
4296 mutex_lock(&uld_mutex);
4297 if (ulds[type].add) {
4298 ret = -EBUSY;
4299 goto out;
4300 }
4301 ulds[type] = *p;
4302 list_for_each_entry(adap, &adapter_list, list_node)
4303 uld_attach(adap, type);
4304 out: mutex_unlock(&uld_mutex);
4305 return ret;
4306 }
4307 EXPORT_SYMBOL(cxgb4_register_uld);
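/* Illustrative sketch of a minimal ULD registration (assumed consumer
 * code, not part of this file; the handler names are hypothetical).
 * This is the pattern the RDMA and iSCSI drivers follow:
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",
 *		.add		= my_uld_add,
 *		.rx_handler	= my_uld_rx_handler,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *
 * where my_uld_add() receives the cxgb4_lld_info built by uld_attach()
 * and returns a per-adapter handle (or an ERR_PTR on failure).
 */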
4308
4309 /**
4310 * cxgb4_unregister_uld - unregister an upper-layer driver
4311 * @type: the ULD type
4312 *
4313 * Unregisters an existing upper-layer driver.
4314 */
4315 int cxgb4_unregister_uld(enum cxgb4_uld type)
4316 {
4317 struct adapter *adap;
4318
4319 if (type >= CXGB4_ULD_MAX)
4320 return -EINVAL;
4321 mutex_lock(&uld_mutex);
4322 list_for_each_entry(adap, &adapter_list, list_node)
4323 adap->uld_handle[type] = NULL;
4324 ulds[type].add = NULL;
4325 mutex_unlock(&uld_mutex);
4326 return 0;
4327 }
4328 EXPORT_SYMBOL(cxgb4_unregister_uld);
4329
4330 /* Check if the netdev on which the event occurred belongs to us. Return
4331  * success (true) if it does, otherwise failure (false).
4332 * Called with rcu_read_lock() held.
4333 */
4334 static bool cxgb4_netdev(const struct net_device *netdev)
4335 {
4336 struct adapter *adap;
4337 int i;
4338
4339 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4340 for (i = 0; i < MAX_NPORTS; i++)
4341 if (adap->port[i] == netdev)
4342 return true;
4343 return false;
4344 }
4345
4346 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4347 unsigned long event)
4348 {
4349 int ret = NOTIFY_DONE;
4350
4351 rcu_read_lock();
4352 if (cxgb4_netdev(event_dev)) {
4353 switch (event) {
4354 case NETDEV_UP:
4355 ret = cxgb4_clip_get(event_dev,
4356 (const struct in6_addr *)ifa->addr.s6_addr);
4357 if (ret < 0) {
4358 rcu_read_unlock();
4359 return ret;
4360 }
4361 ret = NOTIFY_OK;
4362 break;
4363 case NETDEV_DOWN:
4364 cxgb4_clip_release(event_dev,
4365 (const struct in6_addr *)ifa->addr.s6_addr);
4366 ret = NOTIFY_OK;
4367 break;
4368 default:
4369 break;
4370 }
4371 }
4372 rcu_read_unlock();
4373 return ret;
4374 }
4375
4376 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4377 unsigned long event, void *data)
4378 {
4379 struct inet6_ifaddr *ifa = data;
4380 struct net_device *event_dev;
4381 int ret = NOTIFY_DONE;
4382 struct bonding *bond = netdev_priv(ifa->idev->dev);
4383 struct list_head *iter;
4384 struct slave *slave;
4385 struct pci_dev *first_pdev = NULL;
4386
4387 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4388 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4389 ret = clip_add(event_dev, ifa, event);
4390 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4391 /* It is possible that slaves from two different adapters are bonded
4392  * in one bond. We need to find each such distinct adapter and install
4393  * the CLIP entry on it exactly once.
4394  */
4395 bond_for_each_slave(bond, slave, iter) {
4396 if (!first_pdev) {
4397 ret = clip_add(slave->dev, ifa, event);
4398 /* Only initialize first_pdev if clip_add() succeeds,
4399  * since success means the slave is one of our devices.
4400  */
4401 if (ret == NOTIFY_OK)
4402 first_pdev = to_pci_dev(
4403 slave->dev->dev.parent);
4404 } else if (first_pdev !=
4405 to_pci_dev(slave->dev->dev.parent))
4406 ret = clip_add(slave->dev, ifa, event);
4407 }
4408 } else
4409 ret = clip_add(ifa->idev->dev, ifa, event);
4410
4411 return ret;
4412 }
4413
4414 static struct notifier_block cxgb4_inet6addr_notifier = {
4415 .notifier_call = cxgb4_inet6addr_handler
4416 };
4417
4418 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4419 * a physical device.
4420  * The physical device reference is needed to send the actual CLIP command.
4421 */
4422 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4423 {
4424 struct inet6_dev *idev = NULL;
4425 struct inet6_ifaddr *ifa;
4426 int ret = 0;
4427
4428 idev = __in6_dev_get(root_dev);
4429 if (!idev)
4430 return ret;
4431
4432 read_lock_bh(&idev->lock);
4433 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4434 ret = cxgb4_clip_get(dev,
4435 (const struct in6_addr *)ifa->addr.s6_addr);
4436 if (ret < 0)
4437 break;
4438 }
4439 read_unlock_bh(&idev->lock);
4440
4441 return ret;
4442 }
4443
4444 static int update_root_dev_clip(struct net_device *dev)
4445 {
4446 struct net_device *root_dev = NULL;
4447 int i, ret = 0;
4448
4449 /* First populate the real net device's IPv6 addresses */
4450 ret = update_dev_clip(dev, dev);
4451 if (ret)
4452 return ret;
4453
4454 /* Parse all bond and vlan devices layered on top of the physical dev */
4455 for (i = 0; i < VLAN_N_VID; i++) {
4456 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4457 if (!root_dev)
4458 continue;
4459
4460 ret = update_dev_clip(root_dev, dev);
4461 if (ret)
4462 break;
4463 }
4464 return ret;
4465 }
4466
4467 static void update_clip(const struct adapter *adap)
4468 {
4469 int i;
4470 struct net_device *dev;
4471 int ret;
4472
4473 rcu_read_lock();
4474
4475 for (i = 0; i < MAX_NPORTS; i++) {
4476 dev = adap->port[i];
4477 ret = 0;
4478
4479 if (dev)
4480 ret = update_root_dev_clip(dev);
4481
4482 if (ret < 0)
4483 break;
4484 }
4485 rcu_read_unlock();
4486 }
4487
4488 /**
4489 * cxgb_up - enable the adapter
4490 * @adap: adapter being enabled
4491 *
4492 * Called when the first port is enabled, this function performs the
4493 * actions necessary to make an adapter operational, such as completing
4494 * the initialization of HW modules, and enabling interrupts.
4495 *
4496 * Must be called with the rtnl lock held.
4497 */
4498 static int cxgb_up(struct adapter *adap)
4499 {
4500 int err;
4501
4502 err = setup_sge_queues(adap);
4503 if (err)
4504 goto out;
4505 err = setup_rss(adap);
4506 if (err)
4507 goto freeq;
4508
4509 if (adap->flags & USING_MSIX) {
4510 name_msix_vecs(adap);
4511 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4512 adap->msix_info[0].desc, adap);
4513 if (err)
4514 goto irq_err;
4515
4516 err = request_msix_queue_irqs(adap);
4517 if (err) {
4518 free_irq(adap->msix_info[0].vec, adap);
4519 goto irq_err;
4520 }
4521 } else {
4522 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4523 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4524 adap->port[0]->name, adap);
4525 if (err)
4526 goto irq_err;
4527 }
4528 enable_rx(adap);
4529 t4_sge_start(adap);
4530 t4_intr_enable(adap);
4531 adap->flags |= FULL_INIT_DONE;
4532 notify_ulds(adap, CXGB4_STATE_UP);
4533 update_clip(adap);
4534 out:
4535 return err;
4536 irq_err:
4537 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4538 freeq:
4539 t4_free_sge_resources(adap);
4540 goto out;
4541 }
4542
4543 static void cxgb_down(struct adapter *adapter)
4544 {
4545 t4_intr_disable(adapter);
4546 cancel_work_sync(&adapter->tid_release_task);
4547 cancel_work_sync(&adapter->db_full_task);
4548 cancel_work_sync(&adapter->db_drop_task);
4549 adapter->tid_release_task_busy = false;
4550 adapter->tid_release_head = NULL;
4551
4552 if (adapter->flags & USING_MSIX) {
4553 free_msix_queue_irqs(adapter);
4554 free_irq(adapter->msix_info[0].vec, adapter);
4555 } else
4556 free_irq(adapter->pdev->irq, adapter);
4557 quiesce_rx(adapter);
4558 t4_sge_stop(adapter);
4559 t4_free_sge_resources(adapter);
4560 adapter->flags &= ~FULL_INIT_DONE;
4561 }
4562
4563 /*
4564 * net_device operations
4565 */
4566 static int cxgb_open(struct net_device *dev)
4567 {
4568 int err;
4569 struct port_info *pi = netdev_priv(dev);
4570 struct adapter *adapter = pi->adapter;
4571
4572 netif_carrier_off(dev);
4573
4574 if (!(adapter->flags & FULL_INIT_DONE)) {
4575 err = cxgb_up(adapter);
4576 if (err < 0)
4577 return err;
4578 }
4579
4580 err = link_start(dev);
4581 if (!err)
4582 netif_tx_start_all_queues(dev);
4583 return err;
4584 }
4585
4586 static int cxgb_close(struct net_device *dev)
4587 {
4588 struct port_info *pi = netdev_priv(dev);
4589 struct adapter *adapter = pi->adapter;
4590
4591 netif_tx_stop_all_queues(dev);
4592 netif_carrier_off(dev);
4593 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4594 }
4595
4596 /* Return an error number if the indicated filter isn't writable ...
4597 */
4598 static int writable_filter(struct filter_entry *f)
4599 {
4600 if (f->locked)
4601 return -EPERM;
4602 if (f->pending)
4603 return -EBUSY;
4604
4605 return 0;
4606 }
4607
4608 /* Delete the filter at the specified index (if valid). This checks for all
4609  * the common problems with doing this, like the filter being locked or
4610  * currently pending in another operation.
4611 */
4612 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4613 {
4614 struct filter_entry *f;
4615 int ret;
4616
4617 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4618 return -EINVAL;
4619
4620 f = &adapter->tids.ftid_tab[fidx];
4621 ret = writable_filter(f);
4622 if (ret)
4623 return ret;
4624 if (f->valid)
4625 return del_filter_wr(adapter, fidx);
4626
4627 return 0;
4628 }
4629
4630 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4631 __be32 sip, __be16 sport, __be16 vlan,
4632 unsigned int queue, unsigned char port, unsigned char mask)
4633 {
4634 int ret;
4635 struct filter_entry *f;
4636 struct adapter *adap;
4637 int i;
4638 u8 *val;
4639
4640 adap = netdev2adap(dev);
4641
4642 /* Adjust stid to correct filter index */
4643 stid -= adap->tids.sftid_base;
4644 stid += adap->tids.nftids;
4645
4646 /* Check to make sure the filter requested is writable ...
4647 */
4648 f = &adap->tids.ftid_tab[stid];
4649 ret = writable_filter(f);
4650 if (ret)
4651 return ret;
4652
4653 /* Clear out any old resources being used by the filter before
4654 * we start constructing the new filter.
4655 */
4656 if (f->valid)
4657 clear_filter(adap, f);
4658
4659 /* Clear out filter specifications */
4660 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4661 f->fs.val.lport = cpu_to_be16(sport);
4662 f->fs.mask.lport = ~0;
4663 val = (u8 *)&sip;
4664 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4665 for (i = 0; i < 4; i++) {
4666 f->fs.val.lip[i] = val[i];
4667 f->fs.mask.lip[i] = ~0;
4668 }
4669 if (adap->params.tp.vlan_pri_map & F_PORT) {
4670 f->fs.val.iport = port;
4671 f->fs.mask.iport = mask;
4672 }
4673 }
4674
4675 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4676 f->fs.val.proto = IPPROTO_TCP;
4677 f->fs.mask.proto = ~0;
4678 }
4679
4680 f->fs.dirsteer = 1;
4681 f->fs.iq = queue;
4682 /* Mark filter as locked */
4683 f->locked = 1;
4684 f->fs.rpttid = 1;
4685
4686 ret = set_filter_wr(adap, stid);
4687 if (ret) {
4688 clear_filter(adap, f);
4689 return ret;
4690 }
4691
4692 return 0;
4693 }
4694 EXPORT_SYMBOL(cxgb4_create_server_filter);
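/* Illustrative call (assumed consumer code, not part of this file): steer
 * SYNs destined for a listening server at 10.0.0.1:80 to ingress queue 0,
 * matching any port and VLAN.  The stid would normally come from a prior
 * server-tid allocation.
 *
 *	err = cxgb4_create_server_filter(dev, stid, htonl(0x0a000001),
 *					 htons(80), 0, 0, 0, 0);
 */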
4695
4696 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4697 unsigned int queue, bool ipv6)
4698 {
4699 int ret;
4700 struct filter_entry *f;
4701 struct adapter *adap;
4702
4703 adap = netdev2adap(dev);
4704
4705 /* Adjust stid to correct filter index */
4706 stid -= adap->tids.sftid_base;
4707 stid += adap->tids.nftids;
4708
4709 f = &adap->tids.ftid_tab[stid];
4710 /* Unlock the filter */
4711 f->locked = 0;
4712
4713 ret = delete_filter(adap, stid);
4714 if (ret)
4715 return ret;
4716
4717 return 0;
4718 }
4719 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4720
4721 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4722 struct rtnl_link_stats64 *ns)
4723 {
4724 struct port_stats stats;
4725 struct port_info *p = netdev_priv(dev);
4726 struct adapter *adapter = p->adapter;
4727
4728 /* Block retrieving statistics during EEH error
4729 * recovery. Otherwise, the recovery might fail
4730 	 * and the PCI device will be removed permanently.
4731 */
4732 spin_lock(&adapter->stats_lock);
4733 if (!netif_device_present(dev)) {
4734 spin_unlock(&adapter->stats_lock);
4735 return ns;
4736 }
4737 t4_get_port_stats(adapter, p->tx_chan, &stats);
4738 spin_unlock(&adapter->stats_lock);
4739
4740 ns->tx_bytes = stats.tx_octets;
4741 ns->tx_packets = stats.tx_frames;
4742 ns->rx_bytes = stats.rx_octets;
4743 ns->rx_packets = stats.rx_frames;
4744 ns->multicast = stats.rx_mcast_frames;
4745
4746 /* detailed rx_errors */
4747 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4748 stats.rx_runt;
4749 ns->rx_over_errors = 0;
4750 ns->rx_crc_errors = stats.rx_fcs_err;
4751 ns->rx_frame_errors = stats.rx_symbol_err;
4752 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4753 stats.rx_ovflow2 + stats.rx_ovflow3 +
4754 stats.rx_trunc0 + stats.rx_trunc1 +
4755 stats.rx_trunc2 + stats.rx_trunc3;
4756 ns->rx_missed_errors = 0;
4757
4758 /* detailed tx_errors */
4759 ns->tx_aborted_errors = 0;
4760 ns->tx_carrier_errors = 0;
4761 ns->tx_fifo_errors = 0;
4762 ns->tx_heartbeat_errors = 0;
4763 ns->tx_window_errors = 0;
4764
4765 ns->tx_errors = stats.tx_error_frames;
4766 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4767 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4768 return ns;
4769 }
4770
4771 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4772 {
4773 unsigned int mbox;
4774 int ret = 0, prtad, devad;
4775 struct port_info *pi = netdev_priv(dev);
4776 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4777
4778 switch (cmd) {
4779 case SIOCGMIIPHY:
4780 if (pi->mdio_addr < 0)
4781 return -EOPNOTSUPP;
4782 data->phy_id = pi->mdio_addr;
4783 break;
4784 case SIOCGMIIREG:
4785 case SIOCSMIIREG:
4786 if (mdio_phy_id_is_c45(data->phy_id)) {
4787 prtad = mdio_phy_id_prtad(data->phy_id);
4788 devad = mdio_phy_id_devad(data->phy_id);
4789 } else if (data->phy_id < 32) {
4790 prtad = data->phy_id;
4791 devad = 0;
4792 data->reg_num &= 0x1f;
4793 } else
4794 return -EINVAL;
4795
4796 mbox = pi->adapter->fn;
4797 if (cmd == SIOCGMIIREG)
4798 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4799 data->reg_num, &data->val_out);
4800 else
4801 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4802 data->reg_num, data->val_in);
4803 break;
4804 default:
4805 return -EOPNOTSUPP;
4806 }
4807 return ret;
4808 }
4809
4810 static void cxgb_set_rxmode(struct net_device *dev)
4811 {
4812 /* unfortunately we can't return errors to the stack */
4813 set_rxmode(dev, -1, false);
4814 }
4815
4816 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4817 {
4818 int ret;
4819 struct port_info *pi = netdev_priv(dev);
4820
4821 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4822 return -EINVAL;
4823 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4824 -1, -1, -1, true);
4825 if (!ret)
4826 dev->mtu = new_mtu;
4827 return ret;
4828 }
4829
4830 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4831 {
4832 int ret;
4833 struct sockaddr *addr = p;
4834 struct port_info *pi = netdev_priv(dev);
4835
4836 if (!is_valid_ether_addr(addr->sa_data))
4837 return -EADDRNOTAVAIL;
4838
4839 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4840 pi->xact_addr_filt, addr->sa_data, true, true);
4841 if (ret < 0)
4842 return ret;
4843
4844 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4845 pi->xact_addr_filt = ret;
4846 return 0;
4847 }
4848
4849 #ifdef CONFIG_NET_POLL_CONTROLLER
4850 static void cxgb_netpoll(struct net_device *dev)
4851 {
4852 struct port_info *pi = netdev_priv(dev);
4853 struct adapter *adap = pi->adapter;
4854
4855 if (adap->flags & USING_MSIX) {
4856 int i;
4857 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4858
4859 for (i = pi->nqsets; i; i--, rx++)
4860 t4_sge_intr_msix(0, &rx->rspq);
4861 } else
4862 t4_intr_handler(adap)(0, adap);
4863 }
4864 #endif
4865
4866 static const struct net_device_ops cxgb4_netdev_ops = {
4867 .ndo_open = cxgb_open,
4868 .ndo_stop = cxgb_close,
4869 .ndo_start_xmit = t4_eth_xmit,
4870 .ndo_select_queue = cxgb_select_queue,
4871 .ndo_get_stats64 = cxgb_get_stats,
4872 .ndo_set_rx_mode = cxgb_set_rxmode,
4873 .ndo_set_mac_address = cxgb_set_mac_addr,
4874 .ndo_set_features = cxgb_set_features,
4875 .ndo_validate_addr = eth_validate_addr,
4876 .ndo_do_ioctl = cxgb_ioctl,
4877 .ndo_change_mtu = cxgb_change_mtu,
4878 #ifdef CONFIG_NET_POLL_CONTROLLER
4879 .ndo_poll_controller = cxgb_netpoll,
4880 #endif
4881 };
4882
4883 void t4_fatal_err(struct adapter *adap)
4884 {
4885 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4886 t4_intr_disable(adap);
4887 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4888 }
4889
4890 /* Return the specified PCI-E Configuration Space register from our Physical
4891 * Function. We try first via a Firmware LDST Command since we prefer to let
4892 * the firmware own all of these registers, but if that fails we go for it
4893 * directly ourselves.
4894 */
4895 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4896 {
4897 struct fw_ldst_cmd ldst_cmd;
4898 u32 val;
4899 int ret;
4900
4901 /* Construct and send the Firmware LDST Command to retrieve the
4902 * specified PCI-E Configuration Space register.
4903 */
4904 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4905 ldst_cmd.op_to_addrspace =
4906 htonl(FW_CMD_OP(FW_LDST_CMD) |
4907 FW_CMD_REQUEST |
4908 FW_CMD_READ |
4909 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4910 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4911 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4912 ldst_cmd.u.pcie.ctrl_to_fn =
4913 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4914 ldst_cmd.u.pcie.r = reg;
4915 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4916 &ldst_cmd);
4917
4918 /* If the LDST Command succeeded, extract the returned register
4919  * value. Otherwise read it directly ourselves.
4920 */
4921 if (ret == 0)
4922 val = ntohl(ldst_cmd.u.pcie.data[0]);
4923 else
4924 t4_hw_pci_read_cfg4(adap, reg, &val);
4925
4926 return val;
4927 }
4928
4929 static void setup_memwin(struct adapter *adap)
4930 {
4931 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4932
4933 if (is_t4(adap->params.chip)) {
4934 u32 bar0;
4935
4936 /* Truncation intentional: we only read the bottom 32-bits of
4937 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4938 * mechanism to read BAR0 instead of using
4939 * pci_resource_start() because we could be operating from
4940 * within a Virtual Machine which is trapping our accesses to
4941 * our Configuration Space and we need to set up the PCI-E
4942 * Memory Window decoders with the actual addresses which will
4943 * be coming across the PCI-E link.
4944 */
4945 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4946 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4947 adap->t4_bar0 = bar0;
4948
4949 mem_win0_base = bar0 + MEMWIN0_BASE;
4950 mem_win1_base = bar0 + MEMWIN1_BASE;
4951 mem_win2_base = bar0 + MEMWIN2_BASE;
4952 mem_win2_aperture = MEMWIN2_APERTURE;
4953 } else {
4954 /* For T5, only relative offset inside the PCIe BAR is passed */
4955 mem_win0_base = MEMWIN0_BASE;
4956 mem_win1_base = MEMWIN1_BASE;
4957 mem_win2_base = MEMWIN2_BASE_T5;
4958 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4959 }
4960 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4961 mem_win0_base | BIR(0) |
4962 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4963 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4964 mem_win1_base | BIR(0) |
4965 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4966 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4967 mem_win2_base | BIR(0) |
4968 WINDOW(ilog2(mem_win2_aperture) - 10));
4969 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4970 }
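/* A note on the WINDOW() encoding above (derived from the expressions, not
 * in the original source): the aperture is programmed as a power of two in
 * 1KB granules, i.e. aperture = 2^(WINDOW + 10) bytes, so a 64KB window
 * yields ilog2(65536) - 10 = 6.
 */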
4971
4972 static void setup_memwin_rdma(struct adapter *adap)
4973 {
4974 if (adap->vres.ocq.size) {
4975 u32 start;
4976 unsigned int sz_kb;
4977
4978 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4979 start &= PCI_BASE_ADDRESS_MEM_MASK;
4980 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4981 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4982 t4_write_reg(adap,
4983 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4984 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4985 t4_write_reg(adap,
4986 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4987 adap->vres.ocq.start);
4988 t4_read_reg(adap,
4989 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4990 }
4991 }
4992
4993 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4994 {
4995 u32 v;
4996 int ret;
4997
4998 /* get device capabilities */
4999 memset(c, 0, sizeof(*c));
5000 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5001 FW_CMD_REQUEST | FW_CMD_READ);
5002 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
5003 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
5004 if (ret < 0)
5005 return ret;
5006
5007 /* select capabilities we'll be using */
5008 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5009 if (!vf_acls)
5010 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5011 else
5012 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5013 } else if (vf_acls) {
5014 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5015 return ret;
5016 }
5017 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5018 FW_CMD_REQUEST | FW_CMD_WRITE);
5019 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
5020 if (ret < 0)
5021 return ret;
5022
5023 ret = t4_config_glbl_rss(adap, adap->fn,
5024 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5025 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5026 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5027 if (ret < 0)
5028 return ret;
5029
5030 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5031 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5032 if (ret < 0)
5033 return ret;
5034
5035 t4_sge_init(adap);
5036
5037 /* tweak some settings */
5038 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5039 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5040 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5041 v = t4_read_reg(adap, TP_PIO_DATA);
5042 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5043
5044 /* first 4 Tx modulation queues point to consecutive Tx channels */
5045 adap->params.tp.tx_modq_map = 0xE4;
5046 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5047 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
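	/* Illustrative decoding of the constant above: 0xE4 = 0b11100100 is
	 * a packed array of 2-bit channel numbers, mapping modulation queue
	 * 0 -> channel 0, 1 -> 1, 2 -> 2 and 3 -> 3.
	 */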
5048
5049 /* associate each Tx modulation queue with consecutive Tx channels */
5050 v = 0x84218421;
5051 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5052 &v, 1, A_TP_TX_SCHED_HDR);
5053 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5054 &v, 1, A_TP_TX_SCHED_FIFO);
5055 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5056 &v, 1, A_TP_TX_SCHED_PCMD);
5057
5058 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5059 if (is_offload(adap)) {
5060 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5061 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5062 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5063 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5064 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5065 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5066 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5067 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5068 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5069 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5070 }
5071
5072 /* get basic stuff going */
5073 return t4_early_init(adap, adap->fn);
5074 }
5075
5076 /*
5077 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5078 */
5079 #define MAX_ATIDS 8192U
5080
5081 /*
5082 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5083 *
5084 * If the firmware we're dealing with has Configuration File support, then
5085  * we use that to perform all configuration.
5086 */
5087
5088 /*
5089 * Tweak configuration based on module parameters, etc. Most of these have
5090 * defaults assigned to them by Firmware Configuration Files (if we're using
5091 * them) but need to be explicitly set if we're using hard-coded
5092 * initialization. But even in the case of using Firmware Configuration
5093 * Files, we'd like to expose the ability to change these via module
5094 * parameters so these are essentially common tweaks/settings for
5095 * Configuration Files and hard-coded initialization ...
5096 */
5097 static int adap_init0_tweaks(struct adapter *adapter)
5098 {
5099 /*
5100 * Fix up various Host-Dependent Parameters like Page Size, Cache
5101 * Line Size, etc. The firmware default is for a 4KB Page Size and
5102 * 64B Cache Line Size ...
5103 */
5104 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5105
5106 /*
5107 * Process module parameters which affect early initialization.
5108 */
5109 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5110 dev_err(&adapter->pdev->dev,
5111 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5112 rx_dma_offset);
5113 rx_dma_offset = 2;
5114 }
5115 t4_set_reg_field(adapter, SGE_CONTROL,
5116 PKTSHIFT_MASK,
5117 PKTSHIFT(rx_dma_offset));
5118
5119 /*
5120 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5121 * adds the pseudo header itself.
5122 */
5123 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5124 CSUM_HAS_PSEUDO_HDR, 0);
5125
5126 return 0;
5127 }
5128
5129 /*
5130 * Attempt to initialize the adapter via a Firmware Configuration File.
5131 */
5132 static int adap_init0_config(struct adapter *adapter, int reset)
5133 {
5134 struct fw_caps_config_cmd caps_cmd;
5135 const struct firmware *cf;
5136 unsigned long mtype = 0, maddr = 0;
5137 u32 finiver, finicsum, cfcsum;
5138 int ret;
5139 int config_issued = 0;
5140 char *fw_config_file, fw_config_file_path[256];
5141 char *config_name = NULL;
5142
5143 /*
5144 * Reset device if necessary.
5145 */
5146 if (reset) {
5147 ret = t4_fw_reset(adapter, adapter->mbox,
5148 PIORSTMODE | PIORST);
5149 if (ret < 0)
5150 goto bye;
5151 }
5152
5153 /*
5154 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5155 * then use that. Otherwise, use the configuration file stored
5156 * in the adapter flash ...
5157 */
5158 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
5159 case CHELSIO_T4:
5160 fw_config_file = FW4_CFNAME;
5161 break;
5162 case CHELSIO_T5:
5163 fw_config_file = FW5_CFNAME;
5164 break;
5165 default:
5166 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5167 adapter->pdev->device);
5168 ret = -EINVAL;
5169 goto bye;
5170 }
5171
5172 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5173 if (ret < 0) {
5174 config_name = "On FLASH";
5175 mtype = FW_MEMTYPE_CF_FLASH;
5176 maddr = t4_flash_cfg_addr(adapter);
5177 } else {
5178 u32 params[7], val[7];
5179
5180 sprintf(fw_config_file_path,
5181 "/lib/firmware/%s", fw_config_file);
5182 config_name = fw_config_file_path;
5183
5184 if (cf->size >= FLASH_CFG_MAX_SIZE)
5185 ret = -ENOMEM;
5186 else {
5187 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5188 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5189 ret = t4_query_params(adapter, adapter->mbox,
5190 adapter->fn, 0, 1, params, val);
5191 if (ret == 0) {
5192 /*
5193 * For t4_memory_rw() below addresses and
5194 * sizes have to be in terms of multiples of 4
5195 * bytes. So, if the Configuration File isn't
5196 * a multiple of 4 bytes in length we'll have
5197 * to write that out separately since we can't
5198 * guarantee that the bytes following the
5199 * residual byte in the buffer returned by
5200 * request_firmware() are zeroed out ...
5201 */
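					/* For example, a 1027-byte file is
					 * written as a 1024-byte bulk transfer
					 * plus one zero-padded 4-byte word
					 * carrying the remaining 3 bytes
					 * (resid = 3).
					 */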
5202 size_t resid = cf->size & 0x3;
5203 size_t size = cf->size & ~0x3;
5204 __be32 *data = (__be32 *)cf->data;
5205
5206 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5207 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5208
5209 spin_lock(&adapter->win0_lock);
5210 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5211 size, data, T4_MEMORY_WRITE);
5212 if (ret == 0 && resid != 0) {
5213 union {
5214 __be32 word;
5215 char buf[4];
5216 } last;
5217 int i;
5218
5219 last.word = data[size >> 2];
5220 for (i = resid; i < 4; i++)
5221 last.buf[i] = 0;
5222 ret = t4_memory_rw(adapter, 0, mtype,
5223 maddr + size,
5224 4, &last.word,
5225 T4_MEMORY_WRITE);
5226 }
5227 spin_unlock(&adapter->win0_lock);
5228 }
5229 }
5230
5231 release_firmware(cf);
5232 if (ret)
5233 goto bye;
5234 }
5235
5236 /*
5237 * Issue a Capability Configuration command to the firmware to get it
5238 * to parse the Configuration File. We don't use t4_fw_config_file()
5239 * because we want the ability to modify various features after we've
5240 * processed the configuration file ...
5241 */
5242 memset(&caps_cmd, 0, sizeof(caps_cmd));
5243 caps_cmd.op_to_write =
5244 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5245 FW_CMD_REQUEST |
5246 FW_CMD_READ);
5247 caps_cmd.cfvalid_to_len16 =
5248 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5249 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5250 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5251 FW_LEN16(caps_cmd));
5252 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5253 &caps_cmd);
5254
5255 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5256 * Configuration File in FLASH), our last gasp effort is to use the
5257 * Firmware Configuration File which is embedded in the firmware. A
5258 * very few early versions of the firmware didn't have one embedded
5259 * but we can ignore those.
5260 */
5261 if (ret == -ENOENT) {
5262 memset(&caps_cmd, 0, sizeof(caps_cmd));
5263 caps_cmd.op_to_write =
5264 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5265 FW_CMD_REQUEST |
5266 FW_CMD_READ);
5267 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5268 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5269 sizeof(caps_cmd), &caps_cmd);
5270 config_name = "Firmware Default";
5271 }
5272
5273 config_issued = 1;
5274 if (ret < 0)
5275 goto bye;
5276
5277 finiver = ntohl(caps_cmd.finiver);
5278 finicsum = ntohl(caps_cmd.finicsum);
5279 cfcsum = ntohl(caps_cmd.cfcsum);
5280 if (finicsum != cfcsum)
5281 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5282 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5283 finicsum, cfcsum);
5284
5285 /*
5286 * And now tell the firmware to use the configuration we just loaded.
5287 */
5288 caps_cmd.op_to_write =
5289 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5290 FW_CMD_REQUEST |
5291 FW_CMD_WRITE);
5292 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5293 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5294 NULL);
5295 if (ret < 0)
5296 goto bye;
5297
5298 /*
5299 * Tweak configuration based on system architecture, module
5300 * parameters, etc.
5301 */
5302 ret = adap_init0_tweaks(adapter);
5303 if (ret < 0)
5304 goto bye;
5305
5306 /*
5307 * And finally tell the firmware to initialize itself using the
5308 * parameters from the Configuration File.
5309 */
5310 ret = t4_fw_initialize(adapter, adapter->mbox);
5311 if (ret < 0)
5312 goto bye;
5313
5314 /*
5315 * Return successfully and note that we're operating with parameters
5316 * not supplied by the driver, rather than from hard-wired
5317 	 * initialization constants buried in the driver.
5318 */
5319 adapter->flags |= USING_SOFT_PARAMS;
5320 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5321 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5322 config_name, finiver, cfcsum);
5323 return 0;
5324
5325 /*
5326 * Something bad happened. Return the error ... (If the "error"
5327 * is that there's no Configuration File on the adapter we don't
5328 * want to issue a warning since this is fairly common.)
5329 */
5330 bye:
5331 if (config_issued && ret != -ENOENT)
5332 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5333 config_name, -ret);
5334 return ret;
5335 }
5336
5337 /*
5338 * Attempt to initialize the adapter via hard-coded, driver supplied
5339 * parameters ...
5340 */
5341 static int adap_init0_no_config(struct adapter *adapter, int reset)
5342 {
5343 struct sge *s = &adapter->sge;
5344 struct fw_caps_config_cmd caps_cmd;
5345 u32 v;
5346 int i, ret;
5347
5348 /*
5349 * Reset device if necessary
5350 */
5351 if (reset) {
5352 ret = t4_fw_reset(adapter, adapter->mbox,
5353 PIORSTMODE | PIORST);
5354 if (ret < 0)
5355 goto bye;
5356 }
5357
5358 /*
5359 * Get device capabilities and select which we'll be using.
5360 */
5361 memset(&caps_cmd, 0, sizeof(caps_cmd));
5362 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5363 FW_CMD_REQUEST | FW_CMD_READ);
5364 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5365 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5366 &caps_cmd);
5367 if (ret < 0)
5368 goto bye;
5369
5370 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5371 if (!vf_acls)
5372 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5373 else
5374 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5375 } else if (vf_acls) {
5376 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5377 goto bye;
5378 }
5379 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5380 FW_CMD_REQUEST | FW_CMD_WRITE);
5381 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5382 NULL);
5383 if (ret < 0)
5384 goto bye;
5385
5386 /*
5387 * Tweak configuration based on system architecture, module
5388 * parameters, etc.
5389 */
5390 ret = adap_init0_tweaks(adapter);
5391 if (ret < 0)
5392 goto bye;
5393
5394 /*
5395 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5396 * mode which maps each Virtual Interface to its own section of
5397 * the RSS Table and we turn on all map and hash enables ...
5398 */
5399 adapter->flags |= RSS_TNLALLLOOKUP;
5400 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5401 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5402 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5403 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5404 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5405 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5406 if (ret < 0)
5407 goto bye;
5408
5409 /*
5410 * Set up our own fundamental resource provisioning ...
5411 */
5412 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5413 PFRES_NEQ, PFRES_NETHCTRL,
5414 PFRES_NIQFLINT, PFRES_NIQ,
5415 PFRES_TC, PFRES_NVI,
5416 FW_PFVF_CMD_CMASK_MASK,
5417 pfvfres_pmask(adapter, adapter->fn, 0),
5418 PFRES_NEXACTF,
5419 PFRES_R_CAPS, PFRES_WX_CAPS);
5420 if (ret < 0)
5421 goto bye;
5422
5423 /*
5424 	 * Perform low level SGE initialization. We need to do this before we
5425 	 * send the firmware the INITIALIZE command because that command will
5426 	 * release any other PF Drivers which are waiting for Master
5427 	 * Initialization to proceed.
5428 */
5429 for (i = 0; i < SGE_NTIMERS - 1; i++)
5430 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5431 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5432 s->counter_val[0] = 1;
5433 for (i = 1; i < SGE_NCOUNTERS; i++)
5434 s->counter_val[i] = min(intr_cnt[i - 1],
5435 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5436 t4_sge_init(adapter);
5437
5438 #ifdef CONFIG_PCI_IOV
5439 /*
5440 * Provision resource limits for Virtual Functions. We currently
5441 * grant them all the same static resource limits except for the Port
5442 * Access Rights Mask which we're assigning based on the PF. All of
5443 * the static provisioning stuff for both the PF and VF really needs
5444 * to be managed in a persistent manner for each device which the
5445 * firmware controls.
5446 */
5447 {
5448 int pf, vf;
5449
5450 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5451 if (num_vf[pf] <= 0)
5452 continue;
5453
5454 /* VF numbering starts at 1! */
5455 for (vf = 1; vf <= num_vf[pf]; vf++) {
5456 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5457 pf, vf,
5458 VFRES_NEQ, VFRES_NETHCTRL,
5459 VFRES_NIQFLINT, VFRES_NIQ,
5460 VFRES_TC, VFRES_NVI,
5461 FW_PFVF_CMD_CMASK_MASK,
5462 pfvfres_pmask(
5463 adapter, pf, vf),
5464 VFRES_NEXACTF,
5465 VFRES_R_CAPS, VFRES_WX_CAPS);
5466 if (ret < 0)
5467 dev_warn(adapter->pdev_dev,
5468 "failed to "\
5469 "provision pf/vf=%d/%d; "
5470 "err=%d\n", pf, vf, ret);
5471 }
5472 }
5473 }
5474 #endif
5475
5476 /*
5477 * Set up the default filter mode. Later we'll want to implement this
5478 * via a firmware command, etc. ... This needs to be done before the
5479 	 * firmware initialization command ... If the selected set of fields
5480 * isn't equal to the default value, we'll need to make sure that the
5481 * field selections will fit in the 36-bit budget.
5482 */
5483 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5484 int j, bits = 0;
5485
5486 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5487 switch (tp_vlan_pri_map & (1 << j)) {
5488 case 0:
5489 /* compressed filter field not enabled */
5490 break;
5491 case FCOE_MASK:
5492 bits += 1;
5493 break;
5494 case PORT_MASK:
5495 bits += 3;
5496 break;
5497 case VNIC_ID_MASK:
5498 bits += 17;
5499 break;
5500 case VLAN_MASK:
5501 bits += 17;
5502 break;
5503 case TOS_MASK:
5504 bits += 8;
5505 break;
5506 case PROTOCOL_MASK:
5507 bits += 8;
5508 break;
5509 case ETHERTYPE_MASK:
5510 bits += 16;
5511 break;
5512 case MACMATCH_MASK:
5513 bits += 9;
5514 break;
5515 case MPSHITTYPE_MASK:
5516 bits += 3;
5517 break;
5518 case FRAGMENTATION_MASK:
5519 bits += 1;
5520 break;
5521 }
5522
5523 if (bits > 36) {
5524 dev_err(adapter->pdev_dev,
5525 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5526 " using %#x\n", tp_vlan_pri_map, bits,
5527 TP_VLAN_PRI_MAP_DEFAULT);
5528 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5529 }
5530 }
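	/* A worked example of the budget check above (illustrative only):
	 * enabling PORT (3) + VLAN (17) + PROTOCOL (8) + ETHERTYPE (16)
	 * would need 44 bits, which exceeds the 36-bit compressed filter
	 * tuple, so the default map would be restored.
	 */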
5531 v = tp_vlan_pri_map;
5532 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5533 &v, 1, TP_VLAN_PRI_MAP);
5534
5535 /*
5536 	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5537 * to support any of the compressed filter fields above. Newer
5538 * versions of the firmware do this automatically but it doesn't hurt
5539 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5540 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5541 * since the firmware automatically turns this on and off when we have
5542 * a non-zero number of filters active (since it does have a
5543 * performance impact).
5544 */
5545 if (tp_vlan_pri_map)
5546 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5547 FIVETUPLELOOKUP_MASK,
5548 FIVETUPLELOOKUP_MASK);
5549
5550 /*
5551 * Tweak some settings.
5552 */
5553 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5554 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5555 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5556 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5557
5558 /*
5559 * Get basic stuff going by issuing the Firmware Initialize command.
5560 * Note that this _must_ be after all PFVF commands ...
5561 */
5562 ret = t4_fw_initialize(adapter, adapter->mbox);
5563 if (ret < 0)
5564 goto bye;
5565
5566 /*
5567 * Return successfully!
5568 */
5569 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5570 "driver parameters\n");
5571 return 0;
5572
5573 /*
5574 * Something bad happened. Return the error ...
5575 */
5576 bye:
5577 return ret;
5578 }
5579
5580 static struct fw_info fw_info_array[] = {
5581 {
5582 .chip = CHELSIO_T4,
5583 .fs_name = FW4_CFNAME,
5584 .fw_mod_name = FW4_FNAME,
5585 .fw_hdr = {
5586 .chip = FW_HDR_CHIP_T4,
5587 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5588 .intfver_nic = FW_INTFVER(T4, NIC),
5589 .intfver_vnic = FW_INTFVER(T4, VNIC),
5590 .intfver_ri = FW_INTFVER(T4, RI),
5591 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5592 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5593 },
5594 }, {
5595 .chip = CHELSIO_T5,
5596 .fs_name = FW5_CFNAME,
5597 .fw_mod_name = FW5_FNAME,
5598 .fw_hdr = {
5599 .chip = FW_HDR_CHIP_T5,
5600 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5601 .intfver_nic = FW_INTFVER(T5, NIC),
5602 .intfver_vnic = FW_INTFVER(T5, VNIC),
5603 .intfver_ri = FW_INTFVER(T5, RI),
5604 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5605 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5606 },
5607 }
5608 };
5609
5610 static struct fw_info *find_fw_info(int chip)
5611 {
5612 int i;
5613
5614 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5615 if (fw_info_array[i].chip == chip)
5616 return &fw_info_array[i];
5617 }
5618 return NULL;
5619 }
5620
5621 /*
5622 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5623 */
5624 static int adap_init0(struct adapter *adap)
5625 {
5626 int ret;
5627 u32 v, port_vec;
5628 enum dev_state state;
5629 u32 params[7], val[7];
5630 struct fw_caps_config_cmd caps_cmd;
5631 int reset = 1;
5632
5633 /*
5634 * Contact FW, advertising Master capability (and potentially forcing
5635 * ourselves as the Master PF if our module parameter force_init is
5636 * set).
5637 */
5638 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5639 force_init ? MASTER_MUST : MASTER_MAY,
5640 &state);
5641 if (ret < 0) {
5642 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5643 ret);
5644 return ret;
5645 }
5646 if (ret == adap->mbox)
5647 adap->flags |= MASTER_PF;
5648 if (force_init && state == DEV_STATE_INIT)
5649 state = DEV_STATE_UNINIT;
5650
5651 /*
5652 * If we're the Master PF Driver and the device is uninitialized,
5653 * then let's consider upgrading the firmware ... (We always want
5654 * to check the firmware version number in order to A. get it for
5655 * later reporting and B. to warn if the currently loaded firmware
5656 * is excessively mismatched relative to the driver.)
5657 */
5658 t4_get_fw_version(adap, &adap->params.fw_vers);
5659 t4_get_tp_version(adap, &adap->params.tp_vers);
5660 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5661 struct fw_info *fw_info;
5662 struct fw_hdr *card_fw;
5663 const struct firmware *fw;
5664 const u8 *fw_data = NULL;
5665 unsigned int fw_size = 0;
5666
5667 /* This is the firmware whose headers the driver was compiled
5668 * against
5669 */
5670 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5671 if (fw_info == NULL) {
5672 dev_err(adap->pdev_dev,
5673 "unable to get firmware info for chip %d.\n",
5674 CHELSIO_CHIP_VERSION(adap->params.chip));
5675 return -EINVAL;
5676 }
5677
5678 /* allocate memory to read the header of the firmware on the
5679 * card
5680 */
5681 card_fw = t4_alloc_mem(sizeof(*card_fw));
5682
5683 		/* Get FW from /lib/firmware/ */
5684 ret = request_firmware(&fw, fw_info->fw_mod_name,
5685 adap->pdev_dev);
5686 if (ret < 0) {
5687 dev_err(adap->pdev_dev,
5688 "unable to load firmware image %s, error %d\n",
5689 fw_info->fw_mod_name, ret);
5690 } else {
5691 fw_data = fw->data;
5692 fw_size = fw->size;
5693 }
5694
5695 /* upgrade FW logic */
5696 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5697 state, &reset);
5698
5699 /* Cleaning up */
5700 if (fw != NULL)
5701 release_firmware(fw);
5702 t4_free_mem(card_fw);
5703
5704 if (ret < 0)
5705 goto bye;
5706 }
5707
5708 /*
5709 * Grab VPD parameters. This should be done after we establish a
5710 * connection to the firmware since some of the VPD parameters
5711 * (notably the Core Clock frequency) are retrieved via requests to
5712 * the firmware. On the other hand, we need these fairly early on
5713 * so we do this right after getting ahold of the firmware.
5714 */
5715 ret = get_vpd_params(adap, &adap->params.vpd);
5716 if (ret < 0)
5717 goto bye;
5718
5719 /*
5720 * Find out what ports are available to us. Note that we need to do
5721 * this before calling adap_init0_no_config() since it needs nports
5722 * and portvec ...
5723 */
5724 v =
5725 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5726 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5727 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5728 if (ret < 0)
5729 goto bye;
5730
5731 adap->params.nports = hweight32(port_vec);
5732 adap->params.portvec = port_vec;
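/*
 * Example: port_vec is a bitmap of the physical ports available to
 * us, so a value of 0x5 (binary 101) means ports 0 and 2 are
 * present, and hweight32(0x5) == 2 gives nports = 2.
 */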
5733
5734 /*
5735 * If the firmware is initialized already (and we're not forcing a
5736 * master initialization), note that we're living with existing
5737 * adapter parameters. Otherwise, it's time to try initializing the
5738 * adapter ...
5739 */
5740 if (state == DEV_STATE_INIT) {
5741 dev_info(adap->pdev_dev, "Coming up as %s: "
5742 "Adapter already initialized\n",
5743 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5744 adap->flags |= USING_SOFT_PARAMS;
5745 } else {
5746 dev_info(adap->pdev_dev, "Coming up as MASTER: "
5747 "Initializing adapter\n");
5748
5749 /*
5750 * If the firmware doesn't support Configuration
5751 * Files, warn the user.
5752 */
5753 if (ret < 0)
5754 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5755 "configuration file.\n");
5756 if (force_old_init)
5757 ret = adap_init0_no_config(adap, reset);
5758 else {
5759 /*
5760 * Find out whether we're dealing with a version of
5761 * the firmware which has configuration file support.
5762 */
5763 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5764 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5765 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5766 params, val);
5767
5768 /*
5769 * If the firmware doesn't support Configuration
5770 * Files, use the old Driver-based, hard-wired
5771 * initialization. Otherwise, try using the
5772 * Configuration File support and fall back to the
5773 * Driver-based initialization if there's no
5774 * Configuration File found.
5775 */
5776 if (ret < 0)
5777 ret = adap_init0_no_config(adap, reset);
5778 else {
5779 /*
5780 * The firmware provides us with a memory
5781 * buffer where we can load a Configuration
5782 * File from the host if we want to override
5783 * the Configuration File in flash.
5784 */
5785
5786 ret = adap_init0_config(adap, reset);
5787 if (ret == -ENOENT) {
5788 dev_info(adap->pdev_dev,
5789 "No Configuration File present "
5790 "on adapter. Using hard-wired "
5791 "configuration parameters.\n");
5792 ret = adap_init0_no_config(adap, reset);
5793 }
5794 }
5795 }
5796 if (ret < 0) {
5797 dev_err(adap->pdev_dev,
5798 "could not initialize adapter, error %d\n",
5799 -ret);
5800 goto bye;
5801 }
5802 }
5803
5804 /*
5805 * If we're living with non-hard-coded parameters (either from a
5806 * Firmware Configuration File or values programmed by a different PF
5807 * Driver), give the SGE code a chance to pull in anything that it
5808 * needs ... Note that this must be called after we retrieve our VPD
5809 * parameters in order to know how to convert core ticks to seconds.
5810 */
5811 if (adap->flags & USING_SOFT_PARAMS) {
5812 ret = t4_sge_init(adap);
5813 if (ret < 0)
5814 goto bye;
5815 }
5816
5817 if (is_bypass_device(adap->pdev->device))
5818 adap->params.bypass = 1;
5819
5820 /*
5821 * Grab some of our basic fundamental operating parameters.
5822 */
5823 #define FW_PARAM_DEV(param) \
5824 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5825 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5826
5827 #define FW_PARAM_PFVF(param) \
5828 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5829 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5830 FW_PARAMS_PARAM_Y(0) | \
5831 FW_PARAMS_PARAM_Z(0)
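/*
 * Usage sketch for the helpers above (illustrative only): a single
 * PF/VF parameter query builds a mnemonic with these macros and reads
 * the result into a matching value slot, exactly as the calls below
 * do in batches of up to 7:
 *
 *	u32 param = FW_PARAM_PFVF(EQ_START);
 *	u32 value;
 *
 *	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
 *			      &param, &value);
 *	if (ret == 0)
 *		... value now holds the PF's first egress queue ID ...
 */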
5832
5833 params[0] = FW_PARAM_PFVF(EQ_START);
5834 params[1] = FW_PARAM_PFVF(L2T_START);
5835 params[2] = FW_PARAM_PFVF(L2T_END);
5836 params[3] = FW_PARAM_PFVF(FILTER_START);
5837 params[4] = FW_PARAM_PFVF(FILTER_END);
5838 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5839 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5840 if (ret < 0)
5841 goto bye;
5842 adap->sge.egr_start = val[0];
5843 adap->l2t_start = val[1];
5844 adap->l2t_end = val[2];
5845 adap->tids.ftid_base = val[3];
5846 adap->tids.nftids = val[4] - val[3] + 1;
5847 adap->sge.ingr_start = val[5];
5848
5849 /* query params related to active filter region */
5850 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5851 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5852 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5853 /* If the Active Filter region is non-empty, enable establishing
5854 * offload connections through firmware work requests.
5855 */
5856 if ((val[0] != val[1]) && (ret >= 0)) {
5857 adap->flags |= FW_OFLD_CONN;
5858 adap->tids.aftid_base = val[0];
5859 adap->tids.aftid_end = val[1];
5860 }
5861
5862 /* If we're running on newer firmware, let it know that we're
5863 * prepared to deal with encapsulated CPL messages. Older
5864 * firmware won't understand this and we'll just get
5865 * unencapsulated messages ...
5866 */
5867 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5868 val[0] = 1;
5869 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5870
5871 /*
5872 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5873 * capability. Earlier versions of the firmware didn't have the
5874 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5875 * permission to use ULPTX MEMWRITE DSGL.
5876 */
5877 if (is_t4(adap->params.chip)) {
5878 adap->params.ulptx_memwrite_dsgl = false;
5879 } else {
5880 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5881 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5882 1, params, val);
5883 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5884 }
5885
5886 /*
5887 * Get device capabilities so we can determine what resources we need
5888 * to manage.
5889 */
5890 memset(&caps_cmd, 0, sizeof(caps_cmd));
5891 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5892 FW_CMD_REQUEST | FW_CMD_READ);
5893 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5894 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5895 &caps_cmd);
5896 if (ret < 0)
5897 goto bye;
5898
5899 if (caps_cmd.ofldcaps) {
5900 /* query offload-related parameters */
5901 params[0] = FW_PARAM_DEV(NTID);
5902 params[1] = FW_PARAM_PFVF(SERVER_START);
5903 params[2] = FW_PARAM_PFVF(SERVER_END);
5904 params[3] = FW_PARAM_PFVF(TDDP_START);
5905 params[4] = FW_PARAM_PFVF(TDDP_END);
5906 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5907 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5908 params, val);
5909 if (ret < 0)
5910 goto bye;
5911 adap->tids.ntids = val[0];
5912 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5913 adap->tids.stid_base = val[1];
5914 adap->tids.nstids = val[2] - val[1] + 1;
5915 /*
5916 * Set up the server filter region. Divide the available filter
5917 * region into two parts: regular filters get 1/3rd and server
5918 * filters get 2/3rds. This split is only enabled when the
5919 * workaround (FW_OFLD_CONN) path is enabled.
5920 * 1. Regular filters.
5921 * 2. Server filters: these are special filters used to
5922 * redirect SYN packets to the offload queue.
5923 */
5924 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5925 adap->tids.sftid_base = adap->tids.ftid_base +
5926 DIV_ROUND_UP(adap->tids.nftids, 3);
5927 adap->tids.nsftids = adap->tids.nftids -
5928 DIV_ROUND_UP(adap->tids.nftids, 3);
5929 adap->tids.nftids = adap->tids.sftid_base -
5930 adap->tids.ftid_base;
5931 }
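/*
 * Worked example of the split above: with nftids == 496,
 * DIV_ROUND_UP(496, 3) == 166, so sftid_base = ftid_base + 166,
 * nsftids = 496 - 166 = 330, and nftids shrinks to 166, which is
 * roughly a 1/3 : 2/3 split between regular and server filters.
 */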
5932 adap->vres.ddp.start = val[3];
5933 adap->vres.ddp.size = val[4] - val[3] + 1;
5934 adap->params.ofldq_wr_cred = val[5];
5935
5936 adap->params.offload = 1;
5937 }
5938 if (caps_cmd.rdmacaps) {
5939 params[0] = FW_PARAM_PFVF(STAG_START);
5940 params[1] = FW_PARAM_PFVF(STAG_END);
5941 params[2] = FW_PARAM_PFVF(RQ_START);
5942 params[3] = FW_PARAM_PFVF(RQ_END);
5943 params[4] = FW_PARAM_PFVF(PBL_START);
5944 params[5] = FW_PARAM_PFVF(PBL_END);
5945 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5946 params, val);
5947 if (ret < 0)
5948 goto bye;
5949 adap->vres.stag.start = val[0];
5950 adap->vres.stag.size = val[1] - val[0] + 1;
5951 adap->vres.rq.start = val[2];
5952 adap->vres.rq.size = val[3] - val[2] + 1;
5953 adap->vres.pbl.start = val[4];
5954 adap->vres.pbl.size = val[5] - val[4] + 1;
5955
5956 params[0] = FW_PARAM_PFVF(SQRQ_START);
5957 params[1] = FW_PARAM_PFVF(SQRQ_END);
5958 params[2] = FW_PARAM_PFVF(CQ_START);
5959 params[3] = FW_PARAM_PFVF(CQ_END);
5960 params[4] = FW_PARAM_PFVF(OCQ_START);
5961 params[5] = FW_PARAM_PFVF(OCQ_END);
5962 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5963 val);
5964 if (ret < 0)
5965 goto bye;
5966 adap->vres.qp.start = val[0];
5967 adap->vres.qp.size = val[1] - val[0] + 1;
5968 adap->vres.cq.start = val[2];
5969 adap->vres.cq.size = val[3] - val[2] + 1;
5970 adap->vres.ocq.start = val[4];
5971 adap->vres.ocq.size = val[5] - val[4] + 1;
5972
5973 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5974 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5975 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5976 val);
5977 if (ret < 0) {
5978 adap->params.max_ordird_qp = 8;
5979 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5980 ret = 0;
5981 } else {
5982 adap->params.max_ordird_qp = val[0];
5983 adap->params.max_ird_adapter = val[1];
5984 }
5985 dev_info(adap->pdev_dev,
5986 "max_ordird_qp %d max_ird_adapter %d\n",
5987 adap->params.max_ordird_qp,
5988 adap->params.max_ird_adapter);
5989 }
5990 if (caps_cmd.iscsicaps) {
5991 params[0] = FW_PARAM_PFVF(ISCSI_START);
5992 params[1] = FW_PARAM_PFVF(ISCSI_END);
5993 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5994 params, val);
5995 if (ret < 0)
5996 goto bye;
5997 adap->vres.iscsi.start = val[0];
5998 adap->vres.iscsi.size = val[1] - val[0] + 1;
5999 }
6000 #undef FW_PARAM_PFVF
6001 #undef FW_PARAM_DEV
6002
6003 /* The MTU/MSS Table is initialized by now, so load their values. If
6004 * we're initializing the adapter, then we'll make any modifications
6005 * we want to the MTU/MSS Table and also initialize the congestion
6006 * parameters.
6007 */
6008 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
6009 if (state != DEV_STATE_INIT) {
6010 int i;
6011
6012 /* The default MTU Table contains values 1492 and 1500.
6013 * However, for TCP, it's better to have two values which are
6014 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6015 * This allows us to have a TCP Data Payload which is a
6016 * multiple of 8 regardless of what combination of TCP Options
6017 * are in use (always a multiple of 4 bytes) which is
6018 * important for performance reasons. For instance, if no
6019 * options are in use, then we have a 20-byte IP header and a
6020 * 20-byte TCP header. In this case, a 1500-byte MSS would
6021 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6022 * which is not a multiple of 8. So using an MTU of 1488 in
6023 * this case results in a TCP Data Payload of 1448 bytes which
6024 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6025 * Stamps have been negotiated, then an MTU of 1500 bytes
6026 * results in a TCP Data Payload of 1448 bytes which, as
6027 * above, is a multiple of 8 bytes ...
6028 */
6029 for (i = 0; i < NMTUS; i++)
6030 if (adap->params.mtus[i] == 1492) {
6031 adap->params.mtus[i] = 1488;
6032 break;
6033 }
6034
6035 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6036 adap->params.b_wnd);
6037 }
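/*
 * Quick check of the arithmetic above: with no TCP options,
 * 1488 - 20 (IP) - 20 (TCP) = 1448 = 181 * 8, and with 12-byte
 * timestamps, 1500 - 52 = 1448 as well, so both MTU Table entries
 * yield a TCP Data Payload that is a multiple of 8.
 */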
6038 t4_init_tp_params(adap);
6039 adap->flags |= FW_OK;
6040 return 0;
6041
6042 /*
6043 * Something bad happened. If a command timed out or failed with
6044 * EIO, the firmware is not operating within its spec or something
6045 * catastrophic happened to the HW/FW; stop issuing commands.
6046 */
6047 bye:
6048 if (ret != -ETIMEDOUT && ret != -EIO)
6049 t4_fw_bye(adap, adap->mbox);
6050 return ret;
6051 }
6052
6053 /* EEH callbacks */
6054
6055 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6056 pci_channel_state_t state)
6057 {
6058 int i;
6059 struct adapter *adap = pci_get_drvdata(pdev);
6060
6061 if (!adap)
6062 goto out;
6063
6064 rtnl_lock();
6065 adap->flags &= ~FW_OK;
6066 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
6067 spin_lock(&adap->stats_lock);
6068 for_each_port(adap, i) {
6069 struct net_device *dev = adap->port[i];
6070
6071 netif_device_detach(dev);
6072 netif_carrier_off(dev);
6073 }
6074 spin_unlock(&adap->stats_lock);
6075 if (adap->flags & FULL_INIT_DONE)
6076 cxgb_down(adap);
6077 rtnl_unlock();
6078 if ((adap->flags & DEV_ENABLED)) {
6079 pci_disable_device(pdev);
6080 adap->flags &= ~DEV_ENABLED;
6081 }
6082 out: return state == pci_channel_io_perm_failure ?
6083 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6084 }
6085
6086 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6087 {
6088 int i, ret;
6089 struct fw_caps_config_cmd c;
6090 struct adapter *adap = pci_get_drvdata(pdev);
6091
6092 if (!adap) {
6093 pci_restore_state(pdev);
6094 pci_save_state(pdev);
6095 return PCI_ERS_RESULT_RECOVERED;
6096 }
6097
6098 if (!(adap->flags & DEV_ENABLED)) {
6099 if (pci_enable_device(pdev)) {
6100 dev_err(&pdev->dev, "Cannot reenable PCI "
6101 "device after reset\n");
6102 return PCI_ERS_RESULT_DISCONNECT;
6103 }
6104 adap->flags |= DEV_ENABLED;
6105 }
6106
6107 pci_set_master(pdev);
6108 pci_restore_state(pdev);
6109 pci_save_state(pdev);
6110 pci_cleanup_aer_uncorrect_error_status(pdev);
6111
6112 if (t4_wait_dev_ready(adap) < 0)
6113 return PCI_ERS_RESULT_DISCONNECT;
6114 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6115 return PCI_ERS_RESULT_DISCONNECT;
6116 adap->flags |= FW_OK;
6117 if (adap_init1(adap, &c))
6118 return PCI_ERS_RESULT_DISCONNECT;
6119
6120 for_each_port(adap, i) {
6121 struct port_info *p = adap2pinfo(adap, i);
6122
6123 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6124 NULL, NULL);
6125 if (ret < 0)
6126 return PCI_ERS_RESULT_DISCONNECT;
6127 p->viid = ret;
6128 p->xact_addr_filt = -1;
6129 }
6130
6131 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6132 adap->params.b_wnd);
6133 setup_memwin(adap);
6134 if (cxgb_up(adap))
6135 return PCI_ERS_RESULT_DISCONNECT;
6136 return PCI_ERS_RESULT_RECOVERED;
6137 }
6138
6139 static void eeh_resume(struct pci_dev *pdev)
6140 {
6141 int i;
6142 struct adapter *adap = pci_get_drvdata(pdev);
6143
6144 if (!adap)
6145 return;
6146
6147 rtnl_lock();
6148 for_each_port(adap, i) {
6149 struct net_device *dev = adap->port[i];
6150
6151 if (netif_running(dev)) {
6152 link_start(dev);
6153 cxgb_set_rxmode(dev);
6154 }
6155 netif_device_attach(dev);
6156 }
6157 rtnl_unlock();
6158 }
6159
6160 static const struct pci_error_handlers cxgb4_eeh = {
6161 .error_detected = eeh_err_detected,
6162 .slot_reset = eeh_slot_reset,
6163 .resume = eeh_resume,
6164 };
6165
6166 static inline bool is_x_10g_port(const struct link_config *lc)
6167 {
6168 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6169 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6170 }
6171
6172 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6173 unsigned int us, unsigned int cnt,
6174 unsigned int size, unsigned int iqe_size)
6175 {
6176 q->adap = adap;
6177 set_rspq_intr_params(q, us, cnt);
6178 q->iqe_len = iqe_size;
6179 q->size = size;
6180 }
6181
6182 /*
6183 * Perform default configuration of DMA queues depending on the number and type
6184 * of ports we found and the number of available CPUs. Most settings can be
6185 * modified by the admin prior to actual use.
6186 */
6187 static void cfg_queues(struct adapter *adap)
6188 {
6189 struct sge *s = &adap->sge;
6190 int i, n10g = 0, qidx = 0;
6191 #ifndef CONFIG_CHELSIO_T4_DCB
6192 int q10g = 0;
6193 #endif
6194 int ciq_size;
6195
6196 for_each_port(adap, i)
6197 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6198 #ifdef CONFIG_CHELSIO_T4_DCB
6199 /* For Data Center Bridging support we need to be able to support up
6200 * to 8 Traffic Priorities; each of which will be assigned to its
6201 * own TX Queue in order to prevent Head-Of-Line Blocking.
6202 */
6203 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6204 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6205 MAX_ETH_QSETS, adap->params.nports * 8);
6206 BUG_ON(1);
6207 }
6208
6209 for_each_port(adap, i) {
6210 struct port_info *pi = adap2pinfo(adap, i);
6211
6212 pi->first_qset = qidx;
6213 pi->nqsets = 8;
6214 qidx += pi->nqsets;
6215 }
6216 #else /* !CONFIG_CHELSIO_T4_DCB */
6217 /*
6218 * We default to 1 queue per non-10G port and up to as many queues
6219 * as there are CPU cores per 10G port.
6220 */
6221 if (n10g)
6222 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6223 if (q10g > netif_get_num_default_rss_queues())
6224 q10g = netif_get_num_default_rss_queues();
6225
6226 for_each_port(adap, i) {
6227 struct port_info *pi = adap2pinfo(adap, i);
6228
6229 pi->first_qset = qidx;
6230 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6231 qidx += pi->nqsets;
6232 }
6233 #endif /* !CONFIG_CHELSIO_T4_DCB */
6234
6235 s->ethqsets = qidx;
6236 s->max_ethqsets = qidx; /* MSI-X may lower it later */
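/*
 * Worked example for the non-DCB path (assuming MAX_ETH_QSETS == 32
 * from cxgb4.h): with two 10G and two 1G ports, n10g == 2 and
 * q10g = (32 - 2) / 2 = 15, which netif_get_num_default_rss_queues()
 * then caps at 8 (fewer if fewer CPUs are online). That yields
 * 8 + 8 + 1 + 1 = 18 Ethernet queue sets in total.
 */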
6237
6238 if (is_offload(adap)) {
6239 /*
6240 * For offload we use 1 queue/channel if all ports are up to 1G,
6241 * otherwise we divide all available queues amongst the channels
6242 * capped by the number of available cores.
6243 */
6244 if (n10g) {
6245 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6246 num_online_cpus());
6247 s->ofldqsets = roundup(i, adap->params.nports);
6248 } else
6249 s->ofldqsets = adap->params.nports;
6250 /* For RDMA one Rx queue per channel suffices */
6251 s->rdmaqs = adap->params.nports;
6252 s->rdmaciqs = adap->params.nports;
6253 }
6254
6255 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6256 struct sge_eth_rxq *r = &s->ethrxq[i];
6257
6258 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6259 r->fl.size = 72;
6260 }
6261
6262 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6263 s->ethtxq[i].q.size = 1024;
6264
6265 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6266 s->ctrlq[i].q.size = 512;
6267
6268 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6269 s->ofldtxq[i].q.size = 1024;
6270
6271 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6272 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6273
6274 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6275 r->rspq.uld = CXGB4_ULD_ISCSI;
6276 r->fl.size = 72;
6277 }
6278
6279 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6280 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6281
6282 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6283 r->rspq.uld = CXGB4_ULD_RDMA;
6284 r->fl.size = 72;
6285 }
6286
6287 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6288 if (ciq_size > SGE_MAX_IQ_SIZE) {
6289 CH_WARN(adap, "CIQ size too small for available IQs\n");
6290 ciq_size = SGE_MAX_IQ_SIZE;
6291 }
6292
6293 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6294 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6295
6296 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6297 r->rspq.uld = CXGB4_ULD_RDMA;
6298 }
6299
6300 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6301 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6302 }
6303
6304 /*
6305 * Reduce the number of Ethernet queues across all ports to at most n.
6306 * n provides at least one queue per port.
6307 */
6308 static void reduce_ethqs(struct adapter *adap, int n)
6309 {
6310 int i;
6311 struct port_info *pi;
6312
6313 while (n < adap->sge.ethqsets)
6314 for_each_port(adap, i) {
6315 pi = adap2pinfo(adap, i);
6316 if (pi->nqsets > 1) {
6317 pi->nqsets--;
6318 adap->sge.ethqsets--;
6319 if (adap->sge.ethqsets <= n)
6320 break;
6321 }
6322 }
6323
6324 n = 0;
6325 for_each_port(adap, i) {
6326 pi = adap2pinfo(adap, i);
6327 pi->first_qset = n;
6328 n += pi->nqsets;
6329 }
6330 }
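/*
 * Worked example: four ports with 8 queue sets each (ethqsets == 32)
 * reduced to n == 10 strips one queue set per port per pass until
 * the total reaches 10, leaving nqsets of 2, 2, 3, 3 and first_qset
 * renumbered contiguously to 0, 2, 4, 7.
 */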
6331
6332 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6333 #define EXTRA_VECS 2
6334
6335 static int enable_msix(struct adapter *adap)
6336 {
6337 int ofld_need = 0;
6338 int i, want, need;
6339 struct sge *s = &adap->sge;
6340 unsigned int nchan = adap->params.nports;
6341 struct msix_entry entries[MAX_INGQ + 1];
6342
6343 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6344 entries[i].entry = i;
6345
6346 want = s->max_ethqsets + EXTRA_VECS;
6347 if (is_offload(adap)) {
6348 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6349 /* need nchan for each possible ULD */
6350 ofld_need = 3 * nchan;
6351 }
6352 #ifdef CONFIG_CHELSIO_T4_DCB
6353 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6354 * each port.
6355 */
6356 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6357 #else
6358 need = adap->params.nports + EXTRA_VECS + ofld_need;
6359 #endif
6360 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6361 if (want < 0)
6362 return want;
6363
6364 /*
6365 * Distribute available vectors to the various queue groups.
6366 * Every group gets its minimum requirement and NIC gets top
6367 * priority for leftovers.
6368 */
6369 i = want - EXTRA_VECS - ofld_need;
6370 if (i < s->max_ethqsets) {
6371 s->max_ethqsets = i;
6372 if (i < s->ethqsets)
6373 reduce_ethqs(adap, i);
6374 }
6375 if (is_offload(adap)) {
6376 i = want - EXTRA_VECS - s->max_ethqsets;
6377 i -= ofld_need - nchan;
6378 s->ofldqsets = (i / nchan) * nchan; /* round down */
6379 }
6380 for (i = 0; i < want; ++i)
6381 adap->msix_info[i].vec = entries[i].vector;
6382
6383 return 0;
6384 }
6385
6386 #undef EXTRA_VECS
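/*
 * Vector-budget example for enable_msix() (non-DCB build, four-port
 * offload-capable adapter): want = max_ethqsets + 2 + rdmaqs +
 * rdmaciqs + ofldqsets, while the floor is need = nports + 2 +
 * 3 * nchan = 4 + 2 + 12 = 18. pci_enable_msix_range() allocates
 * anywhere in [need, want], and any shortfall is taken out of the
 * Ethernet queue sets via reduce_ethqs().
 */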
6387
6388 static int init_rss(struct adapter *adap)
6389 {
6390 unsigned int i, j;
6391
6392 for_each_port(adap, i) {
6393 struct port_info *pi = adap2pinfo(adap, i);
6394
6395 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6396 if (!pi->rss)
6397 return -ENOMEM;
6398 for (j = 0; j < pi->rss_size; j++)
6399 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6400 }
6401 return 0;
6402 }
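/*
 * ethtool_rxfh_indir_default(j, n) spreads entries round-robin
 * (j % n), so a port with nqsets == 4 ends up with the RSS
 * indirection table 0, 1, 2, 3, 0, 1, 2, 3, ... across its
 * rss_size slots.
 */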
6403
6404 static void print_port_info(const struct net_device *dev)
6405 {
6406 char buf[80];
6407 char *bufp = buf;
6408 const char *spd = "";
6409 const struct port_info *pi = netdev_priv(dev);
6410 const struct adapter *adap = pi->adapter;
6411
6412 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6413 spd = " 2.5 GT/s";
6414 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6415 spd = " 5 GT/s";
6416 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6417 spd = " 8 GT/s";
6418
6419 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6420 bufp += sprintf(bufp, "100/");
6421 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6422 bufp += sprintf(bufp, "1000/");
6423 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6424 bufp += sprintf(bufp, "10G/");
6425 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6426 bufp += sprintf(bufp, "40G/");
6427 if (bufp != buf)
6428 --bufp;
6429 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6430
6431 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6432 adap->params.vpd.id,
6433 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6434 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6435 (adap->flags & USING_MSIX) ? " MSI-X" :
6436 (adap->flags & USING_MSI) ? " MSI" : "");
6437 netdev_info(dev, "S/N: %s, P/N: %s\n",
6438 adap->params.vpd.sn, adap->params.vpd.pn);
6439 }
6440
6441 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6442 {
6443 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6444 }
6445
6446 /*
6447 * Free the following resources:
6448 * - memory used for tables
6449 * - MSI/MSI-X
6450 * - net devices
6451 * - resources FW is holding for us
6452 */
6453 static void free_some_resources(struct adapter *adapter)
6454 {
6455 unsigned int i;
6456
6457 t4_free_mem(adapter->l2t);
6458 t4_free_mem(adapter->tids.tid_tab);
6459 disable_msi(adapter);
6460
6461 for_each_port(adapter, i)
6462 if (adapter->port[i]) {
6463 kfree(adap2pinfo(adapter, i)->rss);
6464 free_netdev(adapter->port[i]);
6465 }
6466 if (adapter->flags & FW_OK)
6467 t4_fw_bye(adapter, adapter->fn);
6468 }
6469
6470 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6471 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6472 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6473 #define SEGMENT_SIZE 128
6474
6475 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6476 {
6477 int func, i, err, s_qpp, qpp, num_seg;
6478 struct port_info *pi;
6479 bool highdma = false;
6480 struct adapter *adapter = NULL;
6481 void __iomem *regs;
6482
6483 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6484
6485 err = pci_request_regions(pdev, KBUILD_MODNAME);
6486 if (err) {
6487 /* Just info, some other driver may have claimed the device. */
6488 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6489 return err;
6490 }
6491
6492 err = pci_enable_device(pdev);
6493 if (err) {
6494 dev_err(&pdev->dev, "cannot enable PCI device\n");
6495 goto out_release_regions;
6496 }
6497
6498 regs = pci_ioremap_bar(pdev, 0);
6499 if (!regs) {
6500 dev_err(&pdev->dev, "cannot map device registers\n");
6501 err = -ENOMEM;
6502 goto out_disable_device;
6503 }
6504
6505 /* We control everything through one PF */
6506 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6507 if (func != ent->driver_data) {
6508 iounmap(regs);
6509 pci_disable_device(pdev);
6510 pci_save_state(pdev); /* to restore SR-IOV later */
6511 goto sriov;
6512 }
6513
6514 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6515 highdma = true;
6516 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6517 if (err) {
6518 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6519 "coherent allocations\n");
6520 goto out_unmap_bar0;
6521 }
6522 } else {
6523 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6524 if (err) {
6525 dev_err(&pdev->dev, "no usable DMA configuration\n");
6526 goto out_unmap_bar0;
6527 }
6528 }
6529
6530 pci_enable_pcie_error_reporting(pdev);
6531 enable_pcie_relaxed_ordering(pdev);
6532 pci_set_master(pdev);
6533 pci_save_state(pdev);
6534
6535 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6536 if (!adapter) {
6537 err = -ENOMEM;
6538 goto out_unmap_bar0;
6539 }
6540
6541 adapter->workq = create_singlethread_workqueue("cxgb4");
6542 if (!adapter->workq) {
6543 err = -ENOMEM;
6544 goto out_free_adapter;
6545 }
6546
6547 /* PCI device has been enabled */
6548 adapter->flags |= DEV_ENABLED;
6549
6550 adapter->regs = regs;
6551 adapter->pdev = pdev;
6552 adapter->pdev_dev = &pdev->dev;
6553 adapter->mbox = func;
6554 adapter->fn = func;
6555 adapter->msg_enable = dflt_msg_enable;
6556 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6557
6558 spin_lock_init(&adapter->stats_lock);
6559 spin_lock_init(&adapter->tid_release_lock);
6560
6561 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6562 INIT_WORK(&adapter->db_full_task, process_db_full);
6563 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6564
6565 err = t4_prep_adapter(adapter);
6566 if (err)
6567 goto out_free_adapter;
6568
6569
6570 if (!is_t4(adapter->params.chip)) {
6571 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6572 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6573 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6574 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6575
6576 /* Each segment is 128 B in size. Write coalescing is enabled only
6577 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
6578 * queue does not exceed the number of segments that can be
6579 * accommodated in a page.
6580 */
6581 if (qpp > num_seg) {
6582 dev_err(&pdev->dev,
6583 "Incorrect number of egress queues per page\n");
6584 err = -EINVAL;
6585 goto out_free_adapter;
6586 }
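/*
 * E.g. with 4 KB pages, num_seg = 4096 / 128 = 32, so any
 * queues-per-page value above 32 is rejected by the check above.
 */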
6587 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6588 pci_resource_len(pdev, 2));
6589 if (!adapter->bar2) {
6590 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6591 err = -ENOMEM;
6592 goto out_free_adapter;
6593 }
6594 }
6595
6596 setup_memwin(adapter);
6597 err = adap_init0(adapter);
6598 setup_memwin_rdma(adapter);
6599 if (err)
6600 goto out_unmap_bar;
6601
6602 for_each_port(adapter, i) {
6603 struct net_device *netdev;
6604
6605 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6606 MAX_ETH_QSETS);
6607 if (!netdev) {
6608 err = -ENOMEM;
6609 goto out_free_dev;
6610 }
6611
6612 SET_NETDEV_DEV(netdev, &pdev->dev);
6613
6614 adapter->port[i] = netdev;
6615 pi = netdev_priv(netdev);
6616 pi->adapter = adapter;
6617 pi->xact_addr_filt = -1;
6618 pi->port_id = i;
6619 netdev->irq = pdev->irq;
6620
6621 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6622 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6623 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6624 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6625 if (highdma)
6626 netdev->hw_features |= NETIF_F_HIGHDMA;
6627 netdev->features |= netdev->hw_features;
6628 netdev->vlan_features = netdev->features & VLAN_FEAT;
6629
6630 netdev->priv_flags |= IFF_UNICAST_FLT;
6631
6632 netdev->netdev_ops = &cxgb4_netdev_ops;
6633 #ifdef CONFIG_CHELSIO_T4_DCB
6634 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6635 cxgb4_dcb_state_init(netdev);
6636 #endif
6637 netdev->ethtool_ops = &cxgb_ethtool_ops;
6638 }
6639
6640 pci_set_drvdata(pdev, adapter);
6641
6642 if (adapter->flags & FW_OK) {
6643 err = t4_port_init(adapter, func, func, 0);
6644 if (err)
6645 goto out_free_dev;
6646 }
6647
6648 /*
6649 * Configure queues and allocate tables now, they can be needed as
6650 * soon as the first register_netdev completes.
6651 */
6652 cfg_queues(adapter);
6653
6654 adapter->l2t = t4_init_l2t();
6655 if (!adapter->l2t) {
6656 /* We tolerate a lack of L2T, giving up some functionality */
6657 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6658 adapter->params.offload = 0;
6659 }
6660
6661 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6662 dev_warn(&pdev->dev, "could not allocate TID table, "
6663 "continuing\n");
6664 adapter->params.offload = 0;
6665 }
6666
6667 /* See what interrupts we'll be using */
6668 if (msi > 1 && enable_msix(adapter) == 0)
6669 adapter->flags |= USING_MSIX;
6670 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6671 adapter->flags |= USING_MSI;
6672
6673 err = init_rss(adapter);
6674 if (err)
6675 goto out_free_dev;
6676
6677 /*
6678 * The card is now ready to go. If any errors occur during device
6679 * registration we do not fail the whole card but rather proceed only
6680 * with the ports we manage to register successfully. However we must
6681 * register at least one net device.
6682 */
6683 for_each_port(adapter, i) {
6684 pi = adap2pinfo(adapter, i);
6685 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6686 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6687
6688 err = register_netdev(adapter->port[i]);
6689 if (err)
6690 break;
6691 adapter->chan_map[pi->tx_chan] = i;
6692 print_port_info(adapter->port[i]);
6693 }
6694 if (i == 0) {
6695 dev_err(&pdev->dev, "could not register any net devices\n");
6696 goto out_free_dev;
6697 }
6698 if (err) {
6699 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6700 err = 0;
6701 }
6702
6703 if (cxgb4_debugfs_root) {
6704 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6705 cxgb4_debugfs_root);
6706 setup_debugfs(adapter);
6707 }
6708
6709 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6710 pdev->needs_freset = 1;
6711
6712 if (is_offload(adapter))
6713 attach_ulds(adapter);
6714
6715 sriov:
6716 #ifdef CONFIG_PCI_IOV
6717 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6718 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6719 dev_info(&pdev->dev,
6720 "instantiated %u virtual functions\n",
6721 num_vf[func]);
6722 #endif
6723 return 0;
6724
6725 out_free_dev:
6726 free_some_resources(adapter);
6727 out_unmap_bar:
6728 if (!is_t4(adapter->params.chip))
6729 iounmap(adapter->bar2);
6730 out_free_adapter:
6731 if (adapter->workq)
6732 destroy_workqueue(adapter->workq);
6733
6734 kfree(adapter);
6735 out_unmap_bar0:
6736 iounmap(regs);
6737 out_disable_device:
6738 pci_disable_pcie_error_reporting(pdev);
6739 pci_disable_device(pdev);
6740 out_release_regions:
6741 pci_release_regions(pdev);
6742 return err;
6743 }
6744
6745 static void remove_one(struct pci_dev *pdev)
6746 {
6747 struct adapter *adapter = pci_get_drvdata(pdev);
6748
6749 #ifdef CONFIG_PCI_IOV
6750 pci_disable_sriov(pdev);
6751
6752 #endif
6753
6754 if (adapter) {
6755 int i;
6756
6757 /* Tear down per-adapter Work Queue first since it can contain
6758 * references to our adapter data structure.
6759 */
6760 destroy_workqueue(adapter->workq);
6761
6762 if (is_offload(adapter))
6763 detach_ulds(adapter);
6764
6765 for_each_port(adapter, i)
6766 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6767 unregister_netdev(adapter->port[i]);
6768
6769 debugfs_remove_recursive(adapter->debugfs_root);
6770
6771 /* If we allocated filters, free up state associated with any
6772 * valid filters ...
6773 */
6774 if (adapter->tids.ftid_tab) {
6775 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6776 for (i = 0; i < (adapter->tids.nftids +
6777 adapter->tids.nsftids); i++, f++)
6778 if (f->valid)
6779 clear_filter(adapter, f);
6780 }
6781
6782 if (adapter->flags & FULL_INIT_DONE)
6783 cxgb_down(adapter);
6784
6785 free_some_resources(adapter);
6786 iounmap(adapter->regs);
6787 if (!is_t4(adapter->params.chip))
6788 iounmap(adapter->bar2);
6789 pci_disable_pcie_error_reporting(pdev);
6790 if ((adapter->flags & DEV_ENABLED)) {
6791 pci_disable_device(pdev);
6792 adapter->flags &= ~DEV_ENABLED;
6793 }
6794 pci_release_regions(pdev);
6795 synchronize_rcu();
6796 kfree(adapter);
6797 } else
6798 pci_release_regions(pdev);
6799 }
6800
6801 static struct pci_driver cxgb4_driver = {
6802 .name = KBUILD_MODNAME,
6803 .id_table = cxgb4_pci_tbl,
6804 .probe = init_one,
6805 .remove = remove_one,
6806 .shutdown = remove_one,
6807 .err_handler = &cxgb4_eeh,
6808 };
6809
6810 static int __init cxgb4_init_module(void)
6811 {
6812 int ret;
6813
6814 /* Debugfs support is optional, just warn if this fails */
6815 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6816 if (!cxgb4_debugfs_root)
6817 pr_warn("could not create debugfs entry, continuing\n");
6818
6819 ret = pci_register_driver(&cxgb4_driver);
6820 if (ret < 0) {
6821 debugfs_remove(cxgb4_debugfs_root);
return ret;
}
6822
6823 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6824
6825 return ret;
6826 }
6827
6828 static void __exit cxgb4_cleanup_module(void)
6829 {
6830 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6831 pci_unregister_driver(&cxgb4_driver);
6832 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6833 }
6834
6835 module_init(cxgb4_init_module);
6836 module_exit(cxgb4_cleanup_module);