/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
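
/*
 * Illustrative invocation (not from this file): loading the module with
 * "modprobe cxgb3 msi=1" makes the driver skip MSI-X and fall back to MSI
 * or legacy pin interrupts.  Because the parameter is registered with mode
 * 0644 it can also be rewritten via /sys/module/cxgb3/parameters/msi,
 * though the driver only consults it when an adapter is probed.
 */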

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device of the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}

static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY reporting the module change
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
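
/*
 * Illustrative result (assuming interface names eth0/eth1): on a two-port
 * adapter with two queue sets per port, vector 0 takes the adapter's own
 * name and the per-queue vectors come out as "eth0-0", "eth0-1", "eth1-2",
 * "eth1-3", since each name is "%s-%d" of the netdev and the absolute
 * qset index (first_qset + i).
 */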

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
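
/*
 * Polling budget: 10 attempts separated by msleep(10) give the firmware
 * roughly 100 ms (plus scheduler slack) to deliver the expected number of
 * replies on response queue 0 before the caller sees -ETIMEDOUT.
 */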

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
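
/*
 * Worked example of the mapping above (illustrative): with nq0 = 2 and
 * nq1 = 2, the first half of rspq_map cycles 0,1,0,1,... and the second
 * half cycles 2,3,2,3,..., so a Toeplitz hash landing in either half is
 * always steered to a response queue owned by the corresponding port.
 */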

static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
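
/*
 * Rate derivation (assuming vpd.cclk is in kHz): cpt is core clocks per
 * scheduler tick and bpt is bytes credited per tick, so ticks/sec =
 * cclk * 1000 / cpt and bytes/sec = ticks/sec * bpt; dividing bytes/sec
 * by 125 (i.e., multiplying by 8/1000) yields the Kbps value printed above.
 */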

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
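
/*
 * As the shifts above show, A_TP_MTU_PORT_TABLE packs both port MTUs into
 * one 32-bit register: port 0 in bits 15:0 and port 1 in bits 31:16.  For
 * example, 1500-byte MTUs on both ports are written as 0x05dc05dc.
 */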

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
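
/*
 * Each FW_MNGTOPCODE_PKTSCHED_SET work request above binds one queue set
 * to its port's TX packet scheduler (sched type 1); lo/hi of -1 appear to
 * leave the scheduler's min/max parameters unchanged, so only the
 * qset-to-port binding is programmed here.
 */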

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
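
/*
 * Note on the checksum check above: the EDC image presumably carries a
 * trailing 32-bit word chosen so that the sum of all big-endian words in
 * the file is 0xffffffff, so any corruption shows up as a sum mismatch
 * (t3_load_fw applies the same whole-image convention to the main
 * firmware).
 */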

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work_sync(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}

4d22de3e DLR |
1436 | static struct net_device_stats *cxgb_get_stats(struct net_device *dev) |
1437 | { | |
5fbf816f DLR |
1438 | struct port_info *pi = netdev_priv(dev); |
1439 | struct adapter *adapter = pi->adapter; | |
1440 | struct net_device_stats *ns = &pi->netstats; | |
4d22de3e DLR |
1441 | const struct mac_stats *pstats; |
1442 | ||
1443 | spin_lock(&adapter->stats_lock); | |
5fbf816f | 1444 | pstats = t3_mac_update_stats(&pi->mac); |
4d22de3e DLR |
1445 | spin_unlock(&adapter->stats_lock); |
1446 | ||
1447 | ns->tx_bytes = pstats->tx_octets; | |
1448 | ns->tx_packets = pstats->tx_frames; | |
1449 | ns->rx_bytes = pstats->rx_octets; | |
1450 | ns->rx_packets = pstats->rx_frames; | |
1451 | ns->multicast = pstats->rx_mcast_frames; | |
1452 | ||
1453 | ns->tx_errors = pstats->tx_underrun; | |
1454 | ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs + | |
1455 | pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short + | |
1456 | pstats->rx_fifo_ovfl; | |
1457 | ||
1458 | /* detailed rx_errors */ | |
1459 | ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long; | |
1460 | ns->rx_over_errors = 0; | |
1461 | ns->rx_crc_errors = pstats->rx_fcs_errs; | |
1462 | ns->rx_frame_errors = pstats->rx_symbol_errs; | |
1463 | ns->rx_fifo_errors = pstats->rx_fifo_ovfl; | |
1464 | ns->rx_missed_errors = pstats->rx_cong_drops; | |
1465 | ||
1466 | /* detailed tx_errors */ | |
1467 | ns->tx_aborted_errors = 0; | |
1468 | ns->tx_carrier_errors = 0; | |
1469 | ns->tx_fifo_errors = pstats->tx_underrun; | |
1470 | ns->tx_heartbeat_errors = 0; | |
1471 | ns->tx_window_errors = 0; | |
1472 | return ns; | |
1473 | } | |
1474 | ||
1475 | static u32 get_msglevel(struct net_device *dev) | |
1476 | { | |
5fbf816f DLR |
1477 | struct port_info *pi = netdev_priv(dev); |
1478 | struct adapter *adapter = pi->adapter; | |
4d22de3e DLR |
1479 | |
1480 | return adapter->msg_enable; | |
1481 | } | |
1482 | ||
1483 | static void set_msglevel(struct net_device *dev, u32 val) | |
1484 | { | |
5fbf816f DLR |
1485 | struct port_info *pi = netdev_priv(dev); |
1486 | struct adapter *adapter = pi->adapter; | |
4d22de3e DLR |
1487 | |
1488 | adapter->msg_enable = val; | |
1489 | } | |
1490 | ||
1491 | static char stats_strings[][ETH_GSTRING_LEN] = { | |
1492 | "TxOctetsOK ", | |
1493 | "TxFramesOK ", | |
1494 | "TxMulticastFramesOK", | |
1495 | "TxBroadcastFramesOK", | |
1496 | "TxPauseFrames ", | |
1497 | "TxUnderrun ", | |
1498 | "TxExtUnderrun ", | |
1499 | ||
1500 | "TxFrames64 ", | |
1501 | "TxFrames65To127 ", | |
1502 | "TxFrames128To255 ", | |
1503 | "TxFrames256To511 ", | |
1504 | "TxFrames512To1023 ", | |
1505 | "TxFrames1024To1518 ", | |
1506 | "TxFrames1519ToMax ", | |
1507 | ||
1508 | "RxOctetsOK ", | |
1509 | "RxFramesOK ", | |
1510 | "RxMulticastFramesOK", | |
1511 | "RxBroadcastFramesOK", | |
1512 | "RxPauseFrames ", | |
1513 | "RxFCSErrors ", | |
1514 | "RxSymbolErrors ", | |
1515 | "RxShortErrors ", | |
1516 | "RxJabberErrors ", | |
1517 | "RxLengthErrors ", | |
1518 | "RxFIFOoverflow ", | |
1519 | ||
1520 | "RxFrames64 ", | |
1521 | "RxFrames65To127 ", | |
1522 | "RxFrames128To255 ", | |
1523 | "RxFrames256To511 ", | |
1524 | "RxFrames512To1023 ", | |
1525 | "RxFrames1024To1518 ", | |
1526 | "RxFrames1519ToMax ", | |
1527 | ||
1528 | "PhyFIFOErrors ", | |
1529 | "TSO ", | |
1530 | "VLANextractions ", | |
1531 | "VLANinsertions ", | |
1532 | "TxCsumOffload ", | |
1533 | "RxCsumGood ", | |
b47385bd DLR |
1534 | "LroAggregated ", |
1535 | "LroFlushed ", | |
1536 | "LroNoDesc ", | |
fc90664e DLR |
1537 | "RxDrops ", |
1538 | ||
1539 | "CheckTXEnToggled ", | |
1540 | "CheckResets ", | |
1541 | ||
bf792094 | 1542 | "LinkFaults ", |
4d22de3e DLR |
1543 | }; |
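/*
 * Note: the order of the names in stats_strings must match the order in
 * which get_stats() below writes the corresponding values, and
 * get_sset_count() sizes the ETH_SS_STATS set from this array.
 */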
1544 | ||
b9f2c044 | 1545 | static int get_sset_count(struct net_device *dev, int sset) |
4d22de3e | 1546 | { |
b9f2c044 JG |
1547 | switch (sset) { |
1548 | case ETH_SS_STATS: | |
1549 | return ARRAY_SIZE(stats_strings); | |
1550 | default: | |
1551 | return -EOPNOTSUPP; | |
1552 | } | |
4d22de3e DLR |
1553 | } |
1554 | ||
1555 | #define T3_REGMAP_SIZE (3 * 1024) | |
1556 | ||
1557 | static int get_regs_len(struct net_device *dev) | |
1558 | { | |
1559 | return T3_REGMAP_SIZE; | |
1560 | } | |
1561 | ||
1562 | static int get_eeprom_len(struct net_device *dev) | |
1563 | { | |
1564 | return EEPROMSIZE; | |
1565 | } | |
1566 | ||
1567 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |
1568 | { | |
5fbf816f DLR |
1569 | struct port_info *pi = netdev_priv(dev); |
1570 | struct adapter *adapter = pi->adapter; | |
4d22de3e | 1571 | u32 fw_vers = 0; |
47330077 | 1572 | u32 tp_vers = 0; |
4d22de3e | 1573 | |
cf3760da | 1574 | spin_lock(&adapter->stats_lock); |
4d22de3e | 1575 | t3_get_fw_version(adapter, &fw_vers); |
47330077 | 1576 | t3_get_tp_version(adapter, &tp_vers); |
cf3760da | 1577 | spin_unlock(&adapter->stats_lock); |
4d22de3e DLR |
1578 | |
1579 | strcpy(info->driver, DRV_NAME); | |
1580 | strcpy(info->version, DRV_VERSION); | |
1581 | strcpy(info->bus_info, pci_name(adapter->pdev)); | |
1582 | if (!fw_vers) | |
1583 | strcpy(info->fw_version, "N/A"); | |
4aac3899 | 1584 | else { |
4d22de3e | 1585 | snprintf(info->fw_version, sizeof(info->fw_version), |
47330077 | 1586 | "%s %u.%u.%u TP %u.%u.%u", |
4aac3899 DLR |
1587 | G_FW_VERSION_TYPE(fw_vers) ? "T" : "N", |
1588 | G_FW_VERSION_MAJOR(fw_vers), | |
1589 | G_FW_VERSION_MINOR(fw_vers), | |
47330077 DLR |
1590 | G_FW_VERSION_MICRO(fw_vers), |
1591 | G_TP_VERSION_MAJOR(tp_vers), | |
1592 | G_TP_VERSION_MINOR(tp_vers), | |
1593 | G_TP_VERSION_MICRO(tp_vers)); | |
4aac3899 | 1594 | } |
4d22de3e DLR |
1595 | } |
1596 | ||
1597 | static void get_strings(struct net_device *dev, u32 stringset, u8 *data) | |
1598 | { | |
1599 | if (stringset == ETH_SS_STATS) | |
1600 | memcpy(data, stats_strings, sizeof(stats_strings)); | |
1601 | } | |
1602 | ||
1603 | static unsigned long collect_sge_port_stats(struct adapter *adapter, | |
1604 | struct port_info *p, int idx) | |
1605 | { | |
1606 | int i; | |
1607 | unsigned long tot = 0; | |
1608 | ||
8c263761 DLR |
1609 | for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i) |
1610 | tot += adapter->sge.qs[i].port_stats[idx]; | |
4d22de3e DLR |
1611 | return tot; |
1612 | } | |
1613 | ||
1614 | static void get_stats(struct net_device *dev, struct ethtool_stats *stats, | |
1615 | u64 *data) | |
1616 | { | |
4d22de3e | 1617 | struct port_info *pi = netdev_priv(dev); |
5fbf816f | 1618 | struct adapter *adapter = pi->adapter; |
4d22de3e DLR |
1619 | const struct mac_stats *s; |
1620 | ||
1621 | spin_lock(&adapter->stats_lock); | |
1622 | s = t3_mac_update_stats(&pi->mac); | |
1623 | spin_unlock(&adapter->stats_lock); | |
1624 | ||
1625 | *data++ = s->tx_octets; | |
1626 | *data++ = s->tx_frames; | |
1627 | *data++ = s->tx_mcast_frames; | |
1628 | *data++ = s->tx_bcast_frames; | |
1629 | *data++ = s->tx_pause; | |
1630 | *data++ = s->tx_underrun; | |
1631 | *data++ = s->tx_fifo_urun; | |
1632 | ||
1633 | *data++ = s->tx_frames_64; | |
1634 | *data++ = s->tx_frames_65_127; | |
1635 | *data++ = s->tx_frames_128_255; | |
1636 | *data++ = s->tx_frames_256_511; | |
1637 | *data++ = s->tx_frames_512_1023; | |
1638 | *data++ = s->tx_frames_1024_1518; | |
1639 | *data++ = s->tx_frames_1519_max; | |
1640 | ||
1641 | *data++ = s->rx_octets; | |
1642 | *data++ = s->rx_frames; | |
1643 | *data++ = s->rx_mcast_frames; | |
1644 | *data++ = s->rx_bcast_frames; | |
1645 | *data++ = s->rx_pause; | |
1646 | *data++ = s->rx_fcs_errs; | |
1647 | *data++ = s->rx_symbol_errs; | |
1648 | *data++ = s->rx_short; | |
1649 | *data++ = s->rx_jabber; | |
1650 | *data++ = s->rx_too_long; | |
1651 | *data++ = s->rx_fifo_ovfl; | |
1652 | ||
1653 | *data++ = s->rx_frames_64; | |
1654 | *data++ = s->rx_frames_65_127; | |
1655 | *data++ = s->rx_frames_128_255; | |
1656 | *data++ = s->rx_frames_256_511; | |
1657 | *data++ = s->rx_frames_512_1023; | |
1658 | *data++ = s->rx_frames_1024_1518; | |
1659 | *data++ = s->rx_frames_1519_max; | |
1660 | ||
1661 | *data++ = pi->phy.fifo_errors; | |
1662 | ||
1663 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO); | |
1664 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX); | |
1665 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS); | |
1666 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM); | |
1667 | *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD); | |
7be2df45 HX |
1668 | *data++ = 0; /* former LRO counters, always zero now that GRO is used */ |
1669 | *data++ = 0; | |
1670 | *data++ = 0; | |
4d22de3e | 1671 | *data++ = s->rx_cong_drops; |
fc90664e DLR |
1672 | |
1673 | *data++ = s->num_toggled; | |
1674 | *data++ = s->num_resets; | |
bf792094 DLR |
1675 | |
1676 | *data++ = s->link_faults; | |
4d22de3e DLR |
1677 | } |
1678 | ||
1679 | static inline void reg_block_dump(struct adapter *ap, void *buf, | |
1680 | unsigned int start, unsigned int end) | |
1681 | { | |
1682 | u32 *p = buf + start; | |
1683 | ||
1684 | for (; start <= end; start += sizeof(u32)) | |
1685 | *p++ = t3_read_reg(ap, start); | |
1686 | } | |
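/*
 * reg_block_dump() stores each register at its own offset within the
 * dump buffer (buf + start), so the regmap is a sparse 1:1 image of the
 * register address space; the ranges get_regs() skips below simply stay
 * zero from its initial memset().
 */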
1687 | ||
1688 | static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |
1689 | void *buf) | |
1690 | { | |
5fbf816f DLR |
1691 | struct port_info *pi = netdev_priv(dev); |
1692 | struct adapter *ap = pi->adapter; | |
4d22de3e DLR |
1693 | |
1694 | /* | |
1695 | * Version scheme: | |
1696 | * bits 0..9: chip version | |
1697 | * bits 10..15: chip revision | |
1698 | * bit 31: set for PCIe cards | |
1699 | */ | |
1700 | regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31); | |
1701 | ||
1702 | /* | |
1703 | * We skip the MAC statistics registers because they are clear-on-read. | |
1704 | * Also reading multi-register stats would need to synchronize with the | |
1705 | * periodic mac stats accumulation. Hard to justify the complexity. | |
1706 | */ | |
1707 | memset(buf, 0, T3_REGMAP_SIZE); | |
1708 | reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN); | |
1709 | reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); | |
1710 | reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); | |
1711 | reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); | |
1712 | reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); | |
1713 | reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0, | |
1714 | XGM_REG(A_XGM_SERDES_STAT3, 1)); | |
1715 | reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), | |
1716 | XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); | |
1717 | } | |
1718 | ||
1719 | static int restart_autoneg(struct net_device *dev) | |
1720 | { | |
1721 | struct port_info *p = netdev_priv(dev); | |
1722 | ||
1723 | if (!netif_running(dev)) | |
1724 | return -EAGAIN; | |
1725 | if (p->link_config.autoneg != AUTONEG_ENABLE) | |
1726 | return -EINVAL; | |
1727 | p->phy.ops->autoneg_restart(&p->phy); | |
1728 | return 0; | |
1729 | } | |
1730 | ||
12fcf941 | 1731 | static int set_phys_id(struct net_device *dev, |
1732 | enum ethtool_phys_id_state state) | |
4d22de3e | 1733 | { |
5fbf816f DLR |
1734 | struct port_info *pi = netdev_priv(dev); |
1735 | struct adapter *adapter = pi->adapter; | |
4d22de3e | 1736 | |
12fcf941 | 1737 | switch (state) { |
1738 | case ETHTOOL_ID_ACTIVE: | |
fce55922 | 1739 | return 1; /* cycle on/off once per second */ |
12fcf941 | 1740 | |
1741 | case ETHTOOL_ID_OFF: | |
1742 | t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0); | |
1743 | break; | |
4d22de3e | 1744 | |
12fcf941 | 1745 | case ETHTOOL_ID_ON: |
1746 | case ETHTOOL_ID_INACTIVE: | |
4d22de3e | 1747 | t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, |
4d22de3e | 1748 | F_GPIO0_OUT_VAL); |
12fcf941 | 1749 | } |
1750 | ||
4d22de3e DLR |
1751 | return 0; |
1752 | } | |
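/*
 * How the blink above works: returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to call back once per second, and each callback drives
 * the F_GPIO0_OUT_VAL field low or high, toggling GPIO0 (presumably
 * wired to the port's identification LED on these boards) at 1 Hz.
 */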
1753 | ||
1754 | static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1755 | { | |
1756 | struct port_info *p = netdev_priv(dev); | |
1757 | ||
1758 | cmd->supported = p->link_config.supported; | |
1759 | cmd->advertising = p->link_config.advertising; | |
1760 | ||
1761 | if (netif_carrier_ok(dev)) { | |
70739497 | 1762 | ethtool_cmd_speed_set(cmd, p->link_config.speed); |
4d22de3e DLR |
1763 | cmd->duplex = p->link_config.duplex; |
1764 | } else { | |
70739497 | 1765 | ethtool_cmd_speed_set(cmd, -1); |
4d22de3e DLR |
1766 | cmd->duplex = -1; |
1767 | } | |
1768 | ||
1769 | cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE; | |
0f07c4ee | 1770 | cmd->phy_address = p->phy.mdio.prtad; |
4d22de3e DLR |
1771 | cmd->transceiver = XCVR_EXTERNAL; |
1772 | cmd->autoneg = p->link_config.autoneg; | |
1773 | cmd->maxtxpkt = 0; | |
1774 | cmd->maxrxpkt = 0; | |
1775 | return 0; | |
1776 | } | |
1777 | ||
1778 | static int speed_duplex_to_caps(int speed, int duplex) | |
1779 | { | |
1780 | int cap = 0; | |
1781 | ||
1782 | switch (speed) { | |
1783 | case SPEED_10: | |
1784 | if (duplex == DUPLEX_FULL) | |
1785 | cap = SUPPORTED_10baseT_Full; | |
1786 | else | |
1787 | cap = SUPPORTED_10baseT_Half; | |
1788 | break; | |
1789 | case SPEED_100: | |
1790 | if (duplex == DUPLEX_FULL) | |
1791 | cap = SUPPORTED_100baseT_Full; | |
1792 | else | |
1793 | cap = SUPPORTED_100baseT_Half; | |
1794 | break; | |
1795 | case SPEED_1000: | |
1796 | if (duplex == DUPLEX_FULL) | |
1797 | cap = SUPPORTED_1000baseT_Full; | |
1798 | else | |
1799 | cap = SUPPORTED_1000baseT_Half; | |
1800 | break; | |
1801 | case SPEED_10000: | |
1802 | if (duplex == DUPLEX_FULL) | |
1803 | cap = SUPPORTED_10000baseT_Full; | |
1804 | } | |
1805 | return cap; | |
1806 | } | |
1807 | ||
1808 | #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \ | |
1809 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \ | |
1810 | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \ | |
1811 | ADVERTISED_10000baseT_Full) | |
1812 | ||
1813 | static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |
1814 | { | |
1815 | struct port_info *p = netdev_priv(dev); | |
1816 | struct link_config *lc = &p->link_config; | |
1817 | ||
9b1e3656 DLR |
1818 | if (!(lc->supported & SUPPORTED_Autoneg)) { |
1819 | /* | |
1820 | * PHY offers a single speed/duplex. See if that's what's | |
1821 | * being requested. | |
1822 | */ | |
1823 | if (cmd->autoneg == AUTONEG_DISABLE) { | |
25db0338 DD |
1824 | u32 speed = ethtool_cmd_speed(cmd); |
1825 | int cap = speed_duplex_to_caps(speed, cmd->duplex); | |
9b1e3656 DLR |
1826 | if (lc->supported & cap) |
1827 | return 0; | |
1828 | } | |
1829 | return -EINVAL; | |
1830 | } | |
4d22de3e DLR |
1831 | |
1832 | if (cmd->autoneg == AUTONEG_DISABLE) { | |
25db0338 DD |
1833 | u32 speed = ethtool_cmd_speed(cmd); |
1834 | int cap = speed_duplex_to_caps(speed, cmd->duplex); | |
4d22de3e | 1835 | |
25db0338 | 1836 | if (!(lc->supported & cap) || (speed == SPEED_1000)) |
4d22de3e | 1837 | return -EINVAL; /* forcing 1 Gb/s without autoneg is not supported */ |
25db0338 | 1838 | lc->requested_speed = speed; |
4d22de3e DLR |
1839 | lc->requested_duplex = cmd->duplex; |
1840 | lc->advertising = 0; | |
1841 | } else { | |
1842 | cmd->advertising &= ADVERTISED_MASK; | |
1843 | cmd->advertising &= lc->supported; | |
1844 | if (!cmd->advertising) | |
1845 | return -EINVAL; | |
1846 | lc->requested_speed = SPEED_INVALID; | |
1847 | lc->requested_duplex = DUPLEX_INVALID; | |
1848 | lc->advertising = cmd->advertising | ADVERTISED_Autoneg; | |
1849 | } | |
1850 | lc->autoneg = cmd->autoneg; | |
1851 | if (netif_running(dev)) | |
1852 | t3_link_start(&p->phy, &p->mac, lc); | |
1853 | return 0; | |
1854 | } | |
1855 | ||
1856 | static void get_pauseparam(struct net_device *dev, | |
1857 | struct ethtool_pauseparam *epause) | |
1858 | { | |
1859 | struct port_info *p = netdev_priv(dev); | |
1860 | ||
1861 | epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; | |
1862 | epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0; | |
1863 | epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0; | |
1864 | } | |
1865 | ||
1866 | static int set_pauseparam(struct net_device *dev, | |
1867 | struct ethtool_pauseparam *epause) | |
1868 | { | |
1869 | struct port_info *p = netdev_priv(dev); | |
1870 | struct link_config *lc = &p->link_config; | |
1871 | ||
1872 | if (epause->autoneg == AUTONEG_DISABLE) | |
1873 | lc->requested_fc = 0; | |
1874 | else if (lc->supported & SUPPORTED_Autoneg) | |
1875 | lc->requested_fc = PAUSE_AUTONEG; | |
1876 | else | |
1877 | return -EINVAL; | |
1878 | ||
1879 | if (epause->rx_pause) | |
1880 | lc->requested_fc |= PAUSE_RX; | |
1881 | if (epause->tx_pause) | |
1882 | lc->requested_fc |= PAUSE_TX; | |
1883 | if (lc->autoneg == AUTONEG_ENABLE) { | |
1884 | if (netif_running(dev)) | |
1885 | t3_link_start(&p->phy, &p->mac, lc); | |
1886 | } else { | |
1887 | lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); | |
1888 | if (netif_running(dev)) | |
1889 | t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc); | |
1890 | } | |
1891 | return 0; | |
1892 | } | |
1893 | ||
4d22de3e DLR |
1894 | static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) |
1895 | { | |
5fbf816f DLR |
1896 | struct port_info *pi = netdev_priv(dev); |
1897 | struct adapter *adapter = pi->adapter; | |
05b97b30 | 1898 | const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; |
4d22de3e DLR |
1899 | |
1900 | e->rx_max_pending = MAX_RX_BUFFERS; | |
1901 | e->rx_mini_max_pending = 0; | |
1902 | e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS; | |
1903 | e->tx_max_pending = MAX_TXQ_ENTRIES; | |
1904 | ||
05b97b30 DLR |
1905 | e->rx_pending = q->fl_size; |
1906 | e->rx_mini_pending = q->rspq_size; | |
1907 | e->rx_jumbo_pending = q->jumbo_size; | |
1908 | e->tx_pending = q->txq_size[0]; | |
4d22de3e DLR |
1909 | } |
1910 | ||
1911 | static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) | |
1912 | { | |
5fbf816f DLR |
1913 | struct port_info *pi = netdev_priv(dev); |
1914 | struct adapter *adapter = pi->adapter; | |
05b97b30 | 1915 | struct qset_params *q; |
5fbf816f | 1916 | int i; |
4d22de3e DLR |
1917 | |
1918 | if (e->rx_pending > MAX_RX_BUFFERS || | |
1919 | e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS || | |
1920 | e->tx_pending > MAX_TXQ_ENTRIES || | |
1921 | e->rx_mini_pending > MAX_RSPQ_ENTRIES || | |
1922 | e->rx_mini_pending < MIN_RSPQ_ENTRIES || | |
1923 | e->rx_pending < MIN_FL_ENTRIES || | |
1924 | e->rx_jumbo_pending < MIN_FL_ENTRIES || | |
1925 | e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES) | |
1926 | return -EINVAL; | |
1927 | ||
1928 | if (adapter->flags & FULL_INIT_DONE) | |
1929 | return -EBUSY; | |
1930 | ||
05b97b30 DLR |
1931 | q = &adapter->params.sge.qset[pi->first_qset]; |
1932 | for (i = 0; i < pi->nqsets; ++i, ++q) { | |
4d22de3e DLR |
1933 | q->rspq_size = e->rx_mini_pending; |
1934 | q->fl_size = e->rx_pending; | |
1935 | q->jumbo_size = e->rx_jumbo_pending; | |
1936 | q->txq_size[0] = e->tx_pending; | |
1937 | q->txq_size[1] = e->tx_pending; | |
1938 | q->txq_size[2] = e->tx_pending; | |
1939 | } | |
1940 | return 0; | |
1941 | } | |
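/*
 * Note that the same Tx ring size is applied to all three Tx queues of
 * each of the port's qsets (txq_size[0..2], apparently the Ethernet,
 * offload and control queues; index 2 is validated against the control
 * queue limits in cxgb_extension_ioctl() below), and the FULL_INIT_DONE
 * check means ring sizes can only be changed before the adapter is
 * brought up.
 */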
1942 | ||
1943 | static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |
1944 | { | |
5fbf816f DLR |
1945 | struct port_info *pi = netdev_priv(dev); |
1946 | struct adapter *adapter = pi->adapter; | |
c211c969 AB |
1947 | struct qset_params *qsp; |
1948 | struct sge_qset *qs; | |
1949 | int i; | |
4d22de3e DLR |
1950 | |
1951 | if (c->rx_coalesce_usecs * 10 > M_NEWTIMER) | |
1952 | return -EINVAL; | |
1953 | ||
c211c969 AB |
1954 | for (i = 0; i < pi->nqsets; i++) { |
1955 | qsp = &adapter->params.sge.qset[i]; | |
1956 | qs = &adapter->sge.qs[i]; | |
1957 | qsp->coalesce_usecs = c->rx_coalesce_usecs; | |
1958 | t3_update_qset_coalesce(qs, qsp); | |
1959 | } | |
1960 | ||
4d22de3e DLR |
1961 | return 0; |
1962 | } | |
1963 | ||
1964 | static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) | |
1965 | { | |
5fbf816f DLR |
1966 | struct port_info *pi = netdev_priv(dev); |
1967 | struct adapter *adapter = pi->adapter; | |
4d22de3e DLR |
1968 | struct qset_params *q = adapter->params.sge.qset; |
1969 | ||
1970 | c->rx_coalesce_usecs = q->coalesce_usecs; | |
1971 | return 0; | |
1972 | } | |
1973 | ||
1974 | static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, | |
1975 | u8 *data) | |
1976 | { | |
5fbf816f DLR |
1977 | struct port_info *pi = netdev_priv(dev); |
1978 | struct adapter *adapter = pi->adapter; | |
4d22de3e | 1979 | int i, err = 0; |
4d22de3e DLR |
1980 | |
1981 | u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); | |
1982 | if (!buf) | |
1983 | return -ENOMEM; | |
1984 | ||
1985 | e->magic = EEPROM_MAGIC; | |
1986 | for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) | |
05e5c116 | 1987 | err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]); |
4d22de3e DLR |
1988 | |
1989 | if (!err) | |
1990 | memcpy(data, buf + e->offset, e->len); | |
1991 | kfree(buf); | |
1992 | return err; | |
1993 | } | |
1994 | ||
1995 | static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |
1996 | u8 *data) | |
1997 | { | |
5fbf816f DLR |
1998 | struct port_info *pi = netdev_priv(dev); |
1999 | struct adapter *adapter = pi->adapter; | |
05e5c116 AV |
2000 | u32 aligned_offset, aligned_len; |
2001 | __le32 *p; | |
4d22de3e | 2002 | u8 *buf; |
c54f5c24 | 2003 | int err; |
4d22de3e DLR |
2004 | |
2005 | if (eeprom->magic != EEPROM_MAGIC) | |
2006 | return -EINVAL; | |
2007 | ||
2008 | aligned_offset = eeprom->offset & ~3; | |
2009 | aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; | |
2010 | ||
2011 | if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { | |
2012 | buf = kmalloc(aligned_len, GFP_KERNEL); | |
2013 | if (!buf) | |
2014 | return -ENOMEM; | |
05e5c116 | 2015 | err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); |
4d22de3e DLR |
2016 | if (!err && aligned_len > 4) |
2017 | err = t3_seeprom_read(adapter, | |
2018 | aligned_offset + aligned_len - 4, | |
05e5c116 | 2019 | (__le32 *)&buf[aligned_len - 4]); |
4d22de3e DLR |
2020 | if (err) |
2021 | goto out; | |
2022 | memcpy(buf + (eeprom->offset & 3), data, eeprom->len); | |
2023 | } else | |
2024 | buf = data; | |
2025 | ||
2026 | err = t3_seeprom_wp(adapter, 0); | |
2027 | if (err) | |
2028 | goto out; | |
2029 | ||
05e5c116 | 2030 | for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { |
4d22de3e DLR |
2031 | err = t3_seeprom_write(adapter, aligned_offset, *p); |
2032 | aligned_offset += 4; | |
2033 | } | |
2034 | ||
2035 | if (!err) | |
2036 | err = t3_seeprom_wp(adapter, 1); | |
2037 | out: | |
2038 | if (buf != data) | |
2039 | kfree(buf); | |
2040 | return err; | |
2041 | } | |
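/*
 * Unaligned EEPROM writes are handled above by widening to 4-byte
 * words: the first and last words covering the requested range are read
 * back, the user data is merged in at the right byte offset, and the
 * whole aligned span is rewritten, with write protection dropped only
 * for the duration of the update.
 */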
2042 | ||
2043 | static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | |
2044 | { | |
2045 | wol->supported = 0; | |
2046 | wol->wolopts = 0; | |
2047 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | |
2048 | } | |
2049 | ||
2050 | static const struct ethtool_ops cxgb_ethtool_ops = { | |
2051 | .get_settings = get_settings, | |
2052 | .set_settings = set_settings, | |
2053 | .get_drvinfo = get_drvinfo, | |
2054 | .get_msglevel = get_msglevel, | |
2055 | .set_msglevel = set_msglevel, | |
2056 | .get_ringparam = get_sge_param, | |
2057 | .set_ringparam = set_sge_param, | |
2058 | .get_coalesce = get_coalesce, | |
2059 | .set_coalesce = set_coalesce, | |
2060 | .get_eeprom_len = get_eeprom_len, | |
2061 | .get_eeprom = get_eeprom, | |
2062 | .set_eeprom = set_eeprom, | |
2063 | .get_pauseparam = get_pauseparam, | |
2064 | .set_pauseparam = set_pauseparam, | |
4d22de3e DLR |
2065 | .get_link = ethtool_op_get_link, |
2066 | .get_strings = get_strings, | |
12fcf941 | 2067 | .set_phys_id = set_phys_id, |
4d22de3e | 2068 | .nway_reset = restart_autoneg, |
b9f2c044 | 2069 | .get_sset_count = get_sset_count, |
4d22de3e DLR |
2070 | .get_ethtool_stats = get_stats, |
2071 | .get_regs_len = get_regs_len, | |
2072 | .get_regs = get_regs, | |
2073 | .get_wol = get_wol, | |
4d22de3e DLR |
2074 | }; |
2075 | ||
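/*
 * A negative value always passes this check: the extension ioctls below
 * use negative fields (e.g. -1) to mean "parameter not specified, leave
 * the current setting unchanged".
 */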
2076 | static int in_range(int val, int lo, int hi) | |
2077 | { | |
2078 | return val < 0 || (val <= hi && val >= lo); | |
2079 | } | |
2080 | ||
2081 | static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) | |
2082 | { | |
5fbf816f DLR |
2083 | struct port_info *pi = netdev_priv(dev); |
2084 | struct adapter *adapter = pi->adapter; | |
4d22de3e | 2085 | u32 cmd; |
5fbf816f | 2086 | int ret; |
4d22de3e DLR |
2087 | |
2088 | if (copy_from_user(&cmd, useraddr, sizeof(cmd))) | |
2089 | return -EFAULT; | |
2090 | ||
2091 | switch (cmd) { | |
4d22de3e DLR |
2092 | case CHELSIO_SET_QSET_PARAMS:{ |
2093 | int i; | |
2094 | struct qset_params *q; | |
2095 | struct ch_qset_params t; | |
8c263761 DLR |
2096 | int q1 = pi->first_qset; |
2097 | int nqsets = pi->nqsets; | |
4d22de3e DLR |
2098 | |
2099 | if (!capable(CAP_NET_ADMIN)) | |
2100 | return -EPERM; | |
2101 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
2102 | return -EFAULT; | |
2103 | if (t.qset_idx >= SGE_QSETS) | |
2104 | return -EINVAL; | |
2105 | if (!in_range(t.intr_lat, 0, M_NEWTIMER) || | |
8e95a202 JP |
2106 | !in_range(t.cong_thres, 0, 255) || |
2107 | !in_range(t.txq_size[0], MIN_TXQ_ENTRIES, | |
2108 | MAX_TXQ_ENTRIES) || | |
2109 | !in_range(t.txq_size[1], MIN_TXQ_ENTRIES, | |
2110 | MAX_TXQ_ENTRIES) || | |
2111 | !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES, | |
2112 | MAX_CTRL_TXQ_ENTRIES) || | |
2113 | !in_range(t.fl_size[0], MIN_FL_ENTRIES, | |
2114 | MAX_RX_BUFFERS) || | |
2115 | !in_range(t.fl_size[1], MIN_FL_ENTRIES, | |
2116 | MAX_RX_JUMBO_BUFFERS) || | |
2117 | !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, | |
2118 | MAX_RSPQ_ENTRIES)) | |
4d22de3e | 2119 | return -EINVAL; |
8c263761 | 2120 | |
4d22de3e DLR |
2121 | if ((adapter->flags & FULL_INIT_DONE) && |
2122 | (t.rspq_size >= 0 || t.fl_size[0] >= 0 || | |
2123 | t.fl_size[1] >= 0 || t.txq_size[0] >= 0 || | |
2124 | t.txq_size[1] >= 0 || t.txq_size[2] >= 0 || | |
2125 | t.polling >= 0 || t.cong_thres >= 0)) | |
2126 | return -EBUSY; | |
2127 | ||
8c263761 DLR |
2128 | /* Allow setting of any available qset when offload enabled */ |
2129 | if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | |
2130 | q1 = 0; | |
2131 | for_each_port(adapter, i) { | |
2132 | pi = adap2pinfo(adapter, i); | |
2133 | nqsets += pi->first_qset + pi->nqsets; | |
2134 | } | |
2135 | } | |
2136 | ||
2137 | if (t.qset_idx < q1) | |
2138 | return -EINVAL; | |
2139 | if (t.qset_idx > q1 + nqsets - 1) | |
2140 | return -EINVAL; | |
2141 | ||
4d22de3e DLR |
2142 | q = &adapter->params.sge.qset[t.qset_idx]; |
2143 | ||
2144 | if (t.rspq_size >= 0) | |
2145 | q->rspq_size = t.rspq_size; | |
2146 | if (t.fl_size[0] >= 0) | |
2147 | q->fl_size = t.fl_size[0]; | |
2148 | if (t.fl_size[1] >= 0) | |
2149 | q->jumbo_size = t.fl_size[1]; | |
2150 | if (t.txq_size[0] >= 0) | |
2151 | q->txq_size[0] = t.txq_size[0]; | |
2152 | if (t.txq_size[1] >= 0) | |
2153 | q->txq_size[1] = t.txq_size[1]; | |
2154 | if (t.txq_size[2] >= 0) | |
2155 | q->txq_size[2] = t.txq_size[2]; | |
2156 | if (t.cong_thres >= 0) | |
2157 | q->cong_thres = t.cong_thres; | |
2158 | if (t.intr_lat >= 0) { | |
2159 | struct sge_qset *qs = | |
2160 | &adapter->sge.qs[t.qset_idx]; | |
2161 | ||
2162 | q->coalesce_usecs = t.intr_lat; | |
2163 | t3_update_qset_coalesce(qs, q); | |
2164 | } | |
2165 | if (t.polling >= 0) { | |
2166 | if (adapter->flags & USING_MSIX) | |
2167 | q->polling = t.polling; | |
2168 | else { | |
2169 | /* No polling with INTx for T3A */ | |
2170 | if (adapter->params.rev == 0 && | |
2171 | !(adapter->flags & USING_MSI)) | |
2172 | t.polling = 0; | |
2173 | ||
2174 | for (i = 0; i < SGE_QSETS; i++) { | |
2175 | q = &adapter->params.sge.qset[i]; |
2177 | q->polling = t.polling; | |
2178 | } | |
2179 | } | |
2180 | } | |
d2fe2755 MM |
2181 | |
2182 | if (t.lro >= 0) { | |
2183 | if (t.lro) | |
2184 | dev->wanted_features |= NETIF_F_GRO; | |
2185 | else | |
2186 | dev->wanted_features &= ~NETIF_F_GRO; | |
2187 | netdev_update_features(dev); | |
2188 | } | |
04ecb072 | 2189 | |
4d22de3e DLR |
2190 | break; |
2191 | } | |
2192 | case CHELSIO_GET_QSET_PARAMS:{ | |
2193 | struct qset_params *q; | |
2194 | struct ch_qset_params t; | |
8c263761 DLR |
2195 | int q1 = pi->first_qset; |
2196 | int nqsets = pi->nqsets; | |
2197 | int i; | |
4d22de3e DLR |
2198 | |
2199 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
2200 | return -EFAULT; | |
8c263761 DLR |
2201 | |
2202 | /* Display qsets for all ports when offload enabled */ | |
2203 | if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | |
2204 | q1 = 0; | |
2205 | for_each_port(adapter, i) { | |
2206 | pi = adap2pinfo(adapter, i); | |
2207 | nqsets = pi->first_qset + pi->nqsets; | |
2208 | } | |
2209 | } | |
2210 | ||
2211 | if (t.qset_idx >= nqsets) | |
4d22de3e DLR |
2212 | return -EINVAL; |
2213 | ||
8c263761 | 2214 | q = &adapter->params.sge.qset[q1 + t.qset_idx]; |
4d22de3e DLR |
2215 | t.rspq_size = q->rspq_size; |
2216 | t.txq_size[0] = q->txq_size[0]; | |
2217 | t.txq_size[1] = q->txq_size[1]; | |
2218 | t.txq_size[2] = q->txq_size[2]; | |
2219 | t.fl_size[0] = q->fl_size; | |
2220 | t.fl_size[1] = q->jumbo_size; | |
2221 | t.polling = q->polling; | |
d2fe2755 | 2222 | t.lro = !!(dev->features & NETIF_F_GRO); |
4d22de3e DLR |
2223 | t.intr_lat = q->coalesce_usecs; |
2224 | t.cong_thres = q->cong_thres; | |
8c263761 DLR |
2225 | t.qnum = q1; |
2226 | ||
2227 | if (adapter->flags & USING_MSIX) | |
2228 | t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec; | |
2229 | else | |
2230 | t.vector = adapter->pdev->irq; | |
4d22de3e DLR |
2231 | |
2232 | if (copy_to_user(useraddr, &t, sizeof(t))) | |
2233 | return -EFAULT; | |
2234 | break; | |
2235 | } | |
2236 | case CHELSIO_SET_QSET_NUM:{ | |
2237 | struct ch_reg edata; | |
4d22de3e DLR |
2238 | unsigned int i, first_qset = 0, other_qsets = 0; |
2239 | ||
2240 | if (!capable(CAP_NET_ADMIN)) | |
2241 | return -EPERM; | |
2242 | if (adapter->flags & FULL_INIT_DONE) | |
2243 | return -EBUSY; | |
2244 | if (copy_from_user(&edata, useraddr, sizeof(edata))) | |
2245 | return -EFAULT; | |
2246 | if (edata.val < 1 || | |
2247 | (edata.val > 1 && !(adapter->flags & USING_MSIX))) | |
2248 | return -EINVAL; | |
2249 | ||
2250 | for_each_port(adapter, i) | |
2251 | if (adapter->port[i] && adapter->port[i] != dev) | |
2252 | other_qsets += adap2pinfo(adapter, i)->nqsets; | |
2253 | ||
2254 | if (edata.val + other_qsets > SGE_QSETS) | |
2255 | return -EINVAL; | |
2256 | ||
2257 | pi->nqsets = edata.val; | |
2258 | ||
2259 | for_each_port(adapter, i) | |
2260 | if (adapter->port[i]) { | |
2261 | pi = adap2pinfo(adapter, i); | |
2262 | pi->first_qset = first_qset; | |
2263 | first_qset += pi->nqsets; | |
2264 | } | |
2265 | break; | |
2266 | } | |
2267 | case CHELSIO_GET_QSET_NUM:{ | |
2268 | struct ch_reg edata; | |
4d22de3e | 2269 | |
49c37c03 DR |
2270 | memset(&edata, 0, sizeof(struct ch_reg)); |
2271 | ||
4d22de3e DLR |
2272 | edata.cmd = CHELSIO_GET_QSET_NUM; |
2273 | edata.val = pi->nqsets; | |
2274 | if (copy_to_user(useraddr, &edata, sizeof(edata))) | |
2275 | return -EFAULT; | |
2276 | break; | |
2277 | } | |
2278 | case CHELSIO_LOAD_FW:{ | |
2279 | u8 *fw_data; | |
2280 | struct ch_mem_range t; | |
2281 | ||
1b3aa7af | 2282 | if (!capable(CAP_SYS_RAWIO)) |
4d22de3e DLR |
2283 | return -EPERM; |
2284 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
2285 | return -EFAULT; | |
1b3aa7af | 2286 | /* Should t.len be sanity-checked before the allocation below? */ |
c5dc9a35 JL |
2287 | fw_data = memdup_user(useraddr + sizeof(t), t.len); |
2288 | if (IS_ERR(fw_data)) | |
2289 | return PTR_ERR(fw_data); | |
4d22de3e DLR |
2290 | |
2291 | ret = t3_load_fw(adapter, fw_data, t.len); | |
2292 | kfree(fw_data); | |
2293 | if (ret) | |
2294 | return ret; | |
2295 | break; | |
2296 | } | |
2297 | case CHELSIO_SETMTUTAB:{ | |
2298 | struct ch_mtus m; | |
2299 | int i; | |
2300 | ||
2301 | if (!is_offload(adapter)) | |
2302 | return -EOPNOTSUPP; | |
2303 | if (!capable(CAP_NET_ADMIN)) | |
2304 | return -EPERM; | |
2305 | if (offload_running(adapter)) | |
2306 | return -EBUSY; | |
2307 | if (copy_from_user(&m, useraddr, sizeof(m))) | |
2308 | return -EFAULT; | |
2309 | if (m.nmtus != NMTUS) | |
2310 | return -EINVAL; | |
2311 | if (m.mtus[0] < 81) /* accommodate SACK */ | |
2312 | return -EINVAL; | |
2313 | ||
2314 | /* MTUs must be in ascending order */ | |
2315 | for (i = 1; i < NMTUS; ++i) | |
2316 | if (m.mtus[i] < m.mtus[i - 1]) | |
2317 | return -EINVAL; | |
2318 | ||
2319 | memcpy(adapter->params.mtus, m.mtus, | |
2320 | sizeof(adapter->params.mtus)); | |
2321 | break; | |
2322 | } | |
2323 | case CHELSIO_GET_PM:{ | |
2324 | struct tp_params *p = &adapter->params.tp; | |
2325 | struct ch_pm m = {.cmd = CHELSIO_GET_PM }; | |
2326 | ||
2327 | if (!is_offload(adapter)) | |
2328 | return -EOPNOTSUPP; | |
2329 | m.tx_pg_sz = p->tx_pg_size; | |
2330 | m.tx_num_pg = p->tx_num_pgs; | |
2331 | m.rx_pg_sz = p->rx_pg_size; | |
2332 | m.rx_num_pg = p->rx_num_pgs; | |
2333 | m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; | |
2334 | if (copy_to_user(useraddr, &m, sizeof(m))) | |
2335 | return -EFAULT; | |
2336 | break; | |
2337 | } | |
2338 | case CHELSIO_SET_PM:{ | |
2339 | struct ch_pm m; | |
2340 | struct tp_params *p = &adapter->params.tp; | |
2341 | ||
2342 | if (!is_offload(adapter)) | |
2343 | return -EOPNOTSUPP; | |
2344 | if (!capable(CAP_NET_ADMIN)) | |
2345 | return -EPERM; | |
2346 | if (adapter->flags & FULL_INIT_DONE) | |
2347 | return -EBUSY; | |
2348 | if (copy_from_user(&m, useraddr, sizeof(m))) | |
2349 | return -EFAULT; | |
d9da466a | 2350 | if (!is_power_of_2(m.rx_pg_sz) || |
2351 | !is_power_of_2(m.tx_pg_sz)) | |
4d22de3e DLR |
2352 | return -EINVAL; /* not power of 2 */ |
2353 | if (!(m.rx_pg_sz & 0x14000)) | |
2354 | return -EINVAL; /* not 16KB or 64KB */ | |
2355 | if (!(m.tx_pg_sz & 0x1554000)) |
2356 | return -EINVAL; /* not 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */ |
2357 | if (m.tx_num_pg == -1) | |
2358 | m.tx_num_pg = p->tx_num_pgs; | |
2359 | if (m.rx_num_pg == -1) | |
2360 | m.rx_num_pg = p->rx_num_pgs; | |
2361 | if (m.tx_num_pg % 24 || m.rx_num_pg % 24) | |
2362 | return -EINVAL; | |
2363 | if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size || | |
2364 | m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size) | |
2365 | return -EINVAL; | |
2366 | p->rx_pg_size = m.rx_pg_sz; | |
2367 | p->tx_pg_size = m.tx_pg_sz; | |
2368 | p->rx_num_pgs = m.rx_num_pg; | |
2369 | p->tx_num_pgs = m.tx_num_pg; | |
2370 | break; | |
2371 | } | |
2372 | case CHELSIO_GET_MEM:{ | |
2373 | struct ch_mem_range t; | |
2374 | struct mc7 *mem; | |
2375 | u64 buf[32]; | |
2376 | ||
2377 | if (!is_offload(adapter)) | |
2378 | return -EOPNOTSUPP; | |
2379 | if (!(adapter->flags & FULL_INIT_DONE)) | |
2380 | return -EIO; /* need the memory controllers */ | |
2381 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
2382 | return -EFAULT; | |
2383 | if ((t.addr & 7) || (t.len & 7)) | |
2384 | return -EINVAL; | |
2385 | if (t.mem_id == MEM_CM) | |
2386 | mem = &adapter->cm; | |
2387 | else if (t.mem_id == MEM_PMRX) | |
2388 | mem = &adapter->pmrx; | |
2389 | else if (t.mem_id == MEM_PMTX) | |
2390 | mem = &adapter->pmtx; | |
2391 | else | |
2392 | return -EINVAL; | |
2393 | ||
2394 | /* | |
1825494a DLR |
2395 | * Version scheme: |
2396 | * bits 0..9: chip version | |
2397 | * bits 10..15: chip revision | |
2398 | */ | |
4d22de3e DLR |
2399 | t.version = 3 | (adapter->params.rev << 10); |
2400 | if (copy_to_user(useraddr, &t, sizeof(t))) | |
2401 | return -EFAULT; | |
2402 | ||
2403 | /* | |
2404 | * Read 256 bytes at a time as len can be large and we don't | |
2405 | * want to use huge intermediate buffers. | |
2406 | */ | |
2407 | useraddr += sizeof(t); /* advance to start of buffer */ | |
2408 | while (t.len) { | |
2409 | unsigned int chunk = | |
2410 | min_t(unsigned int, t.len, sizeof(buf)); | |
2411 | ||
2412 | ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf); |
2415 | if (ret) | |
2416 | return ret; | |
2417 | if (copy_to_user(useraddr, buf, chunk)) | |
2418 | return -EFAULT; | |
2419 | useraddr += chunk; | |
2420 | t.addr += chunk; | |
2421 | t.len -= chunk; | |
2422 | } | |
2423 | break; | |
2424 | } | |
2425 | case CHELSIO_SET_TRACE_FILTER:{ | |
2426 | struct ch_trace t; | |
2427 | const struct trace_params *tp; | |
2428 | ||
2429 | if (!capable(CAP_NET_ADMIN)) | |
2430 | return -EPERM; | |
2431 | if (!offload_running(adapter)) | |
2432 | return -EAGAIN; | |
2433 | if (copy_from_user(&t, useraddr, sizeof(t))) | |
2434 | return -EFAULT; | |
2435 | ||
2436 | tp = (const struct trace_params *)&t.sip; | |
2437 | if (t.config_tx) | |
2438 | t3_config_trace_filter(adapter, tp, 0, | |
2439 | t.invert_match, | |
2440 | t.trace_tx); | |
2441 | if (t.config_rx) | |
2442 | t3_config_trace_filter(adapter, tp, 1, | |
2443 | t.invert_match, | |
2444 | t.trace_rx); | |
2445 | break; | |
2446 | } | |
4d22de3e DLR |
2447 | default: |
2448 | return -EOPNOTSUPP; | |
2449 | } | |
2450 | return 0; | |
2451 | } | |
2452 | ||
2453 | static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | |
2454 | { | |
4d22de3e | 2455 | struct mii_ioctl_data *data = if_mii(req); |
5fbf816f DLR |
2456 | struct port_info *pi = netdev_priv(dev); |
2457 | struct adapter *adapter = pi->adapter; | |
4d22de3e DLR |
2458 | |
2459 | switch (cmd) { | |
0f07c4ee BH |
2460 | case SIOCGMIIREG: |
2461 | case SIOCSMIIREG: | |
2462 | /* Convert phy_id from the older packed format (PRTAD in bits 8..12, DEVAD in bits 0..4) to clause-45 form */ |
2463 | if (is_10G(adapter) && | |
2464 | !mdio_phy_id_is_c45(data->phy_id) && | |
2465 | (data->phy_id & 0x1f00) && | |
2466 | !(data->phy_id & 0xe0e0)) | |
2467 | data->phy_id = mdio_phy_id_c45(data->phy_id >> 8, | |
2468 | data->phy_id & 0x1f); | |
4d22de3e | 2469 | /* FALLTHRU */ |
0f07c4ee BH |
2470 | case SIOCGMIIPHY: |
2471 | return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); | |
4d22de3e DLR |
2472 | case SIOCCHIOCTL: |
2473 | return cxgb_extension_ioctl(dev, req->ifr_data); | |
2474 | default: | |
2475 | return -EOPNOTSUPP; | |
2476 | } | |
4d22de3e DLR |
2477 | } |
2478 | ||
2479 | static int cxgb_change_mtu(struct net_device *dev, int new_mtu) | |
2480 | { | |
4d22de3e | 2481 | struct port_info *pi = netdev_priv(dev); |
5fbf816f DLR |
2482 | struct adapter *adapter = pi->adapter; |
2483 | int ret; | |
4d22de3e DLR |
2484 | |
2485 | if (new_mtu < 81) /* accommodate SACK */ | |
2486 | return -EINVAL; | |
2487 | if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) | |
2488 | return ret; | |
2489 | dev->mtu = new_mtu; | |
2490 | init_port_mtus(adapter); | |
2491 | if (adapter->params.rev == 0 && offload_running(adapter)) | |
2492 | t3_load_mtus(adapter, adapter->params.mtus, | |
2493 | adapter->params.a_wnd, adapter->params.b_wnd, | |
2494 | adapter->port[0]->mtu); | |
2495 | return 0; | |
2496 | } | |
2497 | ||
2498 | static int cxgb_set_mac_addr(struct net_device *dev, void *p) | |
2499 | { | |
4d22de3e | 2500 | struct port_info *pi = netdev_priv(dev); |
5fbf816f | 2501 | struct adapter *adapter = pi->adapter; |
4d22de3e DLR |
2502 | struct sockaddr *addr = p; |
2503 | ||
2504 | if (!is_valid_ether_addr(addr->sa_data)) | |
2505 | return -EINVAL; | |
2506 | ||
2507 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
f14d42f3 | 2508 | t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); |
4d22de3e DLR |
2509 | if (offload_running(adapter)) |
2510 | write_smt_entry(adapter, pi->port_id); | |
2511 | return 0; | |
2512 | } | |
2513 | ||
2514 | /** | |
2515 | * t3_synchronize_rx - wait for current Rx processing on a port to complete | |
2516 | * @adap: the adapter | |
2517 | * @p: the port | |
2518 | * | |
2519 | * Ensures that current Rx processing on any of the queues associated with | |
2520 | * the given port completes before returning. We do this by acquiring and | |
2521 | * releasing the locks of the response queues associated with the port (the acquire can succeed only once any in-flight handler has released the lock). | |
2522 | */ | |
2523 | static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) | |
2524 | { | |
2525 | int i; | |
2526 | ||
8c263761 DLR |
2527 | for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { |
2528 | struct sge_rspq *q = &adap->sge.qs[i].rspq; | |
4d22de3e DLR |
2529 | |
2530 | spin_lock_irq(&q->lock); | |
2531 | spin_unlock_irq(&q->lock); | |
2532 | } | |
2533 | } | |
2534 | ||
892ef5d8 | 2535 | static void cxgb_vlan_mode(struct net_device *dev, u32 features) |
4d22de3e | 2536 | { |
4d22de3e | 2537 | struct port_info *pi = netdev_priv(dev); |
5fbf816f | 2538 | struct adapter *adapter = pi->adapter; |
4d22de3e | 2539 | |
892ef5d8 JP |
2540 | if (adapter->params.rev > 0) { |
2541 | t3_set_vlan_accel(adapter, 1 << pi->port_id, | |
2542 | features & NETIF_F_HW_VLAN_RX); | |
2543 | } else { | |
4d22de3e | 2544 | /* single control for all ports */ |
892ef5d8 JP |
2545 | unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; |
2546 | ||
4d22de3e | 2547 | for_each_port(adapter, i) |
892ef5d8 JP |
2548 | have_vlans |= |
2549 | adapter->port[i]->features & NETIF_F_HW_VLAN_RX; | |
4d22de3e DLR |
2550 | |
2551 | t3_set_vlan_accel(adapter, 1, have_vlans); | |
2552 | } | |
2553 | t3_synchronize_rx(adapter, pi); | |
2554 | } | |
2555 | ||
892ef5d8 JP |
2556 | static u32 cxgb_fix_features(struct net_device *dev, u32 features) |
2557 | { | |
2558 | /* | |
2559 | * Since there is no support for separate rx/tx vlan accel | |
2560 | * enable/disable, make sure the tx flag is always in the same state as rx. | |
2561 | */ | |
2562 | if (features & NETIF_F_HW_VLAN_RX) | |
2563 | features |= NETIF_F_HW_VLAN_TX; | |
2564 | else | |
2565 | features &= ~NETIF_F_HW_VLAN_TX; | |
2566 | ||
2567 | return features; | |
2568 | } | |
2569 | ||
2570 | static int cxgb_set_features(struct net_device *dev, u32 features) | |
2571 | { | |
2572 | u32 changed = dev->features ^ features; | |
2573 | ||
2574 | if (changed & NETIF_F_HW_VLAN_RX) | |
2575 | cxgb_vlan_mode(dev, features); | |
2576 | ||
2577 | return 0; | |
2578 | } | |
2579 | ||
4d22de3e DLR |
2580 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2581 | static void cxgb_netpoll(struct net_device *dev) | |
2582 | { | |
890de332 | 2583 | struct port_info *pi = netdev_priv(dev); |
5fbf816f | 2584 | struct adapter *adapter = pi->adapter; |
890de332 | 2585 | int qidx; |
4d22de3e | 2586 | |
890de332 DLR |
2587 | for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) { |
2588 | struct sge_qset *qs = &adapter->sge.qs[qidx]; | |
2589 | void *source; | |
2eab17ab | 2590 | |
890de332 DLR |
2591 | if (adapter->flags & USING_MSIX) |
2592 | source = qs; | |
2593 | else | |
2594 | source = adapter; | |
2595 | ||
2596 | t3_intr_handler(adapter, qs->rspq.polling) (0, source); | |
2597 | } | |
4d22de3e DLR |
2598 | } |
2599 | #endif | |
2600 | ||
2601 | /* | |
2602 | * Periodic accumulation of MAC statistics. | |
2603 | */ | |
2604 | static void mac_stats_update(struct adapter *adapter) | |
2605 | { | |
2606 | int i; | |
2607 | ||
2608 | for_each_port(adapter, i) { | |
2609 | struct net_device *dev = adapter->port[i]; | |
2610 | struct port_info *p = netdev_priv(dev); | |
2611 | ||
2612 | if (netif_running(dev)) { | |
2613 | spin_lock(&adapter->stats_lock); | |
2614 | t3_mac_update_stats(&p->mac); | |
2615 | spin_unlock(&adapter->stats_lock); | |
2616 | } | |
2617 | } | |
2618 | } | |
2619 | ||
2620 | static void check_link_status(struct adapter *adapter) | |
2621 | { | |
2622 | int i; | |
2623 | ||
2624 | for_each_port(adapter, i) { | |
2625 | struct net_device *dev = adapter->port[i]; | |
2626 | struct port_info *p = netdev_priv(dev); | |
c22c8149 | 2627 | int link_fault; |
4d22de3e | 2628 | |
bf792094 | 2629 | spin_lock_irq(&adapter->work_lock); |
c22c8149 DLR |
2630 | link_fault = p->link_fault; |
2631 | spin_unlock_irq(&adapter->work_lock); | |
2632 | ||
2633 | if (link_fault) { | |
3851c66c | 2634 | t3_link_fault(adapter, i); |
bf792094 DLR |
2635 | continue; |
2636 | } | |
bf792094 DLR |
2637 | |
2638 | if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) { | |
2639 | t3_xgm_intr_disable(adapter, i); | |
2640 | t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); | |
2641 | ||
4d22de3e | 2642 | t3_link_changed(adapter, i); |
bf792094 DLR |
2643 | t3_xgm_intr_enable(adapter, i); |
2644 | } | |
4d22de3e DLR |
2645 | } |
2646 | } | |
2647 | ||
fc90664e DLR |
2648 | static void check_t3b2_mac(struct adapter *adapter) |
2649 | { | |
2650 | int i; | |
2651 | ||
f2d961c9 DLR |
2652 | if (!rtnl_trylock()) /* synchronize with ifdown */ |
2653 | return; | |
2654 | ||
fc90664e DLR |
2655 | for_each_port(adapter, i) { |
2656 | struct net_device *dev = adapter->port[i]; | |
2657 | struct port_info *p = netdev_priv(dev); | |
2658 | int status; | |
2659 | ||
2660 | if (!netif_running(dev)) | |
2661 | continue; | |
2662 | ||
2663 | status = 0; | |
6d6dabac | 2664 | if (netif_carrier_ok(dev)) /* netif_running() already checked above */ |
fc90664e DLR |
2665 | status = t3b2_mac_watchdog_task(&p->mac); |
2666 | if (status == 1) | |
2667 | p->mac.stats.num_toggled++; | |
2668 | else if (status == 2) { | |
2669 | struct cmac *mac = &p->mac; | |
2670 | ||
2671 | t3_mac_set_mtu(mac, dev->mtu); | |
f14d42f3 | 2672 | t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr); |
fc90664e DLR |
2673 | cxgb_set_rxmode(dev); |
2674 | t3_link_start(&p->phy, mac, &p->link_config); | |
2675 | t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); | |
2676 | t3_port_intr_enable(adapter, p->port_id); | |
2677 | p->mac.stats.num_resets++; | |
2678 | } | |
2679 | } | |
2680 | rtnl_unlock(); | |
2681 | } | |
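/*
 * Judging by the handling above, t3b2_mac_watchdog_task() returns 0
 * when the MAC looks healthy, 1 when it recovered by toggling Tx
 * enable, and 2 when the MAC had to be reset, in which case MTU,
 * address, Rx mode, link and interrupts are all reprogrammed.
 */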
2682 | ||
2683 | ||
4d22de3e DLR |
2684 | static void t3_adap_check_task(struct work_struct *work) |
2685 | { | |
2686 | struct adapter *adapter = container_of(work, struct adapter, | |
2687 | adap_check_task.work); | |
2688 | const struct adapter_params *p = &adapter->params; | |
fc882196 DLR |
2689 | int port; |
2690 | unsigned int v, status, reset; | |
4d22de3e DLR |
2691 | |
2692 | adapter->check_task_cnt++; | |
2693 | ||
3851c66c | 2694 | check_link_status(adapter); |
4d22de3e DLR |
2695 | |
2696 | /* Accumulate MAC stats if needed */ | |
2697 | if (!p->linkpoll_period || | |
2698 | (adapter->check_task_cnt * p->linkpoll_period) / 10 >= | |
2699 | p->stats_update_period) { | |
2700 | mac_stats_update(adapter); | |
2701 | adapter->check_task_cnt = 0; | |
2702 | } | |
2703 | ||
fc90664e DLR |
2704 | if (p->rev == T3_REV_B2) |
2705 | check_t3b2_mac(adapter); | |
2706 | ||
fc882196 DLR |
2707 | /* |
2708 | * Scan the XGMAC's to check for various conditions which we want to | |
2709 | * monitor in a periodic polling manner rather than via an interrupt | |
2710 | * condition. This is used for conditions which would otherwise flood | |
2711 | * the system with interrupts and we only really need to know that the | |
2712 | * conditions are "happening" ... For each condition we count the | |
2713 | * detection of the condition and reset it for the next polling loop. | |
2714 | */ | |
2715 | for_each_port(adapter, port) { | |
2716 | struct cmac *mac = &adap2pinfo(adapter, port)->mac; | |
2717 | u32 cause; | |
2718 | ||
2719 | cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset); | |
2720 | reset = 0; | |
2721 | if (cause & F_RXFIFO_OVERFLOW) { | |
2722 | mac->stats.rx_fifo_ovfl++; | |
2723 | reset |= F_RXFIFO_OVERFLOW; | |
2724 | } | |
2725 | ||
2726 | t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset); | |
2727 | } | |
2728 | ||
2729 | /* | |
2730 | * We do the same as above for FL_EMPTY interrupts. | |
2731 | */ | |
2732 | status = t3_read_reg(adapter, A_SG_INT_CAUSE); | |
2733 | reset = 0; | |
2734 | ||
2735 | if (status & F_FLEMPTY) { | |
2736 | struct sge_qset *qs = &adapter->sge.qs[0]; | |
2737 | int i = 0; | |
2738 | ||
2739 | reset |= F_FLEMPTY; | |
2740 | ||
2741 | v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) & | |
2742 | 0xffff; | |
2743 | ||
2744 | while (v) { | |
2745 | qs->fl[i].empty += (v & 1); | |
2746 | if (i) | |
2747 | qs++; | |
2748 | i ^= 1; | |
2749 | v >>= 1; | |
2750 | } | |
2751 | } | |
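/*
 * The decode loop above assumes the 16 status bits starting at
 * S_FL0EMPTY alternate between the two free lists of consecutive qsets
 * (qs0.fl0, qs0.fl1, qs1.fl0, ...): i toggles between fl[0] and fl[1],
 * and the qset pointer advances after every second bit.
 */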
2752 | ||
2753 | t3_write_reg(adapter, A_SG_INT_CAUSE, reset); | |
2754 | ||
4d22de3e | 2755 | /* Schedule the next check update if any port is active. */ |
20d3fc11 | 2756 | spin_lock_irq(&adapter->work_lock); |
4d22de3e DLR |
2757 | if (adapter->open_device_map & PORT_MASK) |
2758 | schedule_chk_task(adapter); | |
20d3fc11 | 2759 | spin_unlock_irq(&adapter->work_lock); |
4d22de3e DLR |
2760 | } |
2761 | ||
e998f245 SW |
2762 | static void db_full_task(struct work_struct *work) |
2763 | { | |
2764 | struct adapter *adapter = container_of(work, struct adapter, | |
2765 | db_full_task); | |
2766 | ||
2767 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0); | |
2768 | } | |
2769 | ||
2770 | static void db_empty_task(struct work_struct *work) | |
2771 | { | |
2772 | struct adapter *adapter = container_of(work, struct adapter, | |
2773 | db_empty_task); | |
2774 | ||
2775 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0); | |
2776 | } | |
2777 | ||
2778 | static void db_drop_task(struct work_struct *work) | |
2779 | { | |
2780 | struct adapter *adapter = container_of(work, struct adapter, | |
2781 | db_drop_task); | |
2782 | unsigned long delay = 1000; | |
2783 | unsigned short r; | |
2784 | ||
2785 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0); | |
2786 | ||
2787 | /* | |
2788 | * Sleep a while before ringing the driver qset dbs. | |
2789 | * The delay is between 1000-2023 usecs. | |
2790 | */ | |
2791 | get_random_bytes(&r, 2); | |
2792 | delay += r & 1023; | |
2793 | set_current_state(TASK_UNINTERRUPTIBLE); | |
2794 | schedule_timeout(usecs_to_jiffies(delay)); | |
2795 | ring_dbs(adapter); | |
2796 | } | |
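/*
 * The arithmetic above yields delay = 1000 + (r & 1023), a uniformly
 * distributed delay in [1000, 2023] usecs; the randomized back-off
 * presumably keeps multiple queues/adapters from ringing their
 * doorbells in lock-step after a drop.
 */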
2797 | ||
4d22de3e DLR |
2798 | /* |
2799 | * Processes external (PHY) interrupts in process context. | |
2800 | */ | |
2801 | static void ext_intr_task(struct work_struct *work) | |
2802 | { | |
2803 | struct adapter *adapter = container_of(work, struct adapter, | |
2804 | ext_intr_handler_task); | |
bf792094 DLR |
2805 | int i; |
2806 | ||
2807 | /* Disable link fault interrupts */ | |
2808 | for_each_port(adapter, i) { | |
2809 | struct net_device *dev = adapter->port[i]; | |
2810 | struct port_info *p = netdev_priv(dev); | |
2811 | ||
2812 | t3_xgm_intr_disable(adapter, i); | |
2813 | t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset); | |
2814 | } | |
4d22de3e | 2815 | |
bf792094 | 2816 | /* Process the PHY interrupt, then re-enable link fault interrupts */ |
4d22de3e DLR |
2817 | t3_phy_intr_handler(adapter); |
2818 | ||
bf792094 DLR |
2819 | for_each_port(adapter, i) |
2820 | t3_xgm_intr_enable(adapter, i); | |
2821 | ||
4d22de3e DLR |
2822 | /* Now reenable external interrupts */ |
2823 | spin_lock_irq(&adapter->work_lock); | |
2824 | if (adapter->slow_intr_mask) { | |
2825 | adapter->slow_intr_mask |= F_T3DBG; | |
2826 | t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG); | |
2827 | t3_write_reg(adapter, A_PL_INT_ENABLE0, | |
2828 | adapter->slow_intr_mask); | |
2829 | } | |
2830 | spin_unlock_irq(&adapter->work_lock); | |
2831 | } | |
2832 | ||
2833 | /* | |
2834 | * Interrupt-context handler for external (PHY) interrupts. | |
2835 | */ | |
2836 | void t3_os_ext_intr_handler(struct adapter *adapter) | |
2837 | { | |
2838 | /* | |
2839 | * Schedule a task to handle external interrupts as they may be slow | |
2840 | * and we use a mutex to protect MDIO registers. We disable PHY | |
2841 | * interrupts in the meantime and let the task reenable them when | |
2842 | * it's done. | |
2843 | */ | |
2844 | spin_lock(&adapter->work_lock); | |
2845 | if (adapter->slow_intr_mask) { | |
2846 | adapter->slow_intr_mask &= ~F_T3DBG; | |
2847 | t3_write_reg(adapter, A_PL_INT_ENABLE0, | |
2848 | adapter->slow_intr_mask); | |
2849 | queue_work(cxgb3_wq, &adapter->ext_intr_handler_task); | |
2850 | } | |
2851 | spin_unlock(&adapter->work_lock); | |
2852 | } | |
2853 | ||
bf792094 DLR |
2854 | void t3_os_link_fault_handler(struct adapter *adapter, int port_id) |
2855 | { | |
2856 | struct net_device *netdev = adapter->port[port_id]; | |
2857 | struct port_info *pi = netdev_priv(netdev); | |
2858 | ||
2859 | spin_lock(&adapter->work_lock); | |
2860 | pi->link_fault = 1; | |
bf792094 DLR |
2861 | spin_unlock(&adapter->work_lock); |
2862 | } | |
2863 | ||
55bc3228 | 2864 | static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq) |
20d3fc11 DLR |
2865 | { |
2866 | int i, ret = 0; | |
2867 | ||
cb0bc205 DLR |
2868 | if (is_offload(adapter) && |
2869 | test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { | |
fa0d4c11 | 2870 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); |
cb0bc205 DLR |
2871 | offload_close(&adapter->tdev); |
2872 | } | |
2873 | ||
20d3fc11 DLR |
2874 | /* Stop all ports */ |
2875 | for_each_port(adapter, i) { | |
2876 | struct net_device *netdev = adapter->port[i]; | |
2877 | ||
2878 | if (netif_running(netdev)) | |
55bc3228 | 2879 | __cxgb_close(netdev, on_wq); |
20d3fc11 DLR |
2880 | } |
2881 | ||
20d3fc11 DLR |
2882 | /* Stop SGE timers */ |
2883 | t3_stop_sge_timers(adapter); | |
2884 | ||
2885 | adapter->flags &= ~FULL_INIT_DONE; | |
2886 | ||
2887 | if (reset) | |
2888 | ret = t3_reset_adapter(adapter); | |
2889 | ||
2890 | pci_disable_device(adapter->pdev); | |
2891 | ||
2892 | return ret; | |
2893 | } | |
2894 | ||
2895 | static int t3_reenable_adapter(struct adapter *adapter) | |
2896 | { | |
2897 | if (pci_enable_device(adapter->pdev)) { | |
2898 | dev_err(&adapter->pdev->dev, | |
2899 | "Cannot re-enable PCI device after reset.\n"); | |
2900 | goto err; | |
2901 | } | |
2902 | pci_set_master(adapter->pdev); | |
2903 | pci_restore_state(adapter->pdev); | |
ccdddf50 | 2904 | pci_save_state(adapter->pdev); |
20d3fc11 DLR |
2905 | |
2906 | /* Free sge resources */ | |
2907 | t3_free_sge_resources(adapter); | |
2908 | ||
2909 | if (t3_replay_prep_adapter(adapter)) | |
2910 | goto err; | |
2911 | ||
2912 | return 0; | |
2913 | err: | |
2914 | return -1; | |
2915 | } | |
2916 | ||
2917 | static void t3_resume_ports(struct adapter *adapter) | |
2918 | { | |
2919 | int i; | |
2920 | ||
2921 | /* Restart the ports */ | |
2922 | for_each_port(adapter, i) { | |
2923 | struct net_device *netdev = adapter->port[i]; | |
2924 | ||
2925 | if (netif_running(netdev)) { | |
2926 | if (cxgb_open(netdev)) { | |
2927 | dev_err(&adapter->pdev->dev, | |
2928 | "can't bring device back up" | |
2929 | " after reset\n"); | |
2930 | continue; | |
2931 | } | |
2932 | } | |
2933 | } | |
cb0bc205 DLR |
2934 | |
2935 | if (is_offload(adapter) && !ofld_disable) | |
fa0d4c11 | 2936 | cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); |
20d3fc11 DLR |
2937 | } |
2938 | ||
2939 | /* | |
2940 | * Processes a fatal error: bring the ports down, reset the chip, |
2941 | * then bring the ports back up. |
2942 | */ | |
2943 | static void fatal_error_task(struct work_struct *work) | |
2944 | { | |
2945 | struct adapter *adapter = container_of(work, struct adapter, | |
2946 | fatal_error_handler_task); | |
2947 | int err = 0; | |
2948 | ||
2949 | rtnl_lock(); | |
55bc3228 | 2950 | err = t3_adapter_error(adapter, 1, 1); |
20d3fc11 DLR |
2951 | if (!err) |
2952 | err = t3_reenable_adapter(adapter); | |
2953 | if (!err) | |
2954 | t3_resume_ports(adapter); | |
2955 | ||
2956 | CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded"); | |
2957 | rtnl_unlock(); | |
2958 | } | |
2959 | ||
4d22de3e DLR |
2960 | void t3_fatal_err(struct adapter *adapter) |
2961 | { | |
2962 | unsigned int fw_status[4]; | |
2963 | ||
2964 | if (adapter->flags & FULL_INIT_DONE) { | |
2965 | t3_sge_stop(adapter); | |
c64c2eae DLR |
2966 | t3_write_reg(adapter, A_XGM_TX_CTRL, 0); |
2967 | t3_write_reg(adapter, A_XGM_RX_CTRL, 0); | |
2968 | t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0); | |
2969 | t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0); | |
20d3fc11 DLR |
2970 | |
2971 | spin_lock(&adapter->work_lock); | |
4d22de3e | 2972 | t3_intr_disable(adapter); |
20d3fc11 DLR |
2973 | queue_work(cxgb3_wq, &adapter->fatal_error_handler_task); |
2974 | spin_unlock(&adapter->work_lock); | |
4d22de3e DLR |
2975 | } |
2976 | CH_ALERT(adapter, "encountered fatal error, operation suspended\n"); | |
2977 | if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status)) | |
2978 | CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n", | |
2979 | fw_status[0], fw_status[1], | |
2980 | fw_status[2], fw_status[3]); | |
4d22de3e DLR |
2981 | } |
2982 | ||
91a6b50c DLR |
2983 | /** |
2984 | * t3_io_error_detected - called when PCI error is detected | |
2985 | * @pdev: Pointer to PCI device | |
2986 | * @state: The current pci connection state | |
2987 | * | |
2988 | * This function is called after a PCI bus error affecting | |
2989 | * this device has been detected. | |
2990 | */ | |
2991 | static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev, | |
2992 | pci_channel_state_t state) | |
2993 | { | |
bc4b6b52 | 2994 | struct adapter *adapter = pci_get_drvdata(pdev); |
91a6b50c | 2995 | |
e8d19370 DLR |
2996 | if (state == pci_channel_io_perm_failure) |
2997 | return PCI_ERS_RESULT_DISCONNECT; | |
2998 | ||
c661c4a2 | 2999 | t3_adapter_error(adapter, 0, 0); |
91a6b50c | 3000 | |
48c4b6db | 3001 | /* Request a slot reset. */ |
91a6b50c DLR |
3002 | return PCI_ERS_RESULT_NEED_RESET; |
3003 | } | |
3004 | ||
3005 | /** | |
3006 | * t3_io_slot_reset - called after the pci bus has been reset. | |
3007 | * @pdev: Pointer to PCI device | |
3008 | * | |
3009 | * Restart the card from scratch, as if from a cold-boot. | |
3010 | */ | |
3011 | static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev) | |
3012 | { | |
bc4b6b52 | 3013 | struct adapter *adapter = pci_get_drvdata(pdev); |
91a6b50c | 3014 | |
20d3fc11 DLR |
3015 | if (!t3_reenable_adapter(adapter)) |
3016 | return PCI_ERS_RESULT_RECOVERED; | |
91a6b50c | 3017 | |
48c4b6db | 3018 | return PCI_ERS_RESULT_DISCONNECT; |
91a6b50c DLR |
3019 | } |
3020 | ||
3021 | /** | |
3022 | * t3_io_resume - called when traffic can start flowing again. | |
3023 | * @pdev: Pointer to PCI device | |
3024 | * | |
3025 | * This callback is called when the error recovery driver tells us that | |
3026 | * its OK to resume normal operation. | |
3027 | */ | |
3028 | static void t3_io_resume(struct pci_dev *pdev) | |
3029 | { | |
bc4b6b52 | 3030 | struct adapter *adapter = pci_get_drvdata(pdev); |
91a6b50c | 3031 | |
68f40c10 DLR |
3032 | CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n", |
3033 | t3_read_reg(adapter, A_PCIE_PEX_ERR)); | |
3034 | ||
20d3fc11 | 3035 | t3_resume_ports(adapter); |
91a6b50c DLR |
3036 | } |
3037 | ||
3038 | static struct pci_error_handlers t3_err_handler = { | |
3039 | .error_detected = t3_io_error_detected, | |
3040 | .slot_reset = t3_io_slot_reset, | |
3041 | .resume = t3_io_resume, | |
3042 | }; | |
3043 | ||
8c263761 DLR |
3044 | /* |
3045 | * Set the number of qsets based on the number of CPUs and the number of ports, | |
3046 | * not to exceed the number of available qsets, assuming there are enough qsets | |
3047 | * per port in HW. | |
3048 | */ | |
3049 | static void set_nqsets(struct adapter *adap) | |
3050 | { | |
3051 | int i, j = 0; | |
3052 | int num_cpus = num_online_cpus(); | |
3053 | int hwports = adap->params.nports; | |
5cda9364 | 3054 | int nqsets = adap->msix_nvectors - 1; |
8c263761 | 3055 | |
f9ee3882 | 3056 | if (adap->params.rev > 0 && adap->flags & USING_MSIX) { |
8c263761 DLR |
3057 | if (hwports == 2 && |
3058 | (hwports * nqsets > SGE_QSETS || | |
3059 | num_cpus >= nqsets / hwports)) | |
3060 | nqsets /= hwports; | |
3061 | if (nqsets > num_cpus) | |
3062 | nqsets = num_cpus; | |
3063 | if (nqsets < 1 || hwports == 4) | |
3064 | nqsets = 1; | |
3065 | } else | |
3066 | nqsets = 1; | |
3067 | ||
3068 | for_each_port(adap, i) { | |
3069 | struct port_info *pi = adap2pinfo(adap, i); | |
3070 | ||
3071 | pi->first_qset = j; | |
3072 | pi->nqsets = nqsets; | |
3073 | j = pi->first_qset + nqsets; | |
3074 | ||
3075 | dev_info(&adap->pdev->dev, | |
3076 | "Port %d using %d queue sets.\n", i, nqsets); | |
3077 | } | |
3078 | } | |
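/*
 * Worked example of the heuristic above, assuming SGE_QSETS is 8 and
 * all SGE_QSETS + 1 MSI-X vectors were allocated: nqsets starts at 8;
 * on a 2-port adapter 2 * 8 > SGE_QSETS, so nqsets is halved to 4;
 * with at least 4 online CPUs it stays 4, giving each port 4 queue
 * sets (first_qset 0 and 4 respectively).
 */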
3079 | ||
4d22de3e DLR |
3080 | static int __devinit cxgb_enable_msix(struct adapter *adap) |
3081 | { | |
3082 | struct msix_entry entries[SGE_QSETS + 1]; | |
5cda9364 | 3083 | int vectors; |
4d22de3e DLR |
3084 | int i, err; |
3085 | ||
5cda9364 DLR |
3086 | vectors = ARRAY_SIZE(entries); |
3087 | for (i = 0; i < vectors; ++i) | |
4d22de3e DLR |
3088 | entries[i].entry = i; |
3089 | ||
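/* With the old pci_enable_msix() API a positive return value is the
 * number of vectors that could have been allocated, so retry with that
 * smaller count until we succeed or hit a real error.
 */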
5cda9364 DLR |
3090 | while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0) |
3091 | vectors = err; | |
3092 | ||
2c2f409f DLR |
3093 | if (err < 0) |
3094 | pci_disable_msix(adap->pdev); | |
3095 | ||
3096 | if (!err && vectors < (adap->params.nports + 1)) { | |
3097 | pci_disable_msix(adap->pdev); | |
5cda9364 | 3098 | err = -1; |
2c2f409f | 3099 | } |
5cda9364 | 3100 | |
4d22de3e | 3101 | if (!err) { |
5cda9364 | 3102 | for (i = 0; i < vectors; ++i) |
4d22de3e | 3103 | adap->msix_info[i].vec = entries[i].vector; |
5cda9364 DLR |
3104 | adap->msix_nvectors = vectors; |
3105 | } | |
3106 | ||
4d22de3e DLR |
3107 | return err; |
3108 | } | |
3109 | ||
3110 | static void __devinit print_port_info(struct adapter *adap, | |
3111 | const struct adapter_info *ai) | |
3112 | { | |
3113 | static const char *pci_variant[] = { | |
3114 | "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express" | |
3115 | }; | |
3116 | ||
3117 | int i; | |
3118 | char buf[80]; | |
3119 | ||
3120 | if (is_pcie(adap)) | |
3121 | snprintf(buf, sizeof(buf), "%s x%d", | |
3122 | pci_variant[adap->params.pci.variant], | |
3123 | adap->params.pci.width); | |
3124 | else | |
3125 | snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit", | |
3126 | pci_variant[adap->params.pci.variant], | |
3127 | adap->params.pci.speed, adap->params.pci.width); | |
3128 | ||
3129 | for_each_port(adap, i) { | |
3130 | struct net_device *dev = adap->port[i]; | |
3131 | const struct port_info *pi = netdev_priv(dev); | |
3132 | ||
3133 | if (!test_bit(i, &adap->registered_device_map)) | |
3134 | continue; | |
8ac3ba68 | 3135 | printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n", |
04497982 | 3136 | dev->name, ai->desc, pi->phy.desc, |
8ac3ba68 | 3137 | is_offload(adap) ? "R" : "", adap->params.rev, buf, |
4d22de3e DLR |
3138 | (adap->flags & USING_MSIX) ? " MSI-X" : |
3139 | (adap->flags & USING_MSI) ? " MSI" : ""); | |
3140 | if (adap->name == dev->name && adap->params.vpd.mclk) | |
167cdf5f DLR |
3141 | printk(KERN_INFO |
3142 | "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n", | |
4d22de3e DLR |
3143 | adap->name, t3_mc7_size(&adap->cm) >> 20, |
3144 | t3_mc7_size(&adap->pmtx) >> 20, | |
167cdf5f DLR |
3145 | t3_mc7_size(&adap->pmrx) >> 20, |
3146 | adap->params.vpd.sn); | |
4d22de3e DLR |
3147 | } |
3148 | } | |
3149 | ||
dd752696 SH |
3150 | static const struct net_device_ops cxgb_netdev_ops = { |
3151 | .ndo_open = cxgb_open, | |
3152 | .ndo_stop = cxgb_close, | |
43a944f3 | 3153 | .ndo_start_xmit = t3_eth_xmit, |
dd752696 SH |
3154 | .ndo_get_stats = cxgb_get_stats, |
3155 | .ndo_validate_addr = eth_validate_addr, | |
afc4b13d | 3156 | .ndo_set_rx_mode = cxgb_set_rxmode, |
dd752696 SH |
3157 | .ndo_do_ioctl = cxgb_ioctl, |
3158 | .ndo_change_mtu = cxgb_change_mtu, | |
3159 | .ndo_set_mac_address = cxgb_set_mac_addr, | |
892ef5d8 JP |
3160 | .ndo_fix_features = cxgb_fix_features, |
3161 | .ndo_set_features = cxgb_set_features, | |
dd752696 SH |
3162 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3163 | .ndo_poll_controller = cxgb_netpoll, | |
3164 | #endif | |
3165 | }; | |
3166 | ||
f14d42f3 KX |
3167 | static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev) |
3168 | { | |
3169 | struct port_info *pi = netdev_priv(dev); | |
3170 | ||
3171 | memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN); | |
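/* Derive a distinct MAC for the iSCSI function by setting the top bit
 * of byte 3 of the port's Ethernet address.
 */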
3172 | pi->iscsic.mac_addr[3] |= 0x80; | |
3173 | } | |
3174 | ||
4d22de3e DLR |
3175 | static int __devinit init_one(struct pci_dev *pdev, |
3176 | const struct pci_device_id *ent) | |
3177 | { | |
3178 | static int version_printed; | |
3179 | ||
3180 | int i, err, pci_using_dac = 0; | |
68f40c10 | 3181 | resource_size_t mmio_start, mmio_len; |
4d22de3e DLR |
3182 | const struct adapter_info *ai; |
3183 | struct adapter *adapter = NULL; | |
3184 | struct port_info *pi; | |
3185 | ||
3186 | if (!version_printed) { | |
3187 | printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); | |
3188 | ++version_printed; | |
3189 | } | |
3190 | ||
3191 | if (!cxgb3_wq) { | |
3192 | cxgb3_wq = create_singlethread_workqueue(DRV_NAME); | |
3193 | if (!cxgb3_wq) { | |
3194 | printk(KERN_ERR DRV_NAME | |
3195 | ": cannot initialize work queue\n"); | |
3196 | return -ENOMEM; | |
3197 | } | |
3198 | } | |
3199 | ||
7aaaaa1e | 3200 | err = pci_enable_device(pdev); |
4d22de3e | 3201 | if (err) { |
7aaaaa1e KV |
3202 | dev_err(&pdev->dev, "cannot enable PCI device\n"); |
3203 | goto out; | |
4d22de3e DLR |
3204 | } |
3205 | ||
7aaaaa1e | 3206 | err = pci_request_regions(pdev, DRV_NAME); |
4d22de3e | 3207 | if (err) { |
7aaaaa1e KV |
3208 | /* Just info, some other driver may have claimed the device. */ |
3209 | dev_info(&pdev->dev, "cannot obtain PCI resources\n"); | |
3210 | goto out_disable_device; | |
4d22de3e DLR |
3211 | } |
3212 | ||
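/* Prefer 64-bit DMA and coherent masks; fall back to 32-bit DMA if the
 * platform cannot provide them.
 */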
6a35528a | 3213 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
4d22de3e | 3214 | pci_using_dac = 1; |
6a35528a | 3215 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
4d22de3e DLR |
3216 | if (err) { |
3217 | dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " | |
3218 | "coherent allocations\n"); | |
7aaaaa1e | 3219 | goto out_release_regions; |
4d22de3e | 3220 | } |
284901a9 | 3221 | } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { |
4d22de3e | 3222 | dev_err(&pdev->dev, "no usable DMA configuration\n"); |
7aaaaa1e | 3223 | goto out_release_regions; |
4d22de3e DLR |
3224 | } |
3225 | ||
3226 | pci_set_master(pdev); | |
204e2f98 | 3227 | pci_save_state(pdev); |
4d22de3e DLR |
3228 | |
3229 | mmio_start = pci_resource_start(pdev, 0); | |
3230 | mmio_len = pci_resource_len(pdev, 0); | |
3231 | ai = t3_get_adapter_info(ent->driver_data); | |
3232 | ||
3233 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | |
3234 | if (!adapter) { | |
3235 | err = -ENOMEM; | |
7aaaaa1e | 3236 | goto out_release_regions; |
4d22de3e DLR |
3237 | } |
3238 | ||
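/* Pre-allocate a spare skb sized for a CPL_SET_TCB_FIELD message so a
 * critical control message can still be sent if a later atomic
 * allocation fails.
 */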
74b793e1 DLR |
3239 | adapter->nofail_skb = |
3240 | alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL); | |
3241 | if (!adapter->nofail_skb) { | |
3242 | dev_err(&pdev->dev, "cannot allocate nofail buffer\n"); | |
3243 | err = -ENOMEM; | |
3244 | goto out_free_adapter; | |
3245 | } | |
3246 | ||
4d22de3e DLR |
3247 | adapter->regs = ioremap_nocache(mmio_start, mmio_len); |
3248 | if (!adapter->regs) { | |
3249 | dev_err(&pdev->dev, "cannot map device registers\n"); | |
3250 | err = -ENOMEM; | |
3251 | goto out_free_adapter; | |
3252 | } | |
3253 | ||
3254 | adapter->pdev = pdev; | |
3255 | adapter->name = pci_name(pdev); | |
3256 | adapter->msg_enable = dflt_msg_enable; | |
3257 | adapter->mmio_len = mmio_len; | |
3258 | ||
3259 | mutex_init(&adapter->mdio_lock); | |
3260 | spin_lock_init(&adapter->work_lock); | |
3261 | spin_lock_init(&adapter->stats_lock); | |
3262 | ||
3263 | INIT_LIST_HEAD(&adapter->adapter_list); | |
3264 | INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task); | |
20d3fc11 | 3265 | INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task); |
e998f245 SW |
3266 | |
3267 | INIT_WORK(&adapter->db_full_task, db_full_task); | |
3268 | INIT_WORK(&adapter->db_empty_task, db_empty_task); | |
3269 | INIT_WORK(&adapter->db_drop_task, db_drop_task); | |
3270 | ||
4d22de3e DLR |
3271 | INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task); |
3272 | ||
952cdf33 | 3273 | for (i = 0; i < ai->nports0 + ai->nports1; ++i) { |
4d22de3e DLR |
3274 | struct net_device *netdev; |
3275 | ||
82ad3329 | 3276 | netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS); |
4d22de3e DLR |
3277 | if (!netdev) { |
3278 | err = -ENOMEM; | |
3279 | goto out_free_dev; | |
3280 | } | |
3281 | ||
4d22de3e DLR |
3282 | SET_NETDEV_DEV(netdev, &pdev->dev); |
3283 | ||
3284 | adapter->port[i] = netdev; | |
3285 | pi = netdev_priv(netdev); | |
5fbf816f | 3286 | pi->adapter = adapter; |
4d22de3e DLR |
3287 | pi->port_id = i; |
3288 | netif_carrier_off(netdev); | |
3289 | netdev->irq = pdev->irq; | |
3290 | netdev->mem_start = mmio_start; | |
3291 | netdev->mem_end = mmio_start + mmio_len - 1; | |
d2fe2755 | 3292 | netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | |
892ef5d8 JP |
3293 | NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; |
3294 | netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX; | |
4d22de3e DLR |
3295 | if (pci_using_dac) |
3296 | netdev->features |= NETIF_F_HIGHDMA; | |
3297 | ||
dd752696 | 3298 | netdev->netdev_ops = &cxgb_netdev_ops; |
4d22de3e DLR |
3299 | SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); |
3300 | } | |
3301 | ||
5fbf816f | 3302 | pci_set_drvdata(pdev, adapter); |
4d22de3e DLR |
3303 | if (t3_prep_adapter(adapter, ai, 1) < 0) { |
3304 | err = -ENODEV; | |
3305 | goto out_free_dev; | |
3306 | } | |
2eab17ab | 3307 | |
4d22de3e DLR |
3308 | /* |
3309 | * The card is now ready to go. If any errors occur during device | |
3310 | * registration, we do not fail the whole card but rather proceed only | |
3311 | * with the ports we manage to register successfully. However, we must | |
3312 | * register at least one net device. | |
3313 | */ | |
3314 | for_each_port(adapter, i) { | |
3315 | err = register_netdev(adapter->port[i]); | |
3316 | if (err) | |
3317 | dev_warn(&pdev->dev, | |
3318 | "cannot register net device %s, skipping\n", | |
3319 | adapter->port[i]->name); | |
3320 | else { | |
3321 | /* | |
3322 | * Change the name we use for messages to the name of | |
3323 | * the first successfully registered interface. | |
3324 | */ | |
3325 | if (!adapter->registered_device_map) | |
3326 | adapter->name = adapter->port[i]->name; | |
3327 | ||
3328 | __set_bit(i, &adapter->registered_device_map); | |
3329 | } | |
3330 | } | |
3331 | if (!adapter->registered_device_map) { | |
3332 | dev_err(&pdev->dev, "could not register any net devices\n"); | |
3333 | goto out_free_dev; | |
3334 | } | |
3335 | ||
f14d42f3 KX |
3336 | for_each_port(adapter, i) |
3337 | cxgb3_init_iscsi_mac(adapter->port[i]); | |
3338 | ||
4d22de3e DLR |
3339 | /* Driver is ready; reflect this on the LEDs. */ | |
3340 | t3_led_ready(adapter); | |
3341 | ||
3342 | if (is_offload(adapter)) { | |
3343 | __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); | |
3344 | cxgb3_adapter_ofld(adapter); | |
3345 | } | |
3346 | ||
3347 | /* See what interrupts we'll be using */ | |
3348 | if (msi > 1 && cxgb_enable_msix(adapter) == 0) | |
3349 | adapter->flags |= USING_MSIX; | |
3350 | else if (msi > 0 && pci_enable_msi(pdev) == 0) | |
3351 | adapter->flags |= USING_MSI; | |
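/* If neither MSI-X nor MSI could be enabled (or the msi module
 * parameter disallows them) we stay on legacy INTx.
 */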
3352 | ||
8c263761 DLR |
3353 | set_nqsets(adapter); |
3354 | ||
0ee8d33c | 3355 | err = sysfs_create_group(&adapter->port[0]->dev.kobj, |
4d22de3e DLR |
3356 | &cxgb3_attr_group); |
3357 | ||
892ef5d8 JP |
3358 | for_each_port(adapter, i) |
3359 | cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features); | |
3360 | ||
4d22de3e DLR |
3361 | print_port_info(adapter, ai); |
3362 | return 0; | |
3363 | ||
3364 | out_free_dev: | |
3365 | iounmap(adapter->regs); | |
952cdf33 | 3366 | for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i) |
4d22de3e DLR |
3367 | if (adapter->port[i]) |
3368 | free_netdev(adapter->port[i]); | |
3369 | ||
3370 | out_free_adapter: | |
3371 | kfree(adapter); | |
3372 | ||
4d22de3e DLR |
3373 | out_release_regions: |
3374 | pci_release_regions(pdev); | |
7aaaaa1e KV |
3375 | out_disable_device: |
3376 | pci_disable_device(pdev); | |
4d22de3e | 3377 | pci_set_drvdata(pdev, NULL); |
7aaaaa1e | 3378 | out: |
4d22de3e DLR |
3379 | return err; |
3380 | } | |
3381 | ||
3382 | static void __devexit remove_one(struct pci_dev *pdev) | |
3383 | { | |
5fbf816f | 3384 | struct adapter *adapter = pci_get_drvdata(pdev); |
4d22de3e | 3385 | |
5fbf816f | 3386 | if (adapter) { |
4d22de3e | 3387 | int i; |
4d22de3e DLR |
3388 | |
3389 | t3_sge_stop(adapter); | |
0ee8d33c | 3390 | sysfs_remove_group(&adapter->port[0]->dev.kobj, |
4d22de3e DLR |
3391 | &cxgb3_attr_group); |
3392 | ||
4d22de3e DLR |
3393 | if (is_offload(adapter)) { |
3394 | cxgb3_adapter_unofld(adapter); | |
3395 | if (test_bit(OFFLOAD_DEVMAP_BIT, | |
3396 | &adapter->open_device_map)) | |
3397 | offload_close(&adapter->tdev); | |
3398 | } | |
3399 | ||
67d92ab7 DLR |
3400 | for_each_port(adapter, i) |
3401 | if (test_bit(i, &adapter->registered_device_map)) | |
3402 | unregister_netdev(adapter->port[i]); | |
3403 | ||
0ca41c04 | 3404 | t3_stop_sge_timers(adapter); |
4d22de3e DLR |
3405 | t3_free_sge_resources(adapter); |
3406 | cxgb_disable_msi(adapter); | |
3407 | ||
4d22de3e DLR |
3408 | for_each_port(adapter, i) |
3409 | if (adapter->port[i]) | |
3410 | free_netdev(adapter->port[i]); | |
3411 | ||
3412 | iounmap(adapter->regs); | |
74b793e1 DLR |
3413 | if (adapter->nofail_skb) |
3414 | kfree_skb(adapter->nofail_skb); | |
4d22de3e DLR |
3415 | kfree(adapter); |
3416 | pci_release_regions(pdev); | |
3417 | pci_disable_device(pdev); | |
3418 | pci_set_drvdata(pdev, NULL); | |
3419 | } | |
3420 | } | |
3421 | ||
3422 | static struct pci_driver driver = { | |
3423 | .name = DRV_NAME, | |
3424 | .id_table = cxgb3_pci_tbl, | |
3425 | .probe = init_one, | |
3426 | .remove = __devexit_p(remove_one), | |
91a6b50c | 3427 | .err_handler = &t3_err_handler, |
4d22de3e DLR |
3428 | }; |
3429 | ||
3430 | static int __init cxgb3_init_module(void) | |
3431 | { | |
3432 | int ret; | |
3433 | ||
3434 | cxgb3_offload_init(); | |
3435 | ||
3436 | ret = pci_register_driver(&driver); | |
3437 | return ret; | |
3438 | } | |
3439 | ||
3440 | static void __exit cxgb3_cleanup_module(void) | |
3441 | { | |
3442 | pci_unregister_driver(&driver); | |
3443 | if (cxgb3_wq) | |
3444 | destroy_workqueue(cxgb3_wq); | |
3445 | } | |
3446 | ||
3447 | module_init(cxgb3_init_module); | |
3448 | module_exit(cxgb3_cleanup_module); |