1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/bitops.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/if_vlan.h>
37#include <linux/skbuff.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <linux/prefetch.h>
42#include <net/ip6_checksum.h>
43
44#include "qlge.h"
45
46char qlge_driver_name[] = DRV_NAME;
47const char qlge_driver_version[] = DRV_VERSION;
48
49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50MODULE_DESCRIPTION(DRV_STRING " ");
51MODULE_LICENSE("GPL");
52MODULE_VERSION(DRV_VERSION);
53
54static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56/* NETIF_MSG_TIMER | */
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61/* NETIF_MSG_TX_QUEUED | */
62/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63/* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66static int debug = -1; /* defaults above */
67module_param(debug, int, 0664);
68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int qlge_irq_type = MSIX_IRQ;
74module_param(qlge_irq_type, int, 0664);
75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77static int qlge_mpi_coredump;
78module_param(qlge_mpi_coredump, int, 0);
79MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. "
81 "Default is OFF - Do Not allocate memory. ");
82
83static int qlge_force_coredump;
84module_param(qlge_force_coredump, int, 0);
85MODULE_PARM_DESC(qlge_force_coredump,
86 "Option to allow force of firmware core dump. "
87 "Default is OFF - Do not allow.");
88
89static const struct pci_device_id qlge_pci_tbl[] = {
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 /* required last entry */
93 {0,}
94};
95
96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97
98static int ql_wol(struct ql_adapter *);
99static void qlge_set_multicast_list(struct net_device *);
100static int ql_adapter_down(struct ql_adapter *);
101static int ql_adapter_up(struct ql_adapter *);
102
103/* This hardware semaphore causes exclusive access to
104 * resources shared between the NIC driver, MPI firmware,
105 * FCOE firmware and the FC driver.
106 */
107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108{
109 u32 sem_bits = 0;
110
111 switch (sem_mask) {
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 break;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 break;
118 case SEM_ICB_MASK:
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 break;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 break;
124 case SEM_FLASH_MASK:
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 break;
127 case SEM_PROBE_MASK:
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 break;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 break;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 break;
136 default:
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
138 return -EINVAL;
139 }
140
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
143}
144
145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146{
147 unsigned int wait_count = 30;
148 do {
149 if (!ql_sem_trylock(qdev, sem_mask))
150 return 0;
151 udelay(100);
152 } while (--wait_count);
153 return -ETIMEDOUT;
154}
155
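/* Release a hardware semaphore taken with ql_sem_spinlock()/ql_sem_trylock(). */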
156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157{
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
160}
161
162/* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialize
164 * process, but is also used in kernel thread API such as
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 */
167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168{
169 u32 temp;
170 int count = UDELAY_COUNT;
171
172 while (count) {
173 temp = ql_read32(qdev, reg);
174
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
179 reg, temp);
180 return -EIO;
181 } else if (temp & bit)
182 return 0;
183 udelay(UDELAY_DELAY);
184 count--;
185 }
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
188 return -ETIMEDOUT;
189}
190
191/* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
193 */
194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195{
196 int count = UDELAY_COUNT;
197 u32 temp;
198
199 while (count) {
200 temp = ql_read32(qdev, CFG);
201 if (temp & CFG_LE)
202 return -EIO;
203 if (!(temp & bit))
204 return 0;
205 udelay(UDELAY_DELAY);
206 count--;
207 }
208 return -ETIMEDOUT;
209}
210
211
212/* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
214 */
215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 u16 q_id)
217{
218 u64 map;
219 int status = 0;
220 int direction;
221 u32 mask;
222 u32 value;
223
224 direction =
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 PCI_DMA_FROMDEVICE;
227
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 return -ENOMEM;
232 }
233
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 if (status)
236 return status;
237
238 status = ql_wait_cfg(qdev, bit);
239 if (status) {
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
242 goto exit;
243 }
244
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
251
252 /*
253 * Wait for the bit to clear after signaling hw.
254 */
255 status = ql_wait_cfg(qdev, bit);
256exit:
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
259 return status;
260}
261
262/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 u32 *value)
265{
266 u32 offset = 0;
267 int status;
268
269 switch (type) {
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
272 {
273 status =
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
276 if (status)
277 goto exit;
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 status =
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
284 if (status)
285 goto exit;
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 status =
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
290 if (status)
291 goto exit;
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 status =
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
298 if (status)
299 goto exit;
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 status =
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
305 if (status)
306 goto exit;
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 status =
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 MAC_ADDR_MR, 0);
313 if (status)
314 goto exit;
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 }
317 break;
318 }
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
321 default:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
324 status = -EPERM;
325 }
326exit:
327 return status;
328}
329
330/* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
332 */
333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 u16 index)
335{
336 u32 offset = 0;
337 int status = 0;
338
c4e84bde
RM
339 switch (type) {
340 case MAC_ADDR_TYPE_MULTI_MAC:
341 {
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
345
346 status =
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 if (status)
350 goto exit;
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
353 type | MAC_ADDR_E);
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 status =
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 if (status)
359 goto exit;
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
362 type | MAC_ADDR_E);
363
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 status =
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 goto exit;
370 break;
371 }
372 case MAC_ADDR_TYPE_CAM_MAC:
373 {
374 u32 cam_output;
375 u32 upper = (addr[0] << 8) | addr[1];
376 u32 lower =
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 (addr[5]);
379 status =
380 ql_wait_reg_rdy(qdev,
381 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
382 if (status)
383 goto exit;
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 type); /* type */
387 ql_write32(qdev, MAC_ADDR_DATA, lower);
388 status =
389 ql_wait_reg_rdy(qdev,
390 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
391 if (status)
392 goto exit;
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 type); /* type */
396 ql_write32(qdev, MAC_ADDR_DATA, upper);
397 status =
398 ql_wait_reg_rdy(qdev,
399 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
400 if (status)
401 goto exit;
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
403 (index << MAC_ADDR_IDX_SHIFT) | /* index */
404 type); /* type */
405 /* This field should also include the queue id
406 and possibly the function id. Right now we hardcode
407 the route field to NIC core.
408 */
409 cam_output = (CAM_OUT_ROUTE_NIC |
410 (qdev->
411 func << CAM_OUT_FUNC_SHIFT) |
412 (0 << CAM_OUT_CQ_ID_SHIFT));
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 cam_output |= CAM_OUT_RV;
415 /* route to NIC core */
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
417 break;
418 }
419 case MAC_ADDR_TYPE_VLAN:
420 {
421 u32 enable_bit = *((u32 *) &addr[0]);
422 /* For VLAN, the addr actually holds a bit that
423 * either enables or disables the vlan id we are
424 * addressing. It's either MAC_ADDR_E on or off.
425 * That's bit-27 we're talking about.
426 */
427 status =
428 ql_wait_reg_rdy(qdev,
429 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
430 if (status)
431 goto exit;
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
433 (index << MAC_ADDR_IDX_SHIFT) | /* index */
434 type | /* type */
435 enable_bit); /* enable/disable */
436 break;
437 }
438 case MAC_ADDR_TYPE_MULTI_FLTR:
439 default:
440 netif_crit(qdev, ifup, qdev->ndev,
441 "Address type %d not yet supported.\n", type);
442 status = -EPERM;
443 }
444exit:
445 return status;
446}
447
448/* Set or clear MAC address in hardware. We sometimes
449 * have to clear it to prevent wrong frame routing
450 * especially in a bonding environment.
451 */
452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
453{
454 int status;
455 char zero_mac_addr[ETH_ALEN];
456 char *addr;
457
458 if (set) {
459 addr = &qdev->current_mac_addr[0];
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 "Set Mac addr %pM\n", addr);
462 } else {
463 eth_zero_addr(zero_mac_addr);
464 addr = &zero_mac_addr[0];
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 "Clearing MAC address\n");
467 }
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
469 if (status)
470 return status;
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 if (status)
475 netif_err(qdev, ifup, qdev->ndev,
476 "Failed to init mac address.\n");
477 return status;
478}
479
480void ql_link_on(struct ql_adapter *qdev)
481{
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 netif_carrier_on(qdev->ndev);
484 ql_set_mac_addr(qdev, 1);
485}
486
487void ql_link_off(struct ql_adapter *qdev)
488{
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 netif_carrier_off(qdev->ndev);
491 ql_set_mac_addr(qdev, 0);
492}
493
494/* Get a specific frame routing value from the CAM.
495 * Used for debug and reg dump.
496 */
497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
498{
499 int status = 0;
500
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
502 if (status)
503 goto exit;
504
505 ql_write32(qdev, RT_IDX,
506 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
508 if (status)
509 goto exit;
510 *value = ql_read32(qdev, RT_DATA);
511exit:
512 return status;
513}
514
515/* The NIC function for this chip has 16 routing indexes. Each one can be used
516 * to route different frame types to various inbound queues. We send broadcast/
517 * multicast/error frames to the default queue for slow handling,
518 * and CAM hit/RSS frames to the fast handling queues.
519 */
520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
521 int enable)
522{
523 int status = -EINVAL; /* Return error if no mask match. */
524 u32 value = 0;
525
526 switch (mask) {
527 case RT_IDX_CAM_HIT:
528 {
529 value = RT_IDX_DST_CAM_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 break;
533 }
534 case RT_IDX_VALID: /* Promiscuous Mode frames. */
535 {
536 value = RT_IDX_DST_DFLT_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 break;
540 }
541 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
542 {
543 value = RT_IDX_DST_DFLT_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
546 break;
547 }
548 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 {
550 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_IP_CSUM_ERR_SLOT <<
553 RT_IDX_IDX_SHIFT); /* index */
554 break;
555 }
556 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 {
558 value = RT_IDX_DST_DFLT_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 RT_IDX_IDX_SHIFT); /* index */
562 break;
563 }
564 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
565 {
566 value = RT_IDX_DST_DFLT_Q | /* dest */
567 RT_IDX_TYPE_NICQ | /* type */
568 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
569 break;
570 }
571 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
572 {
573 value = RT_IDX_DST_DFLT_Q | /* dest */
574 RT_IDX_TYPE_NICQ | /* type */
575 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
576 break;
577 }
578 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
579 {
580 value = RT_IDX_DST_DFLT_Q | /* dest */
581 RT_IDX_TYPE_NICQ | /* type */
582 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
583 break;
584 }
585 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
586 {
587 value = RT_IDX_DST_RSS | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
590 break;
591 }
592 case 0: /* Clear the E-bit on an entry. */
593 {
594 value = RT_IDX_DST_DFLT_Q | /* dest */
595 RT_IDX_TYPE_NICQ | /* type */
596 (index << RT_IDX_IDX_SHIFT);/* index */
597 break;
598 }
599 default:
600 netif_err(qdev, ifup, qdev->ndev,
601 "Mask type %d not yet supported.\n", mask);
602 status = -EPERM;
603 goto exit;
604 }
605
606 if (value) {
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
608 if (status)
609 goto exit;
610 value |= (enable ? RT_IDX_E : 0);
611 ql_write32(qdev, RT_IDX, value);
612 ql_write32(qdev, RT_DATA, enable ? mask : 0);
613 }
614exit:
615 return status;
616}
617
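/* Chip-wide interrupt enable/disable via the INTR_EN_EI bit; per-vector masking is handled by the completion-interrupt helpers below. */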
618static void ql_enable_interrupts(struct ql_adapter *qdev)
619{
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
621}
622
623static void ql_disable_interrupts(struct ql_adapter *qdev)
624{
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
626}
627
628/* If we're running with multiple MSI-X vectors then we enable on the fly.
629 * Otherwise, we may have multiple outstanding workers and don't want to
630 * enable until the last one finishes. In this case, the irq_cnt gets
631 * incremented every time we queue a worker and decremented every time
632 * a worker finishes. Once it hits zero we enable the interrupt.
633 */
634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
635{
636 u32 var = 0;
637 unsigned long hw_flags = 0;
638 struct intr_context *ctx = qdev->intr_context + intr;
639
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 /* Always enable if we're MSIX multi interrupts and
642 * it's not the default (zeroeth) interrupt.
643 */
644 ql_write32(qdev, INTR_EN,
645 ctx->intr_en_mask);
646 var = ql_read32(qdev, STS);
647 return var;
648 }
649
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 ql_write32(qdev, INTR_EN,
653 ctx->intr_en_mask);
654 var = ql_read32(qdev, STS);
655 }
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
657 return var;
658}
659
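/* Disable a vector's completion interrupt; ctx->irq_cnt counts outstanding disables so the matching enable only re-arms when it drops back to zero. */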
660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
661{
662 u32 var = 0;
663 struct intr_context *ctx;
664
665 /* HW disables for us if we're MSIX multi interrupts and
666 * it's not the default (zeroeth) interrupt.
667 */
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
669 return 0;
670
671 ctx = qdev->intr_context + intr;
672 spin_lock(&qdev->hw_lock);
673 if (!atomic_read(&ctx->irq_cnt)) {
674 ql_write32(qdev, INTR_EN,
675 ctx->intr_dis_mask);
676 var = ql_read32(qdev, STS);
677 }
678 atomic_inc(&ctx->irq_cnt);
679 spin_unlock(&qdev->hw_lock);
680 return var;
681}
682
683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
684{
685 int i;
686 for (i = 0; i < qdev->intr_count; i++) {
687 /* The enable call does a atomic_dec_and_test
688 * and enables only if the result is zero.
689 * So we precharge it here.
690 */
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 i == 0))
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 ql_enable_completion_interrupt(qdev, i);
695 }
696
697}
698
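/* Check the flash signature string and 16-bit word checksum; returns 0 when the image appears valid. */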
699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
700{
701 int status, i;
702 u16 csum = 0;
703 __le16 *flash = (__le16 *)&qdev->flash;
704
705 status = strncmp((char *)&qdev->flash, str, 4);
706 if (status) {
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
708 return status;
709 }
710
711 for (i = 0; i < size; i++)
712 csum += le16_to_cpu(*flash++);
713
714 if (csum)
715 netif_err(qdev, ifup, qdev->ndev,
716 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
717
718 return csum;
719}
720
721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
722{
723 int status = 0;
724 /* wait for reg to come ready */
725 status = ql_wait_reg_rdy(qdev,
726 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
727 if (status)
728 goto exit;
729 /* set up for reg read */
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 /* wait for reg to come ready */
732 status = ql_wait_reg_rdy(qdev,
733 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
734 if (status)
735 goto exit;
736 /* This data is stored on flash as an array of
737 * __le32. Since ql_read32() returns cpu endian
738 * we need to swap it back.
739 */
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
741exit:
742 return status;
743}
744
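/* Read and validate the 8000-series flash parameter block, then extract this function's MAC address from it. */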
745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
746{
747 u32 i, size;
748 int status;
749 __le32 *p = (__le32 *)&qdev->flash;
750 u32 offset;
751 u8 mac_addr[6];
752
753 /* Get flash offset for function and adjust
754 * for dword access.
755 */
756 if (!qdev->port)
757 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 else
759 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
762 return -ETIMEDOUT;
763
764 size = sizeof(struct flash_params_8000) / sizeof(u32);
765 for (i = 0; i < size; i++, p++) {
766 status = ql_read_flash_word(qdev, i+offset, p);
767 if (status) {
768 netif_err(qdev, ifup, qdev->ndev,
769 "Error reading flash.\n");
770 goto exit;
771 }
772 }
773
774 status = ql_validate_flash(qdev,
775 sizeof(struct flash_params_8000) / sizeof(u16),
776 "8000");
777 if (status) {
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
779 status = -EINVAL;
780 goto exit;
781 }
782
783 /* Extract either manufacturer or BOFM modified
784 * MAC address.
785 */
786 if (qdev->flash.flash_params_8000.data_type1 == 2)
787 memcpy(mac_addr,
788 qdev->flash.flash_params_8000.mac_addr1,
789 qdev->ndev->addr_len);
790 else
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr,
793 qdev->ndev->addr_len);
794
795 if (!is_valid_ether_addr(mac_addr)) {
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
797 status = -EINVAL;
798 goto exit;
799 }
800
801 memcpy(qdev->ndev->dev_addr,
802 mac_addr,
803 qdev->ndev->addr_len);
804
805exit:
806 ql_sem_unlock(qdev, SEM_FLASH_MASK);
807 return status;
808}
809
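/* Read and validate the 8012-series flash parameter block for this function's port. */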
810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
811{
812 int i;
813 int status;
814 __le32 *p = (__le32 *)&qdev->flash;
815 u32 offset = 0;
816 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817
818 /* Second function's parameters follow the first
819 * function's.
820 */
821 if (qdev->port)
822 offset = size;
823
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
825 return -ETIMEDOUT;
826
827 for (i = 0; i < size; i++, p++) {
828 status = ql_read_flash_word(qdev, i+offset, p);
829 if (status) {
830 netif_err(qdev, ifup, qdev->ndev,
831 "Error reading flash.\n");
832 goto exit;
833 }
834
835 }
836
837 status = ql_validate_flash(qdev,
838 sizeof(struct flash_params_8012) / sizeof(u16),
839 "8012");
840 if (status) {
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
842 status = -EINVAL;
843 goto exit;
844 }
845
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
847 status = -EINVAL;
848 goto exit;
849 }
850
851 memcpy(qdev->ndev->dev_addr,
852 qdev->flash.flash_params_8012.mac_addr,
853 qdev->ndev->addr_len);
854
855exit:
856 ql_sem_unlock(qdev, SEM_FLASH_MASK);
857 return status;
858}
859
860/* xgmac registers are located behind the xgmac_addr and xgmac_data
861 * register pair. Each read/write requires us to wait for the ready
862 * bit before reading/writing the data.
863 */
864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
865{
866 int status;
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 if (status)
871 return status;
872 /* write the data to the data reg */
873 ql_write32(qdev, XGMAC_DATA, data);
874 /* trigger the write */
875 ql_write32(qdev, XGMAC_ADDR, reg);
876 return status;
877}
878
879/* xgmac registers are located behind the xgmac_addr and xgmac_data
880 * register pair. Each read/write requires us to wait for the ready
881 * bit before reading/writing the data.
882 */
883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
884{
885 int status = 0;
886 /* wait for reg to come ready */
887 status = ql_wait_reg_rdy(qdev,
888 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
889 if (status)
890 goto exit;
891 /* set up for reg read */
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 /* wait for reg to come ready */
894 status = ql_wait_reg_rdy(qdev,
895 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
896 if (status)
897 goto exit;
898 /* get the data */
899 *data = ql_read32(qdev, XGMAC_DATA);
900exit:
901 return status;
902}
903
904/* This is used for reading the 64-bit statistics regs. */
905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
906{
907 int status = 0;
908 u32 hi = 0;
909 u32 lo = 0;
910
911 status = ql_read_xgmac_reg(qdev, reg, &lo);
912 if (status)
913 goto exit;
914
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
916 if (status)
917 goto exit;
918
919 *data = (u64) lo | ((u64) hi << 32);
920
921exit:
922 return status;
923}
924
925static int ql_8000_port_initialize(struct ql_adapter *qdev)
926{
927 int status;
928 /*
929 * Get MPI firmware version for driver banner
930 * and ethtool info.
931 */
932 status = ql_mb_about_fw(qdev);
933 if (status)
934 goto exit;
935 status = ql_mb_get_fw_state(qdev);
936 if (status)
937 goto exit;
938 /* Wake up a worker to get/set the TX/RX frame sizes. */
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
940exit:
941 return status;
942}
943
944/* Take the MAC Core out of reset.
945 * Enable statistics counting.
946 * Take the transmitter/receiver out of reset.
947 * This functionality may be done in the MPI firmware at a
948 * later date.
949 */
950static int ql_8012_port_initialize(struct ql_adapter *qdev)
951{
952 int status = 0;
953 u32 data;
954
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 /* Another function has the semaphore, so
957 * wait for the port init bit to come ready.
958 */
959 netif_info(qdev, link, qdev->ndev,
960 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 if (status) {
963 netif_crit(qdev, link, qdev->ndev,
964 "Port initialize timed out.\n");
965 }
966 return status;
967 }
968
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
970 /* Set the core reset. */
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
972 if (status)
973 goto end;
974 data |= GLOBAL_CFG_RESET;
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
976 if (status)
977 goto end;
978
979 /* Clear the core reset and turn on jumbo for receiver. */
980 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
981 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
982 data |= GLOBAL_CFG_TX_STAT_EN;
983 data |= GLOBAL_CFG_RX_STAT_EN;
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
985 if (status)
986 goto end;
987
988 /* Enable transmitter, and clear its reset. */
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
990 if (status)
991 goto end;
992 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
993 data |= TX_CFG_EN; /* Enable the transmitter. */
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
995 if (status)
996 goto end;
997
998 /* Enable receiver and clear its reset. */
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000 if (status)
1001 goto end;
1002 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1003 data |= RX_CFG_EN; /* Enable the receiver. */
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005 if (status)
1006 goto end;
1007
1008 /* Turn on jumbo. */
1009 status =
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011 if (status)
1012 goto end;
1013 status =
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015 if (status)
1016 goto end;
1017
1018 /* Signal to the world that the port is enabled. */
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022 return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027 return PAGE_SIZE << qdev->lbq_buf_order;
1028}
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 rx_ring->lbq_curr_idx++;
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 rx_ring->lbq_curr_idx = 0;
1037 rx_ring->lbq_free_cnt++;
1038 return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 struct rx_ring *rx_ring)
1043{
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046 pci_dma_sync_single_for_cpu(qdev->pdev,
1047 dma_unmap_addr(lbq_desc, mapaddr),
1048 rx_ring->lbq_buf_size,
1049 PCI_DMA_FROMDEVICE);
1050
1051 /* If it's the last chunk of our master page then
1052 * we unmap it.
1053 */
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 == ql_lbq_block_size(qdev))
1056 pci_unmap_page(qdev->pdev,
1057 lbq_desc->p.pg_chunk.map,
1058 ql_lbq_block_size(qdev),
1059 PCI_DMA_FROMDEVICE);
1060 return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 rx_ring->sbq_curr_idx++;
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 rx_ring->sbq_curr_idx = 0;
1070 rx_ring->sbq_free_cnt++;
1071 return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077 rx_ring->cnsmr_idx++;
1078 rx_ring->curr_entry++;
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 rx_ring->cnsmr_idx = 0;
1081 rx_ring->curr_entry = rx_ring->cq_base;
1082 }
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
1089
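/* Hand out the next large-buffer chunk from the current master page, allocating and DMA-mapping a fresh page when the old one is used up. */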
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 struct bq_desc *lbq_desc)
1092{
1093 if (!rx_ring->pg_chunk.page) {
1094 u64 map;
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
1096 qdev->lbq_buf_order);
1097 if (unlikely(!rx_ring->pg_chunk.page)) {
1098 netif_err(qdev, drv, qdev->ndev,
1099 "page allocation failed.\n");
1100 return -ENOMEM;
1101 }
1102 rx_ring->pg_chunk.offset = 0;
1103 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 0, ql_lbq_block_size(qdev),
1105 PCI_DMA_FROMDEVICE);
1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order);
1109 rx_ring->pg_chunk.page = NULL;
1110 netif_err(qdev, drv, qdev->ndev,
1111 "PCI mapping failed.\n");
1112 return -ENOMEM;
1113 }
1114 rx_ring->pg_chunk.map = map;
1115 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1116 }
1117
1118 /* Copy the current master pg_chunk info
1119 * to the current descriptor.
1120 */
1121 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122
1123 /* Adjust the master page chunk for next
1124 * buffer get.
1125 */
1126 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1127 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1128 rx_ring->pg_chunk.page = NULL;
1129 lbq_desc->p.pg_chunk.last_flag = 1;
1130 } else {
1131 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1132 get_page(rx_ring->pg_chunk.page);
1133 lbq_desc->p.pg_chunk.last_flag = 0;
1134 }
1135 return 0;
1136}
1137/* Process (refill) a large buffer queue. */
1138static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139{
1140 u32 clean_idx = rx_ring->lbq_clean_idx;
1141 u32 start_idx = clean_idx;
1142 struct bq_desc *lbq_desc;
1143 u64 map;
1144 int i;
1145
1146 while (rx_ring->lbq_free_cnt > 32) {
1147 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1148 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1149 "lbq: try cleaning clean_idx = %d.\n",
1150 clean_idx);
1151 lbq_desc = &rx_ring->lbq[clean_idx];
1152 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1153 rx_ring->lbq_clean_idx = clean_idx;
1154 netif_err(qdev, ifup, qdev->ndev,
1155 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1156 i, clean_idx);
1157 return;
1158 }
1159
1160 map = lbq_desc->p.pg_chunk.map +
1161 lbq_desc->p.pg_chunk.offset;
1162 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1163 dma_unmap_len_set(lbq_desc, maplen,
1164 rx_ring->lbq_buf_size);
1165 *lbq_desc->addr = cpu_to_le64(map);
1166
1167 pci_dma_sync_single_for_device(qdev->pdev, map,
1168 rx_ring->lbq_buf_size,
1169 PCI_DMA_FROMDEVICE);
1170 clean_idx++;
1171 if (clean_idx == rx_ring->lbq_len)
1172 clean_idx = 0;
1173 }
1174
1175 rx_ring->lbq_clean_idx = clean_idx;
1176 rx_ring->lbq_prod_idx += 16;
1177 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1178 rx_ring->lbq_prod_idx = 0;
1179 rx_ring->lbq_free_cnt -= 16;
1180 }
1181
1182 if (start_idx != clean_idx) {
1183 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 "lbq: updating prod idx = %d.\n",
1185 rx_ring->lbq_prod_idx);
1186 ql_write_db_reg(rx_ring->lbq_prod_idx,
1187 rx_ring->lbq_prod_idx_db_reg);
1188 }
1189}
1190
1191/* Process (refill) a small buffer queue. */
1192static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193{
1194 u32 clean_idx = rx_ring->sbq_clean_idx;
1195 u32 start_idx = clean_idx;
1196 struct bq_desc *sbq_desc;
1197 u64 map;
1198 int i;
1199
1200 while (rx_ring->sbq_free_cnt > 16) {
1201 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1202 sbq_desc = &rx_ring->sbq[clean_idx];
1203 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1204 "sbq: try cleaning clean_idx = %d.\n",
1205 clean_idx);
1206 if (sbq_desc->p.skb == NULL) {
1207 netif_printk(qdev, rx_status, KERN_DEBUG,
1208 qdev->ndev,
1209 "sbq: getting new skb for index %d.\n",
1210 sbq_desc->index);
1211 sbq_desc->p.skb =
1212 netdev_alloc_skb(qdev->ndev,
1213 SMALL_BUFFER_SIZE);
1214 if (sbq_desc->p.skb == NULL) {
1215 rx_ring->sbq_clean_idx = clean_idx;
1216 return;
1217 }
1218 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1219 map = pci_map_single(qdev->pdev,
1220 sbq_desc->p.skb->data,
1221 rx_ring->sbq_buf_size,
1222 PCI_DMA_FROMDEVICE);
1223 if (pci_dma_mapping_error(qdev->pdev, map)) {
1224 netif_err(qdev, ifup, qdev->ndev,
1225 "PCI mapping failed.\n");
1226 rx_ring->sbq_clean_idx = clean_idx;
1227 dev_kfree_skb_any(sbq_desc->p.skb);
1228 sbq_desc->p.skb = NULL;
1229 return;
1230 }
1231 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1232 dma_unmap_len_set(sbq_desc, maplen,
1233 rx_ring->sbq_buf_size);
1234 *sbq_desc->addr = cpu_to_le64(map);
1235 }
1236
1237 clean_idx++;
1238 if (clean_idx == rx_ring->sbq_len)
1239 clean_idx = 0;
1240 }
1241 rx_ring->sbq_clean_idx = clean_idx;
1242 rx_ring->sbq_prod_idx += 16;
1243 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1244 rx_ring->sbq_prod_idx = 0;
1245 rx_ring->sbq_free_cnt -= 16;
1246 }
1247
1248 if (start_idx != clean_idx) {
1249 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1250 "sbq: updating prod idx = %d.\n",
1251 rx_ring->sbq_prod_idx);
1252 ql_write_db_reg(rx_ring->sbq_prod_idx,
1253 rx_ring->sbq_prod_idx_db_reg);
1254 }
1255}
1256
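/* Refill both the small and large buffer queues for this rx ring. */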
1257static void ql_update_buffer_queues(struct ql_adapter *qdev,
1258 struct rx_ring *rx_ring)
1259{
1260 ql_update_sbq(qdev, rx_ring);
1261 ql_update_lbq(qdev, rx_ring);
1262}
1263
1264/* Unmaps tx buffers. Can be called from send() if a pci mapping
1265 * fails at some stage, or from the interrupt when a tx completes.
1266 */
1267static void ql_unmap_send(struct ql_adapter *qdev,
1268 struct tx_ring_desc *tx_ring_desc, int mapped)
1269{
1270 int i;
1271 for (i = 0; i < mapped; i++) {
1272 if (i == 0 || (i == 7 && mapped > 7)) {
1273 /*
1274 * Unmap the skb->data area, or the
1275 * external sglist (AKA the Outbound
1276 * Address List (OAL)).
1277 * If it's the zeroeth element, then it's
1278 * the skb->data area. If it's the 7th
1279 * element and there are more than 6 frags,
1280 * then it's an OAL.
1281 */
1282 if (i == 7) {
1283 netif_printk(qdev, tx_done, KERN_DEBUG,
1284 qdev->ndev,
1285 "unmapping OAL area.\n");
1286 }
1287 pci_unmap_single(qdev->pdev,
1288 dma_unmap_addr(&tx_ring_desc->map[i],
1289 mapaddr),
1290 dma_unmap_len(&tx_ring_desc->map[i],
1291 maplen),
1292 PCI_DMA_TODEVICE);
1293 } else {
1294 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295 "unmapping frag %d.\n", i);
1296 pci_unmap_page(qdev->pdev,
1297 dma_unmap_addr(&tx_ring_desc->map[i],
1298 mapaddr),
1299 dma_unmap_len(&tx_ring_desc->map[i],
1300 maplen), PCI_DMA_TODEVICE);
1301 }
1302 }
1303
1304}
1305
1306/* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308 */
1309static int ql_map_send(struct ql_adapter *qdev,
1310 struct ob_mac_iocb_req *mac_iocb_ptr,
1311 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312{
1313 int len = skb_headlen(skb);
1314 dma_addr_t map;
1315 int frag_idx, err, map_idx = 0;
1316 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 int frag_cnt = skb_shinfo(skb)->nr_frags;
1318
1319 if (frag_cnt) {
1320 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321 "frag_cnt = %d.\n", frag_cnt);
1322 }
1323 /*
1324 * Map the skb buffer first.
1325 */
1326 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1327
1328 err = pci_dma_mapping_error(qdev->pdev, map);
1329 if (err) {
1330 netif_err(qdev, tx_queued, qdev->ndev,
1331 "PCI mapping failed with error: %d\n", err);
1332
1333 return NETDEV_TX_BUSY;
1334 }
1335
1336 tbd->len = cpu_to_le32(len);
1337 tbd->addr = cpu_to_le64(map);
1338 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1340 map_idx++;
1341
1342 /*
1343 * This loop fills the remainder of the 8 address descriptors
1344 * in the IOCB. If there are more than 7 fragments, then the
1345 * eighth address desc will point to an external list (OAL).
1346 * When this happens, the remainder of the frags will be stored
1347 * in this list.
1348 */
1349 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1351 tbd++;
1352 if (frag_idx == 6 && frag_cnt > 7) {
1353 /* Let's tack on an sglist.
1354 * Our control block will now
1355 * look like this:
1356 * iocb->seg[0] = skb->data
1357 * iocb->seg[1] = frag[0]
1358 * iocb->seg[2] = frag[1]
1359 * iocb->seg[3] = frag[2]
1360 * iocb->seg[4] = frag[3]
1361 * iocb->seg[5] = frag[4]
1362 * iocb->seg[6] = frag[5]
1363 * iocb->seg[7] = ptr to OAL (external sglist)
1364 * oal->seg[0] = frag[6]
1365 * oal->seg[1] = frag[7]
1366 * oal->seg[2] = frag[8]
1367 * oal->seg[3] = frag[9]
1368 * oal->seg[4] = frag[10]
1369 * etc...
1370 */
1371 /* Tack on the OAL in the eighth segment of IOCB. */
1372 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1373 sizeof(struct oal),
1374 PCI_DMA_TODEVICE);
1375 err = pci_dma_mapping_error(qdev->pdev, map);
1376 if (err) {
1377 netif_err(qdev, tx_queued, qdev->ndev,
1378 "PCI mapping outbound address list with error: %d\n",
1379 err);
1380 goto map_error;
1381 }
1382
1383 tbd->addr = cpu_to_le64(map);
1384 /*
1385 * The length is the number of fragments
1386 * that remain to be mapped times the length
1387 * of our sglist (OAL).
1388 */
1389 tbd->len =
1390 cpu_to_le32((sizeof(struct tx_buf_desc) *
1391 (frag_cnt - frag_idx)) | TX_DESC_C);
1392 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1393 map);
1394 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395 sizeof(struct oal));
1396 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1397 map_idx++;
1398 }
1399
1400 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1401 DMA_TO_DEVICE);
1402
1403 err = dma_mapping_error(&qdev->pdev->dev, map);
1404 if (err) {
1405 netif_err(qdev, tx_queued, qdev->ndev,
1406 "PCI mapping frags failed with error: %d.\n",
1407 err);
1408 goto map_error;
1409 }
1410
1411 tbd->addr = cpu_to_le64(map);
1412 tbd->len = cpu_to_le32(skb_frag_size(frag));
1413 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415 skb_frag_size(frag));
1416
1417 }
1418 /* Save the number of segments we've mapped. */
1419 tx_ring_desc->map_cnt = map_idx;
1420 /* Terminate the last segment. */
1421 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422 return NETDEV_TX_OK;
1423
1424map_error:
1425 /*
1426 * If the first frag mapping failed, then i will be zero.
1427 * This causes the unmap of the skb->data area. Otherwise
1428 * we pass in the number of frags that mapped successfully
1429 * so they can be unmapped.
1430 */
1431 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432 return NETDEV_TX_BUSY;
1433}
1434
1435/* Categorizing receive firmware frame errors */
1436static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437 struct rx_ring *rx_ring)
1438{
1439 struct nic_stats *stats = &qdev->nic_stats;
1440
1441 stats->rx_err_count++;
1442 rx_ring->rx_errors++;
1443
1444 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446 stats->rx_code_err++;
1447 break;
1448 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449 stats->rx_oversize_err++;
1450 break;
1451 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452 stats->rx_undersize_err++;
1453 break;
1454 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455 stats->rx_preamble_err++;
1456 break;
1457 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458 stats->rx_frame_len_err++;
1459 break;
1460 case IB_MAC_IOCB_RSP_ERR_CRC:
1461 stats->rx_crc_err++;
1462 default:
1463 break;
1464 }
1465}
1466
1467/**
1468 * ql_update_mac_hdr_len - helper routine to update the mac header length
1469 * based on vlan tags if present
1470 */
1471static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 void *page, size_t *len)
1474{
1475 u16 *tags;
1476
1477 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1478 return;
1479 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1480 tags = (u16 *)page;
1481 /* Look for stacked vlan tags in ethertype field */
1482 if (tags[6] == ETH_P_8021Q &&
1483 tags[8] == ETH_P_8021Q)
1484 *len += 2 * VLAN_HLEN;
1485 else
1486 *len += VLAN_HLEN;
1487 }
1488}
1489
1490/* Process an inbound completion from an rx ring. */
1491static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1492 struct rx_ring *rx_ring,
1493 struct ib_mac_iocb_rsp *ib_mac_rsp,
1494 u32 length,
1495 u16 vlan_id)
1496{
1497 struct sk_buff *skb;
1498 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1499 struct napi_struct *napi = &rx_ring->napi;
1500
1501 /* Frame error, so drop the packet. */
1502 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1503 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1504 put_page(lbq_desc->p.pg_chunk.page);
1505 return;
1506 }
1507 napi->dev = qdev->ndev;
1508
1509 skb = napi_get_frags(napi);
1510 if (!skb) {
1511 netif_err(qdev, drv, qdev->ndev,
1512 "Couldn't get an skb, exiting.\n");
1513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1515 return;
1516 }
1517 prefetch(lbq_desc->p.pg_chunk.va);
1518 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1519 lbq_desc->p.pg_chunk.page,
1520 lbq_desc->p.pg_chunk.offset,
1521 length);
1522
1523 skb->len += length;
1524 skb->data_len += length;
1525 skb->truesize += length;
1526 skb_shinfo(skb)->nr_frags++;
1527
1528 rx_ring->rx_packets++;
1529 rx_ring->rx_bytes += length;
1530 skb->ip_summed = CHECKSUM_UNNECESSARY;
1531 skb_record_rx_queue(skb, rx_ring->cq_id);
1532 if (vlan_id != 0xffff)
1533 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1534 napi_gro_frags(napi);
1535}
1536
1537/* Process an inbound completion from an rx ring. */
1538static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1539 struct rx_ring *rx_ring,
1540 struct ib_mac_iocb_rsp *ib_mac_rsp,
1541 u32 length,
1542 u16 vlan_id)
1543{
1544 struct net_device *ndev = qdev->ndev;
1545 struct sk_buff *skb = NULL;
1546 void *addr;
1547 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1548 struct napi_struct *napi = &rx_ring->napi;
1549 size_t hlen = ETH_HLEN;
1550
1551 skb = netdev_alloc_skb(ndev, length);
1552 if (!skb) {
1553 rx_ring->rx_dropped++;
1554 put_page(lbq_desc->p.pg_chunk.page);
1555 return;
1556 }
1557
1558 addr = lbq_desc->p.pg_chunk.va;
1559 prefetch(addr);
1560
1561 /* Frame error, so drop the packet. */
1562 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1563 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1564 goto err_out;
1565 }
1566
1567 /* Update the MAC header length*/
1568 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1569
4f848c0a
RM
1570 /* The max framesize filter on this chip is set higher than
1571 * MTU since FCoE uses 2k frames.
1572 */
1573 if (skb->len > ndev->mtu + hlen) {
1574 netif_err(qdev, drv, qdev->ndev,
1575 "Segment too small, dropping.\n");
1576 rx_ring->rx_dropped++;
1577 goto err_out;
1578 }
1579 skb_put_data(skb, addr, hlen);
1580 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1581 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1582 length);
1583 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1584 lbq_desc->p.pg_chunk.offset + hlen,
1585 length - hlen);
1586 skb->len += length - hlen;
1587 skb->data_len += length - hlen;
1588 skb->truesize += length - hlen;
1589
1590 rx_ring->rx_packets++;
1591 rx_ring->rx_bytes += skb->len;
1592 skb->protocol = eth_type_trans(skb, ndev);
1593 skb_checksum_none_assert(skb);
1594
1595 if ((ndev->features & NETIF_F_RXCSUM) &&
1596 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1597 /* TCP frame. */
1598 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1599 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1600 "TCP checksum done!\n");
1601 skb->ip_summed = CHECKSUM_UNNECESSARY;
1602 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1603 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1604 /* Unfragmented ipv4 UDP frame. */
1605 struct iphdr *iph =
1606 (struct iphdr *)((u8 *)addr + hlen);
1607 if (!(iph->frag_off &
1608 htons(IP_MF|IP_OFFSET))) {
1609 skb->ip_summed = CHECKSUM_UNNECESSARY;
1610 netif_printk(qdev, rx_status, KERN_DEBUG,
1611 qdev->ndev,
1612 "UDP checksum done!\n");
1613 }
1614 }
1615 }
1616
1617 skb_record_rx_queue(skb, rx_ring->cq_id);
1618 if (vlan_id != 0xffff)
1619 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1620 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1621 napi_gro_receive(napi, skb);
1622 else
1623 netif_receive_skb(skb);
1624 return;
1625err_out:
1626 dev_kfree_skb_any(skb);
1627 put_page(lbq_desc->p.pg_chunk.page);
1628}
1629
1630/* Process an inbound completion from an rx ring. */
1631static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1632 struct rx_ring *rx_ring,
1633 struct ib_mac_iocb_rsp *ib_mac_rsp,
1634 u32 length,
1635 u16 vlan_id)
1636{
1637 struct net_device *ndev = qdev->ndev;
1638 struct sk_buff *skb = NULL;
1639 struct sk_buff *new_skb = NULL;
1640 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1641
1642 skb = sbq_desc->p.skb;
1643 /* Allocate new_skb and copy */
1644 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1645 if (new_skb == NULL) {
1646 rx_ring->rx_dropped++;
1647 return;
1648 }
1649 skb_reserve(new_skb, NET_IP_ALIGN);
1650
1651 pci_dma_sync_single_for_cpu(qdev->pdev,
1652 dma_unmap_addr(sbq_desc, mapaddr),
1653 dma_unmap_len(sbq_desc, maplen),
1654 PCI_DMA_FROMDEVICE);
1655
1656 skb_put_data(new_skb, skb->data, length);
1657
1658 pci_dma_sync_single_for_device(qdev->pdev,
1659 dma_unmap_addr(sbq_desc, mapaddr),
1660 dma_unmap_len(sbq_desc, maplen),
1661 PCI_DMA_FROMDEVICE);
1662 skb = new_skb;
1663
1664 /* Frame error, so drop the packet. */
1665 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1666 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1667 dev_kfree_skb_any(skb);
1668 return;
1669 }
1670
1671 /* loopback self test for ethtool */
1672 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1673 ql_check_lb_frame(qdev, skb);
1674 dev_kfree_skb_any(skb);
1675 return;
1676 }
1677
1678 /* The max framesize filter on this chip is set higher than
1679 * MTU since FCoE uses 2k frames.
1680 */
1681 if (skb->len > ndev->mtu + ETH_HLEN) {
1682 dev_kfree_skb_any(skb);
1683 rx_ring->rx_dropped++;
1684 return;
1685 }
1686
1687 prefetch(skb->data);
1688 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 "%s Multicast.\n",
1691 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1692 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1693 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1694 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1695 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1696 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1697 }
1698 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1699 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1700 "Promiscuous Packet.\n");
1701
1702 rx_ring->rx_packets++;
1703 rx_ring->rx_bytes += skb->len;
1704 skb->protocol = eth_type_trans(skb, ndev);
1705 skb_checksum_none_assert(skb);
1706
1707 /* If rx checksum is on, and there are no
1708 * csum or frame errors.
1709 */
1710 if ((ndev->features & NETIF_F_RXCSUM) &&
1711 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1712 /* TCP frame. */
1713 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1714 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 "TCP checksum done!\n");
1716 skb->ip_summed = CHECKSUM_UNNECESSARY;
1717 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1718 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1719 /* Unfragmented ipv4 UDP frame. */
1720 struct iphdr *iph = (struct iphdr *) skb->data;
1721 if (!(iph->frag_off &
1722 htons(IP_MF|IP_OFFSET))) {
1723 skb->ip_summed = CHECKSUM_UNNECESSARY;
1724 netif_printk(qdev, rx_status, KERN_DEBUG,
1725 qdev->ndev,
1726 "UDP checksum done!\n");
1727 }
1728 }
1729 }
1730
1731 skb_record_rx_queue(skb, rx_ring->cq_id);
1732 if (vlan_id != 0xffff)
1733 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1734 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1735 napi_gro_receive(&rx_ring->napi, skb);
1736 else
1737 netif_receive_skb(skb);
1738}
1739
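/* Shift small-buffer data back so the IP header lands on a 2-byte boundary (undoes the QLGE_SB_PAD reserve done at fill time). */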
1740static void ql_realign_skb(struct sk_buff *skb, int len)
1741{
1742 void *temp_addr = skb->data;
1743
1744 /* Undo the skb_reserve(skb,32) we did before
1745 * giving to hardware, and realign data on
1746 * a 2-byte boundary.
1747 */
1748 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1749 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1750 memmove(skb->data, temp_addr, len);
1751}
1752
1753/*
1754 * This function builds an skb for the given inbound
1755 * completion. It will be rewritten for readability in the near
1756 * future, but for now it works well.
1757 */
1758static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1759 struct rx_ring *rx_ring,
1760 struct ib_mac_iocb_rsp *ib_mac_rsp)
1761{
1762 struct bq_desc *lbq_desc;
1763 struct bq_desc *sbq_desc;
1764 struct sk_buff *skb = NULL;
1765 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1766 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1767 size_t hlen = ETH_HLEN;
1768
1769 /*
1770 * Handle the header buffer if present.
1771 */
1772 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1773 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1774 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1775 "Header of %d bytes in small buffer.\n", hdr_len);
1776 /*
1777 * Headers fit nicely into a small buffer.
1778 */
1779 sbq_desc = ql_get_curr_sbuf(rx_ring);
1780 pci_unmap_single(qdev->pdev,
1781 dma_unmap_addr(sbq_desc, mapaddr),
1782 dma_unmap_len(sbq_desc, maplen),
1783 PCI_DMA_FROMDEVICE);
1784 skb = sbq_desc->p.skb;
1785 ql_realign_skb(skb, hdr_len);
1786 skb_put(skb, hdr_len);
1787 sbq_desc->p.skb = NULL;
1788 }
1789
1790 /*
1791 * Handle the data buffer(s).
1792 */
1793 if (unlikely(!length)) { /* Is there data too? */
ae9540f7
JP
1794 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1795 "No Data buffer in this packet.\n");
c4e84bde
RM
1796 return skb;
1797 }
1798
1799 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1800 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
ae9540f7
JP
1801 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1802 "Headers in small, data of %d bytes in small, combine them.\n",
1803 length);
c4e84bde
RM
1804 /*
1805 * Data is less than small buffer size so it's
1806 * stuffed in a small buffer.
1807 * For this case we append the data
1808 * from the "data" small buffer to the "header" small
1809 * buffer.
1810 */
1811 sbq_desc = ql_get_curr_sbuf(rx_ring);
1812 pci_dma_sync_single_for_cpu(qdev->pdev,
64b9b41d 1813 dma_unmap_addr
c4e84bde 1814 (sbq_desc, mapaddr),
64b9b41d 1815 dma_unmap_len
c4e84bde
RM
1816 (sbq_desc, maplen),
1817 PCI_DMA_FROMDEVICE);
59ae1d12 1818 skb_put_data(skb, sbq_desc->p.skb->data, length);
c4e84bde 1819 pci_dma_sync_single_for_device(qdev->pdev,
64b9b41d 1820 dma_unmap_addr
c4e84bde
RM
1821 (sbq_desc,
1822 mapaddr),
64b9b41d 1823 dma_unmap_len
c4e84bde
RM
1824 (sbq_desc,
1825 maplen),
1826 PCI_DMA_FROMDEVICE);
1827 } else {
ae9540f7
JP
1828 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1829 "%d bytes in a single small buffer.\n",
1830 length);
c4e84bde
RM
1831 sbq_desc = ql_get_curr_sbuf(rx_ring);
1832 skb = sbq_desc->p.skb;
1833 ql_realign_skb(skb, length);
1834 skb_put(skb, length);
1835 pci_unmap_single(qdev->pdev,
64b9b41d 1836 dma_unmap_addr(sbq_desc,
c4e84bde 1837 mapaddr),
64b9b41d 1838 dma_unmap_len(sbq_desc,
c4e84bde
RM
1839 maplen),
1840 PCI_DMA_FROMDEVICE);
1841 sbq_desc->p.skb = NULL;
1842 }
1843 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1844 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
ae9540f7
JP
1845 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846 "Header in small, %d bytes in large. Chain large to small!\n",
1847 length);
c4e84bde
RM
1848 /*
1849 * The data is in a single large buffer. We
1850 * chain it to the header buffer's skb and let
1851 * it rip.
1852 */
7c734359 1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
ae9540f7
JP
1854 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1855 "Chaining page at offset = %d, for %d bytes to skb.\n",
1856 lbq_desc->p.pg_chunk.offset, length);
7c734359
RM
1857 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1858 lbq_desc->p.pg_chunk.offset,
1859 length);
c4e84bde
RM
1860 skb->len += length;
1861 skb->data_len += length;
1862 skb->truesize += length;
c4e84bde
RM
1863 } else {
1864 /*
1865 * The headers and data are in a single large buffer. We
1866 * chain the page to a new skb and pull the headers. This can happen with
1867 * jumbo mtu on a non-TCP/UDP frame.
1868 */
7c734359 1869 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
c4e84bde
RM
1870 skb = netdev_alloc_skb(qdev->ndev, length);
1871 if (skb == NULL) {
ae9540f7
JP
1872 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1873 "No skb available, drop the packet.\n");
c4e84bde
RM
1874 return NULL;
1875 }
4055c7d4 1876 pci_unmap_page(qdev->pdev,
64b9b41d 1877 dma_unmap_addr(lbq_desc,
4055c7d4 1878 mapaddr),
64b9b41d 1879 dma_unmap_len(lbq_desc, maplen),
4055c7d4 1880 PCI_DMA_FROMDEVICE);
c4e84bde 1881 skb_reserve(skb, NET_IP_ALIGN);
ae9540f7
JP
1882 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1883 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1884 length);
7c734359
RM
1885 skb_fill_page_desc(skb, 0,
1886 lbq_desc->p.pg_chunk.page,
1887 lbq_desc->p.pg_chunk.offset,
1888 length);
c4e84bde
RM
1889 skb->len += length;
1890 skb->data_len += length;
1891 skb->truesize += length;
a45adbe8
JK
1892 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1893 lbq_desc->p.pg_chunk.va,
1894 &hlen);
1895 __pskb_pull_tail(skb, hlen);
c4e84bde
RM
1896 }
1897 } else {
1898 /*
1899 * The data is in a chain of large buffers
1900 * pointed to by a small buffer. We loop
1901 * through and chain them to our small header
1902 * buffer's skb.
1903 * frags: There are 18 max frags and our small
1904 * buffer will hold 32 of them. The thing is,
1905 * we'll use 3 max for our 9000 byte jumbo
1906 * frames. If the MTU goes up we could
1907 * eventually be in trouble.
1908 */
7c734359 1909 int size, i = 0;
c4e84bde
RM
1910 sbq_desc = ql_get_curr_sbuf(rx_ring);
1911 pci_unmap_single(qdev->pdev,
64b9b41d
FT
1912 dma_unmap_addr(sbq_desc, mapaddr),
1913 dma_unmap_len(sbq_desc, maplen),
c4e84bde
RM
1914 PCI_DMA_FROMDEVICE);
1915 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1916 /*
1917 * This is a non-TCP/UDP IP frame, so
1918 * the headers aren't split into a small
1919 * buffer. We have to use the small buffer
1920 * that contains our sg list as our skb to
1921 * send upstairs. Copy the sg list here to
1922 * a local buffer and use it to find the
1923 * pages to chain.
1924 */
ae9540f7
JP
1925 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1926 "%d bytes of headers & data in chain of large.\n",
1927 length);
c4e84bde 1928 skb = sbq_desc->p.skb;
c4e84bde
RM
1929 sbq_desc->p.skb = NULL;
1930 skb_reserve(skb, NET_IP_ALIGN);
c4e84bde 1931 }
afe6e00c 1932 do {
7c734359
RM
1933 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1934 size = (length < rx_ring->lbq_buf_size) ? length :
1935 rx_ring->lbq_buf_size;
c4e84bde 1936
ae9540f7
JP
1937 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 "Adding page %d to skb for %d bytes.\n",
1939 i, size);
7c734359
RM
1940 skb_fill_page_desc(skb, i,
1941 lbq_desc->p.pg_chunk.page,
1942 lbq_desc->p.pg_chunk.offset,
1943 size);
c4e84bde
RM
1944 skb->len += size;
1945 skb->data_len += size;
1946 skb->truesize += size;
1947 length -= size;
c4e84bde 1948 i++;
afe6e00c 1949 } while (length > 0);
a45adbe8
JK
1950 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1951 &hlen);
1952 __pskb_pull_tail(skb, hlen);
c4e84bde
RM
1953 }
1954 return skb;
1955}
1956
1957/* Process an inbound completion from an rx ring. */
4f848c0a 1958static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
c4e84bde 1959 struct rx_ring *rx_ring,
4f848c0a
RM
1960 struct ib_mac_iocb_rsp *ib_mac_rsp,
1961 u16 vlan_id)
c4e84bde
RM
1962{
1963 struct net_device *ndev = qdev->ndev;
1964 struct sk_buff *skb = NULL;
1965
1966 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1967
1968 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1969 if (unlikely(!skb)) {
ae9540f7
JP
1970 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1971 "No skb available, drop packet.\n");
885ee398 1972 rx_ring->rx_dropped++;
c4e84bde
RM
1973 return;
1974 }
1975
ae721f3a
SV
1976 /* Frame error, so drop the packet. */
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1978 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1979 dev_kfree_skb_any(skb);
1980 return;
1981 }
1982
ec33a491
RM
1983 /* The max framesize filter on this chip is set higher than
1984 * MTU since FCoE uses 2k frames.
1985 */
1986 if (skb->len > ndev->mtu + ETH_HLEN) {
1987 dev_kfree_skb_any(skb);
885ee398 1988 rx_ring->rx_dropped++;
ec33a491
RM
1989 return;
1990 }
1991
9dfbbaa6
RM
1992 /* loopback self test for ethtool */
1993 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1994 ql_check_lb_frame(qdev, skb);
1995 dev_kfree_skb_any(skb);
1996 return;
1997 }
1998
c4e84bde 1999 prefetch(skb->data);
c4e84bde 2000 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
ae9540f7
JP
2001 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2002 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2003 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2004 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2005 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2006 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2007 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
885ee398 2008 rx_ring->rx_multicast++;
c4e84bde
RM
2009 }
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
ae9540f7
JP
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "Promiscuous Packet.\n");
c4e84bde 2013 }
d555f592 2014
d555f592 2015 skb->protocol = eth_type_trans(skb, ndev);
bc8acf2c 2016 skb_checksum_none_assert(skb);
d555f592
RM
2017
2018 /* If rx checksum is on, and there are no
2019 * csum or frame errors.
2020 */
88230fd5 2021 if ((ndev->features & NETIF_F_RXCSUM) &&
d555f592
RM
2022 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2023 /* TCP frame. */
2024 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
ae9540f7
JP
2025 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2026 "TCP checksum done!\n");
d555f592
RM
2027 skb->ip_summed = CHECKSUM_UNNECESSARY;
2028 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2029 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2030 /* Unfragmented ipv4 UDP frame. */
2031 struct iphdr *iph = (struct iphdr *) skb->data;
2032 if (!(iph->frag_off &
0d653ed8 2033 htons(IP_MF|IP_OFFSET))) {
d555f592 2034 skb->ip_summed = CHECKSUM_UNNECESSARY;
ae9540f7
JP
2035 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2036 "TCP checksum done!\n");
d555f592
RM
2037 }
2038 }
c4e84bde 2039 }
d555f592 2040
885ee398
RM
2041 rx_ring->rx_packets++;
2042 rx_ring->rx_bytes += skb->len;
b2014ff8 2043 skb_record_rx_queue(skb, rx_ring->cq_id);
a45adbe8 2044 if (vlan_id != 0xffff)
86a9bad3 2045 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
18c49b91
JP
2046 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2047 napi_gro_receive(&rx_ring->napi, skb);
2048 else
2049 netif_receive_skb(skb);
c4e84bde
RM
2050}
2051
4f848c0a
RM
2052/* Process an inbound completion from an rx ring. */
2053static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2054 struct rx_ring *rx_ring,
2055 struct ib_mac_iocb_rsp *ib_mac_rsp)
2056{
2057 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
a45adbe8
JK
2058 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2059 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
4f848c0a
RM
2060 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2061 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2062
2063 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2064
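/* Roughly, the dispatch below chooses a handler from the IOCB flags:
 * header/data split (HV) -> ql_process_mac_split_rx_intr(),
 * data in a small buffer (DS) -> ql_process_mac_rx_skb(),
 * data in a page chunk (DL) with a clean TCP checksum -> the GRO page path,
 * other page-chunk frames (DL) -> ql_process_mac_rx_page(),
 * and everything else falls back to the split-frame handler.
 */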
2065 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2066 /* The data and headers are split into
2067 * separate buffers.
2068 */
2069 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2070 vlan_id);
2071 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2072 /* The data fit in a single small buffer.
2073 * Allocate a new skb, copy the data and
2074 * return the buffer to the free pool.
2075 */
2076 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2077 length, vlan_id);
63526713
RM
2078 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2079 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2080 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2081 /* TCP packet in a page chunk that's been checksummed.
2082 * Tack it on to our GRO skb and let it go.
2083 */
2084 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2085 length, vlan_id);
4f848c0a
RM
2086 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2087 /* Non-TCP packet in a page chunk. Allocate an
2088 * skb, tack it on frags, and send it up.
2089 */
2090 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2091 length, vlan_id);
2092 } else {
c0c56955
RM
2093 /* Non-TCP/UDP large frames that span multiple buffers
2094 * can be processed correctly by the split frame logic.
2095 */
2096 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2097 vlan_id);
4f848c0a
RM
2098 }
2099
2100 return (unsigned long)length;
2101}
2102
c4e84bde
RM
2103/* Process an outbound completion from an rx ring. */
2104static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2105 struct ob_mac_iocb_rsp *mac_rsp)
2106{
2107 struct tx_ring *tx_ring;
2108 struct tx_ring_desc *tx_ring_desc;
2109
2110 QL_DUMP_OB_MAC_RSP(mac_rsp);
2111 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2112 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2113 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
885ee398
RM
2114 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2115 tx_ring->tx_packets++;
c4e84bde
RM
2116 dev_kfree_skb(tx_ring_desc->skb);
2117 tx_ring_desc->skb = NULL;
2118
2119 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2120 OB_MAC_IOCB_RSP_S |
2121 OB_MAC_IOCB_RSP_L |
2122 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2123 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
ae9540f7
JP
2124 netif_warn(qdev, tx_done, qdev->ndev,
2125 "Total descriptor length did not match transfer length.\n");
c4e84bde
RM
2126 }
2127 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
ae9540f7
JP
2128 netif_warn(qdev, tx_done, qdev->ndev,
2129 "Frame too short to be valid, not sent.\n");
c4e84bde
RM
2130 }
2131 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
ae9540f7
JP
2132 netif_warn(qdev, tx_done, qdev->ndev,
2133 "Frame too long, but sent anyway.\n");
c4e84bde
RM
2134 }
2135 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
ae9540f7
JP
2136 netif_warn(qdev, tx_done, qdev->ndev,
2137 "PCI backplane error. Frame not sent.\n");
c4e84bde
RM
2138 }
2139 }
2140 atomic_inc(&tx_ring->tx_count);
2141}
2142
2143/* Fire up a handler to reset the MPI processor. */
2144void ql_queue_fw_error(struct ql_adapter *qdev)
2145{
6a473308 2146 ql_link_off(qdev);
c4e84bde
RM
2147 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2148}
2149
2150void ql_queue_asic_error(struct ql_adapter *qdev)
2151{
6a473308 2152 ql_link_off(qdev);
c4e84bde 2153 ql_disable_interrupts(qdev);
6497b607
RM
2154 /* Clear adapter up bit to signal the recovery
2155 * process that it shouldn't kill the reset worker
2156 * thread
2157 */
2158 clear_bit(QL_ADAPTER_UP, &qdev->flags);
da92b393
JK
2159 /* Set the asic recovery bit to indicate to the reset process that
2160 * we are in fatal error recovery rather than a normal close
2161 */
2162 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
c4e84bde
RM
2163 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2164}
2165
2166static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2167 struct ib_ae_iocb_rsp *ib_ae_rsp)
2168{
2169 switch (ib_ae_rsp->event) {
2170 case MGMT_ERR_EVENT:
ae9540f7
JP
2171 netif_err(qdev, rx_err, qdev->ndev,
2172 "Management Processor Fatal Error.\n");
c4e84bde
RM
2173 ql_queue_fw_error(qdev);
2174 return;
2175
2176 case CAM_LOOKUP_ERR_EVENT:
5069ee55
JK
2177 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2178 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
c4e84bde
RM
2179 ql_queue_asic_error(qdev);
2180 return;
2181
2182 case SOFT_ECC_ERROR_EVENT:
5069ee55 2183 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
c4e84bde
RM
2184 ql_queue_asic_error(qdev);
2185 break;
2186
2187 case PCI_ERR_ANON_BUF_RD:
5069ee55
JK
2188 netdev_err(qdev->ndev, "PCI error occurred when reading "
2189 "anonymous buffers from rx_ring %d.\n",
2190 ib_ae_rsp->q_id);
c4e84bde
RM
2191 ql_queue_asic_error(qdev);
2192 break;
2193
2194 default:
ae9540f7
JP
2195 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2196 ib_ae_rsp->event);
c4e84bde
RM
2197 ql_queue_asic_error(qdev);
2198 break;
2199 }
2200}
2201
2202static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2203{
2204 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2205 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2206 struct ob_mac_iocb_rsp *net_rsp = NULL;
2207 int count = 0;
2208
1e213303 2209 struct tx_ring *tx_ring;
c4e84bde
RM
2210 /* While there are entries in the completion queue. */
2211 while (prod != rx_ring->cnsmr_idx) {
2212
ae9540f7 2213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
d602de8e 2214 "cq_id = %d, prod = %d, cnsmr = %d\n",
ae9540f7 2215 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
c4e84bde
RM
2216
2217 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2218 rmb();
2219 switch (net_rsp->opcode) {
2220
2221 case OPCODE_OB_MAC_TSO_IOCB:
2222 case OPCODE_OB_MAC_IOCB:
2223 ql_process_mac_tx_intr(qdev, net_rsp);
2224 break;
2225 default:
ae9540f7
JP
2226 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2228 net_rsp->opcode);
c4e84bde
RM
2229 }
2230 count++;
2231 ql_update_cq(rx_ring);
ba7cd3ba 2232 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde 2233 }
4da79504
DC
2234 if (!net_rsp)
2235 return 0;
c4e84bde 2236 ql_write_cq_idx(rx_ring);
1e213303 2237 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
4da79504 2238 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
d0de7309 2239 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
c4e84bde
RM
2240 /*
2241 * The queue got stopped because the tx_ring was full.
2242 * Wake it up, because it's now at least 25% empty.
2243 */
1e213303 2244 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
c4e84bde
RM
2245 }
2246
2247 return count;
2248}
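
/* The wake test above is a hysteresis heuristic: a subqueue stopped by
 * qlge_send() is only restarted once at least a quarter of the work queue
 * is free again. As a rough example, with a hypothetical wq_len of 128 the
 * queue stays stopped until tx_count climbs back above 32, which avoids
 * bouncing netif_stop/netif_wake on every single completion.
 */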
2249
2250static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2251{
2252 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2253 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2254 struct ql_net_rsp_iocb *net_rsp;
2255 int count = 0;
2256
2257 /* While there are entries in the completion queue. */
2258 while (prod != rx_ring->cnsmr_idx) {
2259
ae9540f7 2260 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
d602de8e 2261 "cq_id = %d, prod = %d, cnsmr = %d\n",
ae9540f7 2262 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
c4e84bde
RM
2263
2264 net_rsp = rx_ring->curr_entry;
2265 rmb();
2266 switch (net_rsp->opcode) {
2267 case OPCODE_IB_MAC_IOCB:
2268 ql_process_mac_rx_intr(qdev, rx_ring,
2269 (struct ib_mac_iocb_rsp *)
2270 net_rsp);
2271 break;
2272
2273 case OPCODE_IB_AE_IOCB:
2274 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2275 net_rsp);
2276 break;
2277 default:
ae9540f7
JP
2278 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2279 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2280 net_rsp->opcode);
2281 break;
c4e84bde
RM
2282 }
2283 count++;
2284 ql_update_cq(rx_ring);
ba7cd3ba 2285 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2286 if (count == budget)
2287 break;
2288 }
2289 ql_update_buffer_queues(qdev, rx_ring);
2290 ql_write_cq_idx(rx_ring);
2291 return count;
2292}
2293
2294static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2295{
2296 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2297 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
2298 struct rx_ring *trx_ring;
2299 int i, work_done = 0;
2300 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde 2301
ae9540f7
JP
2302 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2303 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
c4e84bde 2304
39aa8165
RM
2305 /* Service the TX rings first. They start
2306 * right after the RSS rings. */
2307 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2308 trx_ring = &qdev->rx_ring[i];
2309 /* If this TX completion ring belongs to this vector and
2310 * it's not empty then service it.
2311 */
2312 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2313 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2314 trx_ring->cnsmr_idx)) {
ae9540f7
JP
2315 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2316 "%s: Servicing TX completion ring %d.\n",
2317 __func__, trx_ring->cq_id);
39aa8165
RM
2318 ql_clean_outbound_rx_ring(trx_ring);
2319 }
2320 }
2321
2322 /*
2323 * Now service the RSS ring if it's active.
2324 */
2325 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2326 rx_ring->cnsmr_idx) {
ae9540f7
JP
2327 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2328 "%s: Servicing RX completion ring %d.\n",
2329 __func__, rx_ring->cq_id);
39aa8165
RM
2330 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2331 }
2332
c4e84bde 2333 if (work_done < budget) {
6ad20165 2334 napi_complete_done(napi, work_done);
c4e84bde
RM
2335 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2336 }
2337 return work_done;
2338}
2339
c8f44aff 2340static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
c4e84bde
RM
2341{
2342 struct ql_adapter *qdev = netdev_priv(ndev);
2343
f646968f 2344 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
c4e84bde 2345 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
18c49b91 2346 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
c4e84bde 2347 } else {
c4e84bde
RM
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349 }
2350}
2351
a45adbe8
JK
2352/**
2353 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2354 * based on the features to enable/disable hardware vlan accel
2355 */
2356static int qlge_update_hw_vlan_features(struct net_device *ndev,
2357 netdev_features_t features)
2358{
2359 struct ql_adapter *qdev = netdev_priv(ndev);
2360 int status = 0;
61132bf7 2361 bool need_restart = netif_running(ndev);
a45adbe8 2362
61132bf7
ML
2363 if (need_restart) {
2364 status = ql_adapter_down(qdev);
2365 if (status) {
2366 netif_err(qdev, link, qdev->ndev,
2367 "Failed to bring down the adapter\n");
2368 return status;
2369 }
a45adbe8
JK
2370 }
2371
2372 /* update the features with the recent change */
2373 ndev->features = features;
2374
61132bf7
ML
2375 if (need_restart) {
2376 status = ql_adapter_up(qdev);
2377 if (status) {
2378 netif_err(qdev, link, qdev->ndev,
2379 "Failed to bring up the adapter\n");
2380 return status;
2381 }
a45adbe8 2382 }
61132bf7 2383
a45adbe8
JK
2384 return status;
2385}
2386
c8f44aff
MM
2387static netdev_features_t qlge_fix_features(struct net_device *ndev,
2388 netdev_features_t features)
18c49b91 2389{
a45adbe8 2390 int err;
18c49b91 2391
a45adbe8
JK
2392 /* Update the behavior of vlan accel in the adapter */
2393 err = qlge_update_hw_vlan_features(ndev, features);
2394 if (err)
2395 return err;
2396
18c49b91
JP
2397 return features;
2398}
2399
c8f44aff
MM
2400static int qlge_set_features(struct net_device *ndev,
2401 netdev_features_t features)
18c49b91 2402{
c8f44aff 2403 netdev_features_t changed = ndev->features ^ features;
18c49b91 2404
f646968f 2405 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
18c49b91
JP
2406 qlge_vlan_mode(ndev, features);
2407
2408 return 0;
2409}
2410
8e586137 2411static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
c4e84bde 2412{
c4e84bde 2413 u32 enable_bit = MAC_ADDR_E;
8e586137 2414 int err;
c4e84bde 2415
8e586137
JP
2416 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2417 MAC_ADDR_TYPE_VLAN, vid);
2418 if (err)
ae9540f7
JP
2419 netif_err(qdev, ifup, qdev->ndev,
2420 "Failed to init vlan address.\n");
8e586137 2421 return err;
c4e84bde
RM
2422}
2423
80d5c368 2424static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
c4e84bde
RM
2425{
2426 struct ql_adapter *qdev = netdev_priv(ndev);
cc288f54 2427 int status;
8e586137 2428 int err;
cc288f54
RM
2429
2430 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2431 if (status)
8e586137 2432 return status;
c4e84bde 2433
8e586137 2434 err = __qlge_vlan_rx_add_vid(qdev, vid);
18c49b91
JP
2435 set_bit(vid, qdev->active_vlans);
2436
2437 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
8e586137
JP
2438
2439 return err;
18c49b91
JP
2440}
2441
8e586137 2442static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
18c49b91
JP
2443{
2444 u32 enable_bit = 0;
8e586137 2445 int err;
18c49b91 2446
8e586137
JP
2447 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2448 MAC_ADDR_TYPE_VLAN, vid);
2449 if (err)
ae9540f7
JP
2450 netif_err(qdev, ifup, qdev->ndev,
2451 "Failed to clear vlan address.\n");
8e586137 2452 return err;
18c49b91
JP
2453}
2454
80d5c368 2455static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
18c49b91
JP
2456{
2457 struct ql_adapter *qdev = netdev_priv(ndev);
2458 int status;
8e586137 2459 int err;
c4e84bde 2460
18c49b91
JP
2461 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2462 if (status)
8e586137 2463 return status;
18c49b91 2464
8e586137 2465 err = __qlge_vlan_rx_kill_vid(qdev, vid);
18c49b91
JP
2466 clear_bit(vid, qdev->active_vlans);
2467
2468 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
8e586137
JP
2469
2470 return err;
c4e84bde
RM
2471}
2472
c1b60092
RM
2473static void qlge_restore_vlan(struct ql_adapter *qdev)
2474{
18c49b91
JP
2475 int status;
2476 u16 vid;
2477
2478 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2479 if (status)
2480 return;
2481
2482 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2483 __qlge_vlan_rx_add_vid(qdev, vid);
2484
2485 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c1b60092
RM
2486}
2487
c4e84bde
RM
2488/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2489static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2490{
2491 struct rx_ring *rx_ring = dev_id;
288379f0 2492 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2493 return IRQ_HANDLED;
2494}
2495
c4e84bde
RM
2496/* This handles a fatal error, MPI activity, and the default
2497 * rx_ring in an MSI-X multiple vector environment.
2499 * In MSI/Legacy environment it also processes the rest of
2499 * the rx_rings.
2500 */
2501static irqreturn_t qlge_isr(int irq, void *dev_id)
2502{
2503 struct rx_ring *rx_ring = dev_id;
2504 struct ql_adapter *qdev = rx_ring->qdev;
2505 struct intr_context *intr_context = &qdev->intr_context[0];
2506 u32 var;
c4e84bde
RM
2507 int work_done = 0;
2508
bb0d215c
RM
2509 spin_lock(&qdev->hw_lock);
2510 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
ae9540f7
JP
2511 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2512 "Shared Interrupt, Not ours!\n");
bb0d215c
RM
2513 spin_unlock(&qdev->hw_lock);
2514 return IRQ_NONE;
c4e84bde 2515 }
bb0d215c 2516 spin_unlock(&qdev->hw_lock);
c4e84bde 2517
bb0d215c 2518 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2519
2520 /*
2521 * Check for fatal error.
2522 */
2523 if (var & STS_FE) {
2524 ql_queue_asic_error(qdev);
5069ee55 2525 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
c4e84bde 2526 var = ql_read32(qdev, ERR_STS);
5069ee55
JK
2527 netdev_err(qdev->ndev, "Resetting chip. "
2528 "Error Status Register = 0x%x\n", var);
c4e84bde
RM
2529 return IRQ_HANDLED;
2530 }
2531
2532 /*
2533 * Check MPI processor activity.
2534 */
5ee22a5a
RM
2535 if ((var & STS_PI) &&
2536 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2537 /*
2538 * We've got an async event or mailbox completion.
2539 * Handle it and clear the source of the interrupt.
2540 */
ae9540f7
JP
2541 netif_err(qdev, intr, qdev->ndev,
2542 "Got MPI processor interrupt.\n");
c4e84bde 2543 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2544 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2545 queue_delayed_work_on(smp_processor_id(),
2546 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2547 work_done++;
2548 }
2549
2550 /*
39aa8165
RM
2551 * Get the bit-mask that shows the active queues for this
2552 * pass. Compare it to the queues that this irq services
2553 * and call napi if there's a match.
c4e84bde 2554 */
39aa8165
RM
2555 var = ql_read32(qdev, ISR1);
2556 if (var & intr_context->irq_mask) {
ae9540f7
JP
2557 netif_info(qdev, intr, qdev->ndev,
2558 "Waking handler for rx_ring[0].\n");
39aa8165 2559 ql_disable_completion_interrupt(qdev, intr_context->intr);
32a5b2a0
RM
2560 napi_schedule(&rx_ring->napi);
2561 work_done++;
2562 }
bb0d215c 2563 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2564 return work_done ? IRQ_HANDLED : IRQ_NONE;
2565}
2566
2567static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2568{
2569
2570 if (skb_is_gso(skb)) {
2571 int err;
1ee1cfe7 2572 __be16 l3_proto = vlan_get_protocol(skb);
bb9689e6 2573
2574 err = skb_cow_head(skb, 0);
2575 if (err < 0)
2576 return err;
c4e84bde
RM
2577
2578 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2579 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2580 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2581 mac_iocb_ptr->total_hdrs_len =
2582 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2583 mac_iocb_ptr->net_trans_offset =
2584 cpu_to_le16(skb_network_offset(skb) |
2585 skb_transport_offset(skb)
2586 << OB_MAC_TRANSPORT_HDR_SHIFT);
2587 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2588 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1ee1cfe7 2589 if (likely(l3_proto == htons(ETH_P_IP))) {
c4e84bde
RM
2590 struct iphdr *iph = ip_hdr(skb);
2591 iph->check = 0;
2592 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2593 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2594 iph->daddr, 0,
2595 IPPROTO_TCP,
2596 0);
1ee1cfe7 2597 } else if (l3_proto == htons(ETH_P_IPV6)) {
c4e84bde
RM
2598 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2599 tcp_hdr(skb)->check =
2600 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2601 &ipv6_hdr(skb)->daddr,
2602 0, IPPROTO_TCP, 0);
2603 }
2604 return 1;
2605 }
2606 return 0;
2607}
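
/* Note on the TSO checksum seeding above: the pseudo-header sum is computed
 * with a zero length argument (csum_tcpudp_magic()/csum_ipv6_magic() with
 * len = 0), the usual LSO convention, since the hardware adds each segment's
 * actual payload length while it carves the frame into MSS-sized pieces.
 */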
2608
2609static void ql_hw_csum_setup(struct sk_buff *skb,
2610 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2611{
2612 int len;
2613 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2614 __sum16 *check;
c4e84bde
RM
2615 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2616 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2617 mac_iocb_ptr->net_trans_offset =
2618 cpu_to_le16(skb_network_offset(skb) |
2619 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2620
2621 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2622 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2623 if (likely(iph->protocol == IPPROTO_TCP)) {
2624 check = &(tcp_hdr(skb)->check);
2625 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2626 mac_iocb_ptr->total_hdrs_len =
2627 cpu_to_le16(skb_transport_offset(skb) +
2628 (tcp_hdr(skb)->doff << 2));
2629 } else {
2630 check = &(udp_hdr(skb)->check);
2631 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2632 mac_iocb_ptr->total_hdrs_len =
2633 cpu_to_le16(skb_transport_offset(skb) +
2634 sizeof(struct udphdr));
2635 }
2636 *check = ~csum_tcpudp_magic(iph->saddr,
2637 iph->daddr, len, iph->protocol, 0);
2638}
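
/* For the non-TSO CHECKSUM_PARTIAL case above the driver follows the usual
 * split: it seeds the TCP/UDP checksum field with the un-complemented
 * pseudo-header sum and flags the IOCB with OB_MAC_TSO_IOCB_TC/UC, leaving
 * the hardware to fold the header and payload bytes into the final value.
 */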
2639
61357325 2640static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2641{
2642 struct tx_ring_desc *tx_ring_desc;
2643 struct ob_mac_iocb_req *mac_iocb_ptr;
2644 struct ql_adapter *qdev = netdev_priv(ndev);
2645 int tso;
2646 struct tx_ring *tx_ring;
1e213303 2647 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2648
2649 tx_ring = &qdev->tx_ring[tx_ring_idx];
2650
74c50b4b
RM
2651 if (skb_padto(skb, ETH_ZLEN))
2652 return NETDEV_TX_OK;
2653
c4e84bde 2654 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
ae9540f7 2655 netif_info(qdev, tx_queued, qdev->ndev,
41812db8 2656 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
ae9540f7 2657 __func__, tx_ring_idx);
1e213303 2658 netif_stop_subqueue(ndev, tx_ring->wq_id);
885ee398 2659 tx_ring->tx_errors++;
c4e84bde
RM
2660 return NETDEV_TX_BUSY;
2661 }
2662 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2663 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2664 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2665
2666 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2667 mac_iocb_ptr->tid = tx_ring_desc->index;
2668 /* We use the upper 32-bits to store the tx queue for this IO.
2669 * When we get the completion we can use it to establish the context.
2670 */
2671 mac_iocb_ptr->txq_idx = tx_ring_idx;
2672 tx_ring_desc->skb = skb;
2673
2674 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2675
df8a39de 2676 if (skb_vlan_tag_present(skb)) {
ae9540f7 2677 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
df8a39de 2678 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
c4e84bde 2679 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
df8a39de 2680 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
c4e84bde
RM
2681 }
2682 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2683 if (tso < 0) {
2684 dev_kfree_skb_any(skb);
2685 return NETDEV_TX_OK;
2686 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2687 ql_hw_csum_setup(skb,
2688 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2689 }
0d979f74
RM
2690 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2691 NETDEV_TX_OK) {
ae9540f7
JP
2692 netif_err(qdev, tx_queued, qdev->ndev,
2693 "Could not map the segments.\n");
885ee398 2694 tx_ring->tx_errors++;
0d979f74
RM
2695 return NETDEV_TX_BUSY;
2696 }
c4e84bde
RM
2697 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2698 tx_ring->prod_idx++;
2699 if (tx_ring->prod_idx == tx_ring->wq_len)
2700 tx_ring->prod_idx = 0;
2701 wmb();
2702
e42d8cee
SK
2703 ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2704 mmiowb();
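/* The wmb() above makes the IOCB contents visible before the producer
 * index is advanced; the relaxed doorbell write paired with mmiowb() is a
 * lighter-weight ordering pattern that still keeps the MMIO write from
 * being reordered past a later unlock on architectures that need it.
 */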
ae9540f7
JP
2705 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2706 "tx queued, slot %d, len %d\n",
2707 tx_ring->prod_idx, skb->len);
c4e84bde
RM
2708
2709 atomic_dec(&tx_ring->tx_count);
41812db8
JK
2710
2711 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2712 netif_stop_subqueue(ndev, tx_ring->wq_id);
2713 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2714 /*
2715 * The queue got stopped because the tx_ring was full.
2716 * Wake it up, because it's now at least 25% empty.
2717 */
2718 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2719 }
c4e84bde
RM
2720 return NETDEV_TX_OK;
2721}
2722
9dfbbaa6 2723
c4e84bde
RM
2724static void ql_free_shadow_space(struct ql_adapter *qdev)
2725{
2726 if (qdev->rx_ring_shadow_reg_area) {
2727 pci_free_consistent(qdev->pdev,
2728 PAGE_SIZE,
2729 qdev->rx_ring_shadow_reg_area,
2730 qdev->rx_ring_shadow_reg_dma);
2731 qdev->rx_ring_shadow_reg_area = NULL;
2732 }
2733 if (qdev->tx_ring_shadow_reg_area) {
2734 pci_free_consistent(qdev->pdev,
2735 PAGE_SIZE,
2736 qdev->tx_ring_shadow_reg_area,
2737 qdev->tx_ring_shadow_reg_dma);
2738 qdev->tx_ring_shadow_reg_area = NULL;
2739 }
2740}
2741
2742static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2743{
2744 qdev->rx_ring_shadow_reg_area =
440c734f
JP
2745 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2746 &qdev->rx_ring_shadow_reg_dma);
c4e84bde 2747 if (qdev->rx_ring_shadow_reg_area == NULL) {
ae9540f7
JP
2748 netif_err(qdev, ifup, qdev->ndev,
2749 "Allocation of RX shadow space failed.\n");
c4e84bde
RM
2750 return -ENOMEM;
2751 }
440c734f 2752
c4e84bde 2753 qdev->tx_ring_shadow_reg_area =
440c734f
JP
2754 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2755 &qdev->tx_ring_shadow_reg_dma);
c4e84bde 2756 if (qdev->tx_ring_shadow_reg_area == NULL) {
ae9540f7
JP
2757 netif_err(qdev, ifup, qdev->ndev,
2758 "Allocation of TX shadow space failed.\n");
c4e84bde
RM
2759 goto err_wqp_sh_area;
2760 }
2761 return 0;
2762
2763err_wqp_sh_area:
2764 pci_free_consistent(qdev->pdev,
2765 PAGE_SIZE,
2766 qdev->rx_ring_shadow_reg_area,
2767 qdev->rx_ring_shadow_reg_dma);
2768 return -ENOMEM;
2769}
2770
2771static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2772{
2773 struct tx_ring_desc *tx_ring_desc;
2774 int i;
2775 struct ob_mac_iocb_req *mac_iocb_ptr;
2776
2777 mac_iocb_ptr = tx_ring->wq_base;
2778 tx_ring_desc = tx_ring->q;
2779 for (i = 0; i < tx_ring->wq_len; i++) {
2780 tx_ring_desc->index = i;
2781 tx_ring_desc->skb = NULL;
2782 tx_ring_desc->queue_entry = mac_iocb_ptr;
2783 mac_iocb_ptr++;
2784 tx_ring_desc++;
2785 }
2786 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
c4e84bde
RM
2787}
2788
2789static void ql_free_tx_resources(struct ql_adapter *qdev,
2790 struct tx_ring *tx_ring)
2791{
2792 if (tx_ring->wq_base) {
2793 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2794 tx_ring->wq_base, tx_ring->wq_base_dma);
2795 tx_ring->wq_base = NULL;
2796 }
2797 kfree(tx_ring->q);
2798 tx_ring->q = NULL;
2799}
2800
2801static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2802 struct tx_ring *tx_ring)
2803{
2804 tx_ring->wq_base =
2805 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2806 &tx_ring->wq_base_dma);
2807
8e95a202 2808 if ((tx_ring->wq_base == NULL) ||
f5c4441c
JK
2809 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2810 goto pci_alloc_err;
2811
c4e84bde 2812 tx_ring->q =
6da2ec56
KC
2813 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2814 GFP_KERNEL);
c4e84bde
RM
2815 if (tx_ring->q == NULL)
2816 goto err;
2817
2818 return 0;
2819err:
2820 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2821 tx_ring->wq_base, tx_ring->wq_base_dma);
f5c4441c
JK
2822 tx_ring->wq_base = NULL;
2823pci_alloc_err:
2824 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
c4e84bde
RM
2825 return -ENOMEM;
2826}
2827
8668ae92 2828static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde 2829{
c4e84bde
RM
2830 struct bq_desc *lbq_desc;
2831
7c734359
RM
2832 uint32_t curr_idx, clean_idx;
2833
2834 curr_idx = rx_ring->lbq_curr_idx;
2835 clean_idx = rx_ring->lbq_clean_idx;
2836 while (curr_idx != clean_idx) {
2837 lbq_desc = &rx_ring->lbq[curr_idx];
2838
2839 if (lbq_desc->p.pg_chunk.last_flag) {
c4e84bde 2840 pci_unmap_page(qdev->pdev,
7c734359
RM
2841 lbq_desc->p.pg_chunk.map,
2842 ql_lbq_block_size(qdev),
c4e84bde 2843 PCI_DMA_FROMDEVICE);
7c734359 2844 lbq_desc->p.pg_chunk.last_flag = 0;
c4e84bde 2845 }
7c734359
RM
2846
2847 put_page(lbq_desc->p.pg_chunk.page);
2848 lbq_desc->p.pg_chunk.page = NULL;
2849
2850 if (++curr_idx == rx_ring->lbq_len)
2851 curr_idx = 0;
2852
c4e84bde 2853 }
ef380794
TLSC
2854 if (rx_ring->pg_chunk.page) {
2855 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2856 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2857 put_page(rx_ring->pg_chunk.page);
2858 rx_ring->pg_chunk.page = NULL;
2859 }
c4e84bde
RM
2860}
2861
8668ae92 2862static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2863{
2864 int i;
2865 struct bq_desc *sbq_desc;
2866
2867 for (i = 0; i < rx_ring->sbq_len; i++) {
2868 sbq_desc = &rx_ring->sbq[i];
2869 if (sbq_desc == NULL) {
ae9540f7
JP
2870 netif_err(qdev, ifup, qdev->ndev,
2871 "sbq_desc %d is NULL.\n", i);
c4e84bde
RM
2872 return;
2873 }
2874 if (sbq_desc->p.skb) {
2875 pci_unmap_single(qdev->pdev,
64b9b41d
FT
2876 dma_unmap_addr(sbq_desc, mapaddr),
2877 dma_unmap_len(sbq_desc, maplen),
c4e84bde
RM
2878 PCI_DMA_FROMDEVICE);
2879 dev_kfree_skb(sbq_desc->p.skb);
2880 sbq_desc->p.skb = NULL;
2881 }
c4e84bde
RM
2882 }
2883}
2884
4545a3f2
RM
2885/* Free all large and small rx buffers associated
2886 * with the completion queues for this device.
2887 */
2888static void ql_free_rx_buffers(struct ql_adapter *qdev)
2889{
2890 int i;
2891 struct rx_ring *rx_ring;
2892
2893 for (i = 0; i < qdev->rx_ring_count; i++) {
2894 rx_ring = &qdev->rx_ring[i];
2895 if (rx_ring->lbq)
2896 ql_free_lbq_buffers(qdev, rx_ring);
2897 if (rx_ring->sbq)
2898 ql_free_sbq_buffers(qdev, rx_ring);
2899 }
2900}
2901
2902static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2903{
2904 struct rx_ring *rx_ring;
2905 int i;
2906
2907 for (i = 0; i < qdev->rx_ring_count; i++) {
2908 rx_ring = &qdev->rx_ring[i];
2909 if (rx_ring->type != TX_Q)
2910 ql_update_buffer_queues(qdev, rx_ring);
2911 }
2912}
2913
2914static void ql_init_lbq_ring(struct ql_adapter *qdev,
2915 struct rx_ring *rx_ring)
2916{
2917 int i;
2918 struct bq_desc *lbq_desc;
2919 __le64 *bq = rx_ring->lbq_base;
2920
2921 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2922 for (i = 0; i < rx_ring->lbq_len; i++) {
2923 lbq_desc = &rx_ring->lbq[i];
2924 memset(lbq_desc, 0, sizeof(*lbq_desc));
2925 lbq_desc->index = i;
2926 lbq_desc->addr = bq;
2927 bq++;
2928 }
2929}
2930
2931static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2932 struct rx_ring *rx_ring)
2933{
2934 int i;
2935 struct bq_desc *sbq_desc;
2c9a0d41 2936 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2937
4545a3f2 2938 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2939 for (i = 0; i < rx_ring->sbq_len; i++) {
2940 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2941 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2942 sbq_desc->index = i;
2c9a0d41 2943 sbq_desc->addr = bq;
c4e84bde
RM
2944 bq++;
2945 }
c4e84bde
RM
2946}
2947
2948static void ql_free_rx_resources(struct ql_adapter *qdev,
2949 struct rx_ring *rx_ring)
2950{
c4e84bde
RM
2951 /* Free the small buffer queue. */
2952 if (rx_ring->sbq_base) {
2953 pci_free_consistent(qdev->pdev,
2954 rx_ring->sbq_size,
2955 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2956 rx_ring->sbq_base = NULL;
2957 }
2958
2959 /* Free the small buffer queue control blocks. */
2960 kfree(rx_ring->sbq);
2961 rx_ring->sbq = NULL;
2962
2963 /* Free the large buffer queue. */
2964 if (rx_ring->lbq_base) {
2965 pci_free_consistent(qdev->pdev,
2966 rx_ring->lbq_size,
2967 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2968 rx_ring->lbq_base = NULL;
2969 }
2970
2971 /* Free the large buffer queue control blocks. */
2972 kfree(rx_ring->lbq);
2973 rx_ring->lbq = NULL;
2974
2975 /* Free the rx queue. */
2976 if (rx_ring->cq_base) {
2977 pci_free_consistent(qdev->pdev,
2978 rx_ring->cq_size,
2979 rx_ring->cq_base, rx_ring->cq_base_dma);
2980 rx_ring->cq_base = NULL;
2981 }
2982}
2983
2984/* Allocate queues and buffers for this completions queue based
2985 * on the values in the parameter structure. */
2986static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2987 struct rx_ring *rx_ring)
2988{
2989
2990 /*
2991 * Allocate the completion queue for this rx_ring.
2992 */
2993 rx_ring->cq_base =
2994 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2995 &rx_ring->cq_base_dma);
2996
2997 if (rx_ring->cq_base == NULL) {
ae9540f7 2998 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
c4e84bde
RM
2999 return -ENOMEM;
3000 }
3001
3002 if (rx_ring->sbq_len) {
3003 /*
3004 * Allocate small buffer queue.
3005 */
3006 rx_ring->sbq_base =
3007 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3008 &rx_ring->sbq_base_dma);
3009
3010 if (rx_ring->sbq_base == NULL) {
ae9540f7
JP
3011 netif_err(qdev, ifup, qdev->ndev,
3012 "Small buffer queue allocation failed.\n");
c4e84bde
RM
3013 goto err_mem;
3014 }
3015
3016 /*
3017 * Allocate small buffer queue control blocks.
3018 */
14f8dc49
JP
3019 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3020 sizeof(struct bq_desc),
3021 GFP_KERNEL);
3022 if (rx_ring->sbq == NULL)
c4e84bde 3023 goto err_mem;
c4e84bde 3024
4545a3f2 3025 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
3026 }
3027
3028 if (rx_ring->lbq_len) {
3029 /*
3030 * Allocate large buffer queue.
3031 */
3032 rx_ring->lbq_base =
3033 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3034 &rx_ring->lbq_base_dma);
3035
3036 if (rx_ring->lbq_base == NULL) {
ae9540f7
JP
3037 netif_err(qdev, ifup, qdev->ndev,
3038 "Large buffer queue allocation failed.\n");
c4e84bde
RM
3039 goto err_mem;
3040 }
3041 /*
3042 * Allocate large buffer queue control blocks.
3043 */
14f8dc49
JP
3044 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3045 sizeof(struct bq_desc),
3046 GFP_KERNEL);
3047 if (rx_ring->lbq == NULL)
c4e84bde 3048 goto err_mem;
c4e84bde 3049
4545a3f2 3050 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
3051 }
3052
3053 return 0;
3054
3055err_mem:
3056 ql_free_rx_resources(qdev, rx_ring);
3057 return -ENOMEM;
3058}
3059
3060static void ql_tx_ring_clean(struct ql_adapter *qdev)
3061{
3062 struct tx_ring *tx_ring;
3063 struct tx_ring_desc *tx_ring_desc;
3064 int i, j;
3065
3066 /*
3067 * Loop through all queues and free
3068 * any resources.
3069 */
3070 for (j = 0; j < qdev->tx_ring_count; j++) {
3071 tx_ring = &qdev->tx_ring[j];
3072 for (i = 0; i < tx_ring->wq_len; i++) {
3073 tx_ring_desc = &tx_ring->q[i];
3074 if (tx_ring_desc && tx_ring_desc->skb) {
ae9540f7
JP
3075 netif_err(qdev, ifdown, qdev->ndev,
3076 "Freeing lost SKB %p, from queue %d, index %d.\n",
3077 tx_ring_desc->skb, j,
3078 tx_ring_desc->index);
c4e84bde
RM
3079 ql_unmap_send(qdev, tx_ring_desc,
3080 tx_ring_desc->map_cnt);
3081 dev_kfree_skb(tx_ring_desc->skb);
3082 tx_ring_desc->skb = NULL;
3083 }
3084 }
3085 }
3086}
3087
c4e84bde
RM
3088static void ql_free_mem_resources(struct ql_adapter *qdev)
3089{
3090 int i;
3091
3092 for (i = 0; i < qdev->tx_ring_count; i++)
3093 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3094 for (i = 0; i < qdev->rx_ring_count; i++)
3095 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3096 ql_free_shadow_space(qdev);
3097}
3098
3099static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3100{
3101 int i;
3102
3103 /* Allocate space for our shadow registers and such. */
3104 if (ql_alloc_shadow_space(qdev))
3105 return -ENOMEM;
3106
3107 for (i = 0; i < qdev->rx_ring_count; i++) {
3108 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
ae9540f7
JP
3109 netif_err(qdev, ifup, qdev->ndev,
3110 "RX resource allocation failed.\n");
c4e84bde
RM
3111 goto err_mem;
3112 }
3113 }
3114 /* Allocate tx queue resources */
3115 for (i = 0; i < qdev->tx_ring_count; i++) {
3116 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
ae9540f7
JP
3117 netif_err(qdev, ifup, qdev->ndev,
3118 "TX resource allocation failed.\n");
c4e84bde
RM
3119 goto err_mem;
3120 }
3121 }
3122 return 0;
3123
3124err_mem:
3125 ql_free_mem_resources(qdev);
3126 return -ENOMEM;
3127}
3128
3129/* Set up the rx ring control block and pass it to the chip.
3130 * The control block is defined as
3131 * "Completion Queue Initialization Control Block", or cqicb.
3132 */
3133static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3134{
3135 struct cqicb *cqicb = &rx_ring->cqicb;
3136 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 3137 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 3138 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 3139 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
3140 void __iomem *doorbell_area =
3141 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3142 int err = 0;
3143 u16 bq_len;
d4a4aba6 3144 u64 tmp;
b8facca0
RM
3145 __le64 *base_indirect_ptr;
3146 int page_entries;
c4e84bde
RM
3147
3148 /* Set up the shadow registers for this ring. */
3149 rx_ring->prod_idx_sh_reg = shadow_reg;
3150 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
7c734359 3151 *rx_ring->prod_idx_sh_reg = 0;
c4e84bde
RM
3152 shadow_reg += sizeof(u64);
3153 shadow_reg_dma += sizeof(u64);
3154 rx_ring->lbq_base_indirect = shadow_reg;
3155 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
3156 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3157 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
3158 rx_ring->sbq_base_indirect = shadow_reg;
3159 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3160
3161 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 3162 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3163 rx_ring->cnsmr_idx = 0;
3164 rx_ring->curr_entry = rx_ring->cq_base;
3165
3166 /* PCI doorbell mem area + 0x04 for valid register */
3167 rx_ring->valid_db_reg = doorbell_area + 0x04;
3168
3169 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 3170 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
3171
3172 /* PCI doorbell mem area + 0x1c */
8668ae92 3173 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
3174
3175 memset((void *)cqicb, 0, sizeof(struct cqicb));
3176 cqicb->msix_vect = rx_ring->irq;
3177
459caf5a
RM
3178 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3179 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 3180
97345524 3181 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 3182
97345524 3183 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
3184
3185 /*
3186 * Set up the control block load flags.
3187 */
3188 cqicb->flags = FLAGS_LC | /* Load queue base address */
3189 FLAGS_LV | /* Load MSI-X vector */
3190 FLAGS_LI; /* Load irq delay values */
3191 if (rx_ring->lbq_len) {
3192 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 3193 tmp = (u64)rx_ring->lbq_base_dma;
43d620c8 3194 base_indirect_ptr = rx_ring->lbq_base_indirect;
b8facca0
RM
3195 page_entries = 0;
3196 do {
3197 *base_indirect_ptr = cpu_to_le64(tmp);
3198 tmp += DB_PAGE_SIZE;
3199 base_indirect_ptr++;
3200 page_entries++;
3201 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
3202 cqicb->lbq_addr =
3203 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
3204 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3205 (u16) rx_ring->lbq_buf_size;
3206 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3207 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3208 (u16) rx_ring->lbq_len;
c4e84bde 3209 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 3210 rx_ring->lbq_prod_idx = 0;
c4e84bde 3211 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
3212 rx_ring->lbq_clean_idx = 0;
3213 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
3214 }
3215 if (rx_ring->sbq_len) {
3216 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 3217 tmp = (u64)rx_ring->sbq_base_dma;
43d620c8 3218 base_indirect_ptr = rx_ring->sbq_base_indirect;
b8facca0
RM
3219 page_entries = 0;
3220 do {
3221 *base_indirect_ptr = cpu_to_le64(tmp);
3222 tmp += DB_PAGE_SIZE;
3223 base_indirect_ptr++;
3224 page_entries++;
3225 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
3226 cqicb->sbq_addr =
3227 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 3228 cqicb->sbq_buf_size =
52e55f3c 3229 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
459caf5a
RM
3230 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3231 (u16) rx_ring->sbq_len;
c4e84bde 3232 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 3233 rx_ring->sbq_prod_idx = 0;
c4e84bde 3234 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
3235 rx_ring->sbq_clean_idx = 0;
3236 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
3237 }
3238 switch (rx_ring->type) {
3239 case TX_Q:
c4e84bde
RM
3240 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3241 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3242 break;
c4e84bde
RM
3243 case RX_Q:
3244 /* Inbound completion handling rx_rings run in
3245 * separate NAPI contexts.
3246 */
3247 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3248 64);
3249 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3250 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3251 break;
3252 default:
ae9540f7
JP
3253 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3254 "Invalid rx_ring->type = %d.\n", rx_ring->type);
c4e84bde 3255 }
c4e84bde
RM
3256 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3257 CFG_LCQ, rx_ring->cq_id);
3258 if (err) {
ae9540f7 3259 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
c4e84bde
RM
3260 return err;
3261 }
c4e84bde
RM
3262 return err;
3263}
3264
3265static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3266{
3267 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3268 void __iomem *doorbell_area =
3269 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3270 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3271 (tx_ring->wq_id * sizeof(u64));
3272 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3273 (tx_ring->wq_id * sizeof(u64));
3274 int err = 0;
3275
3276 /*
3277 * Assign doorbell registers for this tx_ring.
3278 */
3279 /* TX PCI doorbell mem area for tx producer index */
8668ae92 3280 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3281 tx_ring->prod_idx = 0;
3282 /* TX PCI doorbell mem area + 0x04 */
3283 tx_ring->valid_db_reg = doorbell_area + 0x04;
3284
3285 /*
3286 * Assign shadow registers for this tx_ring.
3287 */
3288 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3289 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3290
3291 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3292 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3293 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3294 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3295 wqicb->rid = 0;
97345524 3296 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 3297
97345524 3298 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
3299
3300 ql_init_tx_ring(qdev, tx_ring);
3301
e332471c 3302 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
3303 (u16) tx_ring->wq_id);
3304 if (err) {
ae9540f7 3305 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
c4e84bde
RM
3306 return err;
3307 }
c4e84bde
RM
3308 return err;
3309}
3310
3311static void ql_disable_msix(struct ql_adapter *qdev)
3312{
3313 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3314 pci_disable_msix(qdev->pdev);
3315 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3316 kfree(qdev->msi_x_entry);
3317 qdev->msi_x_entry = NULL;
3318 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3319 pci_disable_msi(qdev->pdev);
3320 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3321 }
3322}
3323
a4ab6137
RM
3324/* We start by trying to get the number of vectors
3325 * stored in qdev->intr_count. If we don't get that
3326 * many then we reduce the count and try again.
3327 */
c4e84bde
RM
3328static void ql_enable_msix(struct ql_adapter *qdev)
3329{
a4ab6137 3330 int i, err;
c4e84bde 3331
c4e84bde 3332 /* Get the MSIX vectors. */
a5a62a1c 3333 if (qlge_irq_type == MSIX_IRQ) {
c4e84bde
RM
3334 /* Try to alloc space for the msix struct,
3335 * if it fails then go to MSI/legacy.
3336 */
a4ab6137 3337 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
3338 sizeof(struct msix_entry),
3339 GFP_KERNEL);
3340 if (!qdev->msi_x_entry) {
a5a62a1c 3341 qlge_irq_type = MSI_IRQ;
c4e84bde
RM
3342 goto msi;
3343 }
3344
a4ab6137 3345 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
3346 qdev->msi_x_entry[i].entry = i;
3347
50b483a1
AG
3348 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3349 1, qdev->intr_count);
a4ab6137 3350 if (err < 0) {
c4e84bde
RM
3351 kfree(qdev->msi_x_entry);
3352 qdev->msi_x_entry = NULL;
ae9540f7
JP
3353 netif_warn(qdev, ifup, qdev->ndev,
3354 "MSI-X Enable failed, trying MSI.\n");
a5a62a1c 3355 qlge_irq_type = MSI_IRQ;
50b483a1
AG
3356 } else {
3357 qdev->intr_count = err;
a4ab6137 3358 set_bit(QL_MSIX_ENABLED, &qdev->flags);
ae9540f7
JP
3359 netif_info(qdev, ifup, qdev->ndev,
3360 "MSI-X Enabled, got %d vectors.\n",
3361 qdev->intr_count);
a4ab6137 3362 return;
c4e84bde
RM
3363 }
3364 }
3365msi:
a4ab6137 3366 qdev->intr_count = 1;
a5a62a1c 3367 if (qlge_irq_type == MSI_IRQ) {
c4e84bde
RM
3368 if (!pci_enable_msi(qdev->pdev)) {
3369 set_bit(QL_MSI_ENABLED, &qdev->flags);
ae9540f7
JP
3370 netif_info(qdev, ifup, qdev->ndev,
3371 "Running with MSI interrupts.\n");
c4e84bde
RM
3372 return;
3373 }
3374 }
a5a62a1c 3375 qlge_irq_type = LEG_IRQ;
ae9540f7
JP
3376 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3377 "Running with legacy interrupts.\n");
c4e84bde
RM
3378}
3379
39aa8165
RM
3380 /* Each vector services 1 RSS ring and 1 or more
3381 * TX completion rings. This function loops through
3382 * the TX completion rings and assigns the vector that
3383 * will service it. An example would be if there are
3384 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3385 * This would mean that vector 0 would service RSS ring 0
25985edc 3386 * and TX completion rings 0,1,2 and 3. Vector 1 would
39aa8165
RM
3387 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3388 */
3389static void ql_set_tx_vect(struct ql_adapter *qdev)
3390{
3391 int i, j, vect;
3392 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3393
3394 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3395 /* Assign irq vectors to TX rx_rings.*/
3396 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3397 i < qdev->rx_ring_count; i++) {
3398 if (j == tx_rings_per_vector) {
3399 vect++;
3400 j = 0;
3401 }
3402 qdev->rx_ring[i].irq = vect;
3403 j++;
3404 }
3405 } else {
3406 /* For single vector all rings have an irq
3407 * of zero.
3408 */
3409 for (i = 0; i < qdev->rx_ring_count; i++)
3410 qdev->rx_ring[i].irq = 0;
3411 }
3412}
3413
3414/* Set the interrupt mask for this vector. Each vector
3415 * will service 1 RSS ring and 1 or more TX completion
3416 * rings. This function sets up a bit mask per vector
3417 * that indicates which rings it services.
3418 */
3419static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3420{
3421 int j, vect = ctx->intr;
3422 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3423
3424 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3425 /* Add the RSS ring serviced by this vector
3426 * to the mask.
3427 */
3428 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3429 /* Add the TX ring(s) serviced by this vector
3430 * to the mask. */
3431 for (j = 0; j < tx_rings_per_vector; j++) {
3432 ctx->irq_mask |=
3433 (1 << qdev->rx_ring[qdev->rss_ring_count +
3434 (vect * tx_rings_per_vector) + j].cq_id);
3435 }
3436 } else {
3437 /* For single vector we just shift each queue's
3438 * ID into the mask.
3439 */
3440 for (j = 0; j < qdev->rx_ring_count; j++)
3441 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3442 }
3443}
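
/* Worked example, assuming cq_id simply mirrors the ring index: with the
 * 2-vector, 8-TX-completion-ring layout described above ql_set_tx_vect(),
 * RSS rings own cq_ids 0-1 and TX completion rings own cq_ids 2-9, so
 * vector 0 ends up with irq_mask covering bits 0 and 2-5, and vector 1
 * with bits 1 and 6-9. qlge_isr() and ql_napi_poll_msix() test this mask
 * against ISR1 or a ring's cq_id to decide whether the vector owns that
 * completion ring.
 */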
3444
c4e84bde
RM
3445/*
3446 * Here we build the intr_context structures based on
3447 * our rx_ring count and intr vector count.
3448 * The intr_context structure is used to hook each vector
3449 * to possibly different handlers.
3450 */
3451static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3452{
3453 int i = 0;
3454 struct intr_context *intr_context = &qdev->intr_context[0];
3455
c4e84bde
RM
3456 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3457 /* Each rx_ring has its
3458 * own intr_context since we have separate
3459 * vectors for each queue.
c4e84bde
RM
3460 */
3461 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3462 qdev->rx_ring[i].irq = i;
3463 intr_context->intr = i;
3464 intr_context->qdev = qdev;
39aa8165
RM
3465 /* Set up this vector's bit-mask that indicates
3466 * which queues it services.
3467 */
3468 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
3469 /*
3470 * We set up each vector's enable/disable/read bits so
3471 * there are no bit/mask calculations in the critical path.
3472 */
3473 intr_context->intr_en_mask =
3474 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3475 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3476 | i;
3477 intr_context->intr_dis_mask =
3478 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3479 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3480 INTR_EN_IHD | i;
3481 intr_context->intr_read_mask =
3482 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3483 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3484 i;
39aa8165
RM
3485 if (i == 0) {
3486 /* The first vector/queue handles
3487 * broadcast/multicast, fatal errors,
3488 * and firmware events. This is in addition
3489 * to normal inbound NAPI processing.
c4e84bde 3490 */
39aa8165 3491 intr_context->handler = qlge_isr;
b2014ff8
RM
3492 sprintf(intr_context->name, "%s-rx-%d",
3493 qdev->ndev->name, i);
3494 } else {
c4e84bde 3495 /*
39aa8165 3496 * Inbound queues handle unicast frames only.
c4e84bde 3497 */
39aa8165
RM
3498 intr_context->handler = qlge_msix_rx_isr;
3499 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 3500 qdev->ndev->name, i);
c4e84bde
RM
3501 }
3502 }
3503 } else {
3504 /*
3505 * All rx_rings use the same intr_context since
3506 * there is only one vector.
3507 */
3508 intr_context->intr = 0;
3509 intr_context->qdev = qdev;
3510 /*
3511 * We set up each vector's enable/disable/read bits so
3512 * there are no bit/mask calculations in the critical path.
3513 */
3514 intr_context->intr_en_mask =
3515 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3516 intr_context->intr_dis_mask =
3517 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3518 INTR_EN_TYPE_DISABLE;
3519 intr_context->intr_read_mask =
3520 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3521 /*
3522 * Single interrupt means one handler for all rings.
3523 */
3524 intr_context->handler = qlge_isr;
3525 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3526 /* Set up this vector's bit-mask that indicates
3527 * which queues it services. In this case there is
3528 * a single vector so it will service all RSS and
3529 * TX completion rings.
3530 */
3531 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3532 }
39aa8165
RM
3533 /* Tell the TX completion rings which MSIx vector
3534 * they will be using.
3535 */
3536 ql_set_tx_vect(qdev);
c4e84bde
RM
3537}
3538
3539static void ql_free_irq(struct ql_adapter *qdev)
3540{
3541 int i;
3542 struct intr_context *intr_context = &qdev->intr_context[0];
3543
3544 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3545 if (intr_context->hooked) {
3546 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3547 free_irq(qdev->msi_x_entry[i].vector,
3548 &qdev->rx_ring[i]);
c4e84bde
RM
3549 } else {
3550 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
c4e84bde
RM
3551 }
3552 }
3553 }
3554 ql_disable_msix(qdev);
3555}
3556
3557static int ql_request_irq(struct ql_adapter *qdev)
3558{
3559 int i;
3560 int status = 0;
3561 struct pci_dev *pdev = qdev->pdev;
3562 struct intr_context *intr_context = &qdev->intr_context[0];
3563
3564 ql_resolve_queues_to_irqs(qdev);
3565
3566 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3567 atomic_set(&intr_context->irq_cnt, 0);
3568 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3569 status = request_irq(qdev->msi_x_entry[i].vector,
3570 intr_context->handler,
3571 0,
3572 intr_context->name,
3573 &qdev->rx_ring[i]);
3574 if (status) {
ae9540f7
JP
3575 netif_err(qdev, ifup, qdev->ndev,
3576 "Failed request for MSIX interrupt %d.\n",
3577 i);
c4e84bde 3578 goto err_irq;
c4e84bde
RM
3579 }
3580 } else {
ae9540f7
JP
3581 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3582 "trying msi or legacy interrupts.\n");
3583 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3584 "%s: irq = %d.\n", __func__, pdev->irq);
3585 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3586 "%s: context->name = %s.\n", __func__,
3587 intr_context->name);
3588 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3589 "%s: dev_id = 0x%p.\n", __func__,
3590 &qdev->rx_ring[0]);
c4e84bde
RM
3591 status =
3592 request_irq(pdev->irq, qlge_isr,
3593 test_bit(QL_MSI_ENABLED,
3594 &qdev->
3595 flags) ? 0 : IRQF_SHARED,
3596 intr_context->name, &qdev->rx_ring[0]);
3597 if (status)
3598 goto err_irq;
3599
ae9540f7
JP
3600 netif_err(qdev, ifup, qdev->ndev,
3601 "Hooked intr %d, queue type %s, with name %s.\n",
3602 i,
3603 qdev->rx_ring[0].type == DEFAULT_Q ?
3604 "DEFAULT_Q" :
3605 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3606 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3607 intr_context->name);
c4e84bde
RM
3608 }
3609 intr_context->hooked = 1;
3610 }
3611 return status;
3612err_irq:
a42c3a28 3613 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
c4e84bde
RM
3614 ql_free_irq(qdev);
3615 return status;
3616}
3617
3618static int ql_start_rss(struct ql_adapter *qdev)
3619{
215faf9c
JP
3620 static const u8 init_hash_seed[] = {
3621 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3622 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3623 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3624 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3625 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3626 };
c4e84bde
RM
3627 struct ricb *ricb = &qdev->ricb;
3628 int status = 0;
3629 int i;
3630 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3631
e332471c 3632 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3633
b2014ff8 3634 ricb->base_cq = RSS_L4K;
c4e84bde 3635 ricb->flags =
541ae28c
RM
3636 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3637 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3638
3639 /*
3640 * Fill out the Indirection Table.
3641 */
541ae28c
RM
3642 for (i = 0; i < 1024; i++)
3643 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3644
541ae28c
RM
3645 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3646 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3647
e332471c 3648 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde 3649 if (status) {
ae9540f7 3650 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
c4e84bde
RM
3651 return status;
3652 }
c4e84bde
RM
3653 return status;
3654}
3655
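As a rough illustration of the RICB built above: the 1024-entry indirection table spreads hash values across the RSS rings, and the 0x3ff mask suggests the hardware indexes it with the low 10 bits of the hash. The standalone sketch below assumes exactly that (an assumption, not something stated in this file) and also assumes rss_ring_count is a power of two, which the fill pattern relies on.

#include <stdio.h>

int main(void)
{
	unsigned char hash_id[1024];
	int rss_ring_count = 4;			/* example ring count */
	unsigned int i, hash = 0x9e3779b9;	/* arbitrary example hash */

	for (i = 0; i < 1024; i++)
		hash_id[i] = i & (rss_ring_count - 1);

	/* Low 10 bits pick the slot; the slot names the RSS ring. */
	printf("hash 0x%x -> slot %u -> RSS ring %u\n",
	       hash, hash & 0x3ff, hash_id[hash & 0x3ff]);
	return 0;
}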
a5f59dc9 3656static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3657{
a5f59dc9 3658 int i, status = 0;
c4e84bde 3659
8587ea35
RM
3660 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3661 if (status)
3662 return status;
c4e84bde
RM
3663 /* Clear all the entries in the routing table. */
3664 for (i = 0; i < 16; i++) {
3665 status = ql_set_routing_reg(qdev, i, 0, 0);
3666 if (status) {
ae9540f7
JP
3667 netif_err(qdev, ifup, qdev->ndev,
3668 "Failed to init routing register for CAM packets.\n");
a5f59dc9 3669 break;
c4e84bde
RM
3670 }
3671 }
a5f59dc9
RM
3672 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3673 return status;
3674}
3675
3676/* Initialize the frame-to-queue routing. */
3677static int ql_route_initialize(struct ql_adapter *qdev)
3678{
3679 int status = 0;
3680
fd21cf52
RM
3681 /* Clear all the entries in the routing table. */
3682 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3683 if (status)
3684 return status;
3685
fd21cf52 3686 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3687 if (status)
fd21cf52 3688 return status;
c4e84bde 3689
fbc2ac33
RM
3690 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3691 RT_IDX_IP_CSUM_ERR, 1);
3692 if (status) {
3693 netif_err(qdev, ifup, qdev->ndev,
3694 "Failed to init routing register "
3695 "for IP CSUM error packets.\n");
3696 goto exit;
3697 }
3698 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3699 RT_IDX_TU_CSUM_ERR, 1);
c4e84bde 3700 if (status) {
ae9540f7 3701 netif_err(qdev, ifup, qdev->ndev,
fbc2ac33
RM
3702 "Failed to init routing register "
3703 "for TCP/UDP CSUM error packets.\n");
8587ea35 3704 goto exit;
c4e84bde
RM
3705 }
3706 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3707 if (status) {
ae9540f7
JP
3708 netif_err(qdev, ifup, qdev->ndev,
3709 "Failed to init routing register for broadcast packets.\n");
8587ea35 3710 goto exit;
c4e84bde
RM
3711 }
3712 /* If we have more than one inbound queue, then turn on RSS in the
3713 * routing block.
3714 */
3715 if (qdev->rss_ring_count > 1) {
3716 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3717 RT_IDX_RSS_MATCH, 1);
3718 if (status) {
ae9540f7
JP
3719 netif_err(qdev, ifup, qdev->ndev,
3720 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3721 goto exit;
c4e84bde
RM
3722 }
3723 }
3724
3725 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3726 RT_IDX_CAM_HIT, 1);
8587ea35 3727 if (status)
ae9540f7
JP
3728 netif_err(qdev, ifup, qdev->ndev,
3729 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3730exit:
3731 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3732 return status;
3733}
3734
2ee1e272 3735int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3736{
7fab3bfe 3737 int status, set;
bb58b5b6 3738
7fab3bfe
RM
3739 /* Check if the link is up and use that to
3740 * determine whether we are setting or clearing
3741 * the MAC address in the CAM.
3742 */
3743 set = ql_read32(qdev, STS);
3744 set &= qdev->port_link_up;
3745 status = ql_set_mac_addr(qdev, set);
bb58b5b6 3746 if (status) {
ae9540f7 3747 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
bb58b5b6
RM
3748 return status;
3749 }
3750
3751 status = ql_route_initialize(qdev);
3752 if (status)
ae9540f7 3753 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
bb58b5b6
RM
3754
3755 return status;
3756}
3757
c4e84bde
RM
3758static int ql_adapter_initialize(struct ql_adapter *qdev)
3759{
3760 u32 value, mask;
3761 int i;
3762 int status = 0;
3763
3764 /*
3765 * Set up the System register to halt on errors.
3766 */
3767 value = SYS_EFE | SYS_FAE;
3768 mask = value << 16;
3769 ql_write32(qdev, SYS, mask | value);
3770
c9cf0a04 3771 /* Set the default queue and VLAN behavior. */
a45adbe8
JK
3772 value = NIC_RCV_CFG_DFQ;
3773 mask = NIC_RCV_CFG_DFQ_MASK;
3774 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3775 value |= NIC_RCV_CFG_RV;
3776 mask |= (NIC_RCV_CFG_RV << 16);
3777 }
c4e84bde
RM
3778 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3779
3780 /* Set the MPI interrupt to enabled. */
3781 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3782
3783 /* Enable the function, set pagesize, enable error checking. */
3784 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
572c526f
RM
3785 FSC_EC | FSC_VM_PAGE_4K;
3786 value |= SPLT_SETTING;
c4e84bde
RM
3787
3788 /* Set/clear header splitting. */
3789 mask = FSC_VM_PAGESIZE_MASK |
3790 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3791 ql_write32(qdev, FSC, mask | value);
3792
572c526f 3793 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
c4e84bde 3794
a3b71939
RM
3795 /* Set RX packet routing to use port/pci function on which the
3796 * packet arrived, in addition to the usual frame routing.
3797 * This is helpful on bonding where both interfaces can have
3798 * the same MAC address.
3799 */
3800 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
bc083ce9
RM
3801 /* Reroute all packets to our Interface.
3802 * They may have been routed to MPI firmware
3803 * due to WOL.
3804 */
3805 value = ql_read32(qdev, MGMT_RCV_CFG);
3806 value &= ~MGMT_RCV_CFG_RM;
3807 mask = 0xffff0000;
3808
3809 /* Sticky reg needs clearing due to WOL. */
3810 ql_write32(qdev, MGMT_RCV_CFG, mask);
3811 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3812
3813 /* Default WOL is enable on Mezz cards */
3814 if (qdev->pdev->subsystem_device == 0x0068 ||
3815 qdev->pdev->subsystem_device == 0x0180)
3816 qdev->wol = WAKE_MAGIC;
a3b71939 3817
c4e84bde
RM
3818 /* Start up the rx queues. */
3819 for (i = 0; i < qdev->rx_ring_count; i++) {
3820 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3821 if (status) {
ae9540f7
JP
3822 netif_err(qdev, ifup, qdev->ndev,
3823 "Failed to start rx ring[%d].\n", i);
c4e84bde
RM
3824 return status;
3825 }
3826 }
3827
3828 /* If there is more than one inbound completion queue
3829 * then download a RICB to configure RSS.
3830 */
3831 if (qdev->rss_ring_count > 1) {
3832 status = ql_start_rss(qdev);
3833 if (status) {
ae9540f7 3834 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
c4e84bde
RM
3835 return status;
3836 }
3837 }
3838
3839 /* Start up the tx queues. */
3840 for (i = 0; i < qdev->tx_ring_count; i++) {
3841 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3842 if (status) {
ae9540f7
JP
3843 netif_err(qdev, ifup, qdev->ndev,
3844 "Failed to start tx ring[%d].\n", i);
c4e84bde
RM
3845 return status;
3846 }
3847 }
3848
b0c2aadf
RM
3849 /* Initialize the port and set the max framesize. */
3850 status = qdev->nic_ops->port_initialize(qdev);
80928860 3851 if (status)
ae9540f7 3852 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
c4e84bde 3853
bb58b5b6
RM
3854 /* Set up the MAC address and frame routing filter. */
3855 status = ql_cam_route_initialize(qdev);
c4e84bde 3856 if (status) {
ae9540f7
JP
3857 netif_err(qdev, ifup, qdev->ndev,
3858 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3859 return status;
3860 }
3861
3862 /* Start NAPI for the RSS queues. */
19257f5a 3863 for (i = 0; i < qdev->rss_ring_count; i++)
c4e84bde 3864 napi_enable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3865
3866 return status;
3867}
3868
3869/* Issue soft reset to chip. */
3870static int ql_adapter_reset(struct ql_adapter *qdev)
3871{
3872 u32 value;
c4e84bde 3873 int status = 0;
a5f59dc9 3874 unsigned long end_jiffies;
c4e84bde 3875
a5f59dc9
RM
3876 /* Clear all the entries in the routing table. */
3877 status = ql_clear_routing_entries(qdev);
3878 if (status) {
ae9540f7 3879 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
a5f59dc9
RM
3880 return status;
3881 }
3882
da92b393
JK
3883 /* If the bit is set, skip the mailbox command and
3884 * clear the bit; otherwise we are in the normal reset process.
3885 */
3886 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3887 /* Stop management traffic. */
3888 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3889
3890 /* Wait for the NIC and MGMNT FIFOs to empty. */
3891 ql_wait_fifo_empty(qdev);
3892 } else
3893 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
84087f4d 3894
c4e84bde 3895 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3896
3f6e785f 3897 end_jiffies = jiffies + usecs_to_jiffies(30);
c4e84bde
RM
3898 do {
3899 value = ql_read32(qdev, RST_FO);
3900 if ((value & RST_FO_FR) == 0)
3901 break;
a75ee7f1
RM
3902 cpu_relax();
3903 } while (time_before(jiffies, end_jiffies));
c4e84bde 3904
c4e84bde 3905 if (value & RST_FO_FR) {
ae9540f7
JP
3906 netif_err(qdev, ifdown, qdev->ndev,
3907 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3908 status = -ETIMEDOUT;
c4e84bde
RM
3909 }
3910
84087f4d
RM
3911 /* Resume management traffic. */
3912 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3913 return status;
3914}
3915
3916static void ql_display_dev_info(struct net_device *ndev)
3917{
b16fed0a 3918 struct ql_adapter *qdev = netdev_priv(ndev);
c4e84bde 3919
ae9540f7
JP
3920 netif_info(qdev, probe, qdev->ndev,
3921 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3922 "XG Roll = %d, XG Rev = %d.\n",
3923 qdev->func,
3924 qdev->port,
3925 qdev->chip_rev_id & 0x0000000f,
3926 qdev->chip_rev_id >> 4 & 0x0000000f,
3927 qdev->chip_rev_id >> 8 & 0x0000000f,
3928 qdev->chip_rev_id >> 12 & 0x0000000f);
3929 netif_info(qdev, probe, qdev->ndev,
3930 "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3931}
3932
ac409215 3933static int ql_wol(struct ql_adapter *qdev)
bc083ce9
RM
3934{
3935 int status = 0;
3936 u32 wol = MB_WOL_DISABLE;
3937
3938 /* The CAM is still intact after a reset, but if we
3939 * are doing WOL, then we may need to program the
3940 * routing regs. We would also need to issue the mailbox
3941 * commands to instruct the MPI what to do per the ethtool
3942 * settings.
3943 */
3944
3945 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3946 WAKE_MCAST | WAKE_BCAST)) {
ae9540f7 3947 netif_err(qdev, ifdown, qdev->ndev,
fd9071ec 3948 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
ae9540f7 3949 qdev->wol);
bc083ce9
RM
3950 return -EINVAL;
3951 }
3952
3953 if (qdev->wol & WAKE_MAGIC) {
3954 status = ql_mb_wol_set_magic(qdev, 1);
3955 if (status) {
ae9540f7
JP
3956 netif_err(qdev, ifdown, qdev->ndev,
3957 "Failed to set magic packet on %s.\n",
3958 qdev->ndev->name);
bc083ce9
RM
3959 return status;
3960 } else
ae9540f7
JP
3961 netif_info(qdev, drv, qdev->ndev,
3962 "Enabled magic packet successfully on %s.\n",
3963 qdev->ndev->name);
bc083ce9
RM
3964
3965 wol |= MB_WOL_MAGIC_PKT;
3966 }
3967
3968 if (qdev->wol) {
bc083ce9
RM
3969 wol |= MB_WOL_MODE_ON;
3970 status = ql_mb_wol_mode(qdev, wol);
ae9540f7
JP
3971 netif_err(qdev, drv, qdev->ndev,
3972 "WOL %s (wol code 0x%x) on %s\n",
318ae2ed 3973 (status == 0) ? "Successfully set" : "Failed",
ae9540f7 3974 wol, qdev->ndev->name);
bc083ce9
RM
3975 }
3976
3977 return status;
3978}
3979
c5dadddb 3980static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
c4e84bde 3981{
c4e84bde 3982
6497b607
RM
3983 /* Don't kill the reset worker thread if we
3984 * are in the process of recovery.
3985 */
3986 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3987 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3988 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3989 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3990 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 3991 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
bcc2cb3b 3992 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c5dadddb
BL
3993}
3994
3995static int ql_adapter_down(struct ql_adapter *qdev)
3996{
3997 int i, status = 0;
3998
3999 ql_link_off(qdev);
4000
4001 ql_cancel_all_work_sync(qdev);
c4e84bde 4002
39aa8165
RM
4003 for (i = 0; i < qdev->rss_ring_count; i++)
4004 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
4005
4006 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4007
4008 ql_disable_interrupts(qdev);
4009
4010 ql_tx_ring_clean(qdev);
4011
6b318cb3
RM
4012 /* Call netif_napi_del() from a common point.
4013 */
b2014ff8 4014 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
4015 netif_napi_del(&qdev->rx_ring[i].napi);
4016
c4e84bde
RM
4017 status = ql_adapter_reset(qdev);
4018 if (status)
ae9540f7
JP
4019 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4020 qdev->func);
fe5f0980
BL
4021 ql_free_rx_buffers(qdev);
4022
c4e84bde
RM
4023 return status;
4024}
4025
4026static int ql_adapter_up(struct ql_adapter *qdev)
4027{
4028 int err = 0;
4029
c4e84bde
RM
4030 err = ql_adapter_initialize(qdev);
4031 if (err) {
ae9540f7 4032 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
c4e84bde
RM
4033 goto err_init;
4034 }
c4e84bde 4035 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 4036 ql_alloc_rx_buffers(qdev);
8b007de1
RM
4037 /* If the port is initialized and the
4038 * link is up, then turn on the carrier.
4039 */
4040 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4041 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 4042 ql_link_on(qdev);
f2c05004
RM
4043 /* Restore rx mode. */
4044 clear_bit(QL_ALLMULTI, &qdev->flags);
4045 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4046 qlge_set_multicast_list(qdev->ndev);
4047
c1b60092
RM
4048 /* Restore vlan setting. */
4049 qlge_restore_vlan(qdev);
4050
c4e84bde
RM
4051 ql_enable_interrupts(qdev);
4052 ql_enable_all_completion_interrupts(qdev);
1e213303 4053 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
4054
4055 return 0;
4056err_init:
4057 ql_adapter_reset(qdev);
4058 return err;
4059}
4060
c4e84bde
RM
4061static void ql_release_adapter_resources(struct ql_adapter *qdev)
4062{
4063 ql_free_mem_resources(qdev);
4064 ql_free_irq(qdev);
4065}
4066
4067static int ql_get_adapter_resources(struct ql_adapter *qdev)
4068{
4069 int status = 0;
4070
4071 if (ql_alloc_mem_resources(qdev)) {
ae9540f7 4072 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
c4e84bde
RM
4073 return -ENOMEM;
4074 }
4075 status = ql_request_irq(qdev);
c4e84bde
RM
4076 return status;
4077}
4078
4079static int qlge_close(struct net_device *ndev)
4080{
4081 struct ql_adapter *qdev = netdev_priv(ndev);
4082
4bbd1a19
RM
4083 /* If we hit the pci_channel_io_perm_failure
4084 * condition, then we have already
4085 * brought the adapter down.
4086 */
4087 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
ae9540f7 4088 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4bbd1a19
RM
4089 clear_bit(QL_EEH_FATAL, &qdev->flags);
4090 return 0;
4091 }
4092
c4e84bde
RM
4093 /*
4094 * Wait for device to recover from a reset.
4095 * (Rarely happens, but possible.)
4096 */
4097 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4098 msleep(1);
4099 ql_adapter_down(qdev);
4100 ql_release_adapter_resources(qdev);
c4e84bde
RM
4101 return 0;
4102}
4103
4104static int ql_configure_rings(struct ql_adapter *qdev)
4105{
4106 int i;
4107 struct rx_ring *rx_ring;
4108 struct tx_ring *tx_ring;
a4ab6137 4109 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
7c734359
RM
4110 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4111 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4112
4113 qdev->lbq_buf_order = get_order(lbq_buf_len);
a4ab6137
RM
4114
4115 /* In a perfect world we have one RSS ring for each CPU
4116 * and each has it's own vector. To do that we ask for
4117 * cpu_cnt vectors. ql_enable_msix() will adjust the
4118 * vector count to what we actually get. We then
4119 * allocate an RSS ring for each.
4120 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 4121 */
a4ab6137
RM
4122 qdev->intr_count = cpu_cnt;
4123 ql_enable_msix(qdev);
4124 /* Adjust the RSS ring count to the actual vector count. */
4125 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 4126 qdev->tx_ring_count = cpu_cnt;
b2014ff8 4127 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
c4e84bde 4128
c4e84bde
RM
4129 for (i = 0; i < qdev->tx_ring_count; i++) {
4130 tx_ring = &qdev->tx_ring[i];
e332471c 4131 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
4132 tx_ring->qdev = qdev;
4133 tx_ring->wq_id = i;
4134 tx_ring->wq_len = qdev->tx_ring_size;
4135 tx_ring->wq_size =
4136 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4137
4138 /*
4139 * The completion queue IDs for the tx rings start
39aa8165 4140 * immediately after the rss rings.
c4e84bde 4141 */
39aa8165 4142 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
4143 }
4144
4145 for (i = 0; i < qdev->rx_ring_count; i++) {
4146 rx_ring = &qdev->rx_ring[i];
e332471c 4147 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
4148 rx_ring->qdev = qdev;
4149 rx_ring->cq_id = i;
4150 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 4151 if (i < qdev->rss_ring_count) {
39aa8165
RM
4152 /*
4153 * Inbound (RSS) queues.
4154 */
c4e84bde
RM
4155 rx_ring->cq_len = qdev->rx_ring_size;
4156 rx_ring->cq_size =
4157 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4158 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4159 rx_ring->lbq_size =
2c9a0d41 4160 rx_ring->lbq_len * sizeof(__le64);
7c734359 4161 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
c4e84bde
RM
4162 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4163 rx_ring->sbq_size =
2c9a0d41 4164 rx_ring->sbq_len * sizeof(__le64);
52e55f3c 4165 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
b2014ff8
RM
4166 rx_ring->type = RX_Q;
4167 } else {
c4e84bde
RM
4168 /*
4169 * Outbound queue handles outbound completions only.
4170 */
4171 /* outbound cq is same size as tx_ring it services. */
4172 rx_ring->cq_len = qdev->tx_ring_size;
4173 rx_ring->cq_size =
4174 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4175 rx_ring->lbq_len = 0;
4176 rx_ring->lbq_size = 0;
4177 rx_ring->lbq_buf_size = 0;
4178 rx_ring->sbq_len = 0;
4179 rx_ring->sbq_size = 0;
4180 rx_ring->sbq_buf_size = 0;
4181 rx_ring->type = TX_Q;
c4e84bde
RM
4182 }
4183 }
4184 return 0;
4185}
4186
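A tiny worked example of the ring accounting above, with purely illustrative numbers (8 online CPUs, but only 4 MSI-X vectors granted by ql_enable_msix()):

#include <stdio.h>

int main(void)
{
	int cpu_cnt = 8;		/* min(MAX_CPUS, num_online_cpus()) */
	int vectors_granted = 4;	/* what MSI-X enable actually returned */
	int intr_count = vectors_granted;
	int rss_ring_count = intr_count;	/* one RSS ring per vector */
	int tx_ring_count = cpu_cnt;		/* one TX ring per CPU */
	int rx_ring_count = tx_ring_count + rss_ring_count;
	int i;

	printf("rss=%d tx=%d total cq=%d\n",
	       rss_ring_count, tx_ring_count, rx_ring_count);
	/* TX completion cq_ids follow the RSS cq_ids: 4..11 here. */
	for (i = 0; i < tx_ring_count; i++)
		printf("tx_ring[%d] completes on cq_id %d\n",
		       i, rss_ring_count + i);
	return 0;
}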
4187static int qlge_open(struct net_device *ndev)
4188{
4189 int err = 0;
4190 struct ql_adapter *qdev = netdev_priv(ndev);
4191
74e12435
RM
4192 err = ql_adapter_reset(qdev);
4193 if (err)
4194 return err;
4195
c4e84bde
RM
4196 err = ql_configure_rings(qdev);
4197 if (err)
4198 return err;
4199
4200 err = ql_get_adapter_resources(qdev);
4201 if (err)
4202 goto error_up;
4203
4204 err = ql_adapter_up(qdev);
4205 if (err)
4206 goto error_up;
4207
4208 return err;
4209
4210error_up:
4211 ql_release_adapter_resources(qdev);
c4e84bde
RM
4212 return err;
4213}
4214
7c734359
RM
4215static int ql_change_rx_buffers(struct ql_adapter *qdev)
4216{
4217 struct rx_ring *rx_ring;
4218 int i, status;
4219 u32 lbq_buf_len;
4220
25985edc 4221 /* Wait for an outstanding reset to complete. */
7c734359 4222 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
351434c6
DC
4223 int i = 4;
4224
4225 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
ae9540f7
JP
4226 netif_err(qdev, ifup, qdev->ndev,
4227 "Waiting for adapter UP...\n");
7c734359
RM
4228 ssleep(1);
4229 }
4230
4231 if (!i) {
ae9540f7
JP
4232 netif_err(qdev, ifup, qdev->ndev,
4233 "Timed out waiting for adapter UP\n");
7c734359
RM
4234 return -ETIMEDOUT;
4235 }
4236 }
4237
4238 status = ql_adapter_down(qdev);
4239 if (status)
4240 goto error;
4241
4242 /* Get the new rx buffer size. */
4243 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4244 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4245 qdev->lbq_buf_order = get_order(lbq_buf_len);
4246
4247 for (i = 0; i < qdev->rss_ring_count; i++) {
4248 rx_ring = &qdev->rx_ring[i];
4249 /* Set the new size. */
4250 rx_ring->lbq_buf_size = lbq_buf_len;
4251 }
4252
4253 status = ql_adapter_up(qdev);
4254 if (status)
4255 goto error;
4256
4257 return status;
4258error:
ae9540f7
JP
4259 netif_alert(qdev, ifup, qdev->ndev,
4260 "Driver up/down cycle failed, closing device.\n");
7c734359
RM
4261 set_bit(QL_ADAPTER_UP, &qdev->flags);
4262 dev_close(qdev->ndev);
4263 return status;
4264}
4265
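For reference, a minimal user-space approximation of the large-buffer sizing done in ql_change_rx_buffers() and ql_configure_rings(); the sizes and page size here are placeholders, not the driver's real LARGE_BUFFER_MIN/MAX_SIZE values, and example_get_order() only mimics the kernel's get_order().

#include <stdio.h>

#define EXAMPLE_LB_MIN	2048	/* placeholder for LARGE_BUFFER_MIN_SIZE */
#define EXAMPLE_LB_MAX	8192	/* placeholder for LARGE_BUFFER_MAX_SIZE */
#define EXAMPLE_PAGE	4096

static int example_get_order(unsigned long size)
{
	unsigned long pages = (size + EXAMPLE_PAGE - 1) / EXAMPLE_PAGE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	int mtu = 9000;
	unsigned long lbq_buf_len = (mtu > 1500) ? EXAMPLE_LB_MAX : EXAMPLE_LB_MIN;

	printf("mtu %d -> lbq_buf_len %lu -> page order %d\n",
	       mtu, lbq_buf_len, example_get_order(lbq_buf_len));
	return 0;
}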
c4e84bde
RM
4266static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4267{
4268 struct ql_adapter *qdev = netdev_priv(ndev);
7c734359 4269 int status;
c4e84bde
RM
4270
4271 if (ndev->mtu == 1500 && new_mtu == 9000) {
ae9540f7 4272 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
c4e84bde 4273 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
ae9540f7 4274 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
c4e84bde
RM
4275 } else
4276 return -EINVAL;
7c734359
RM
4277
4278 queue_delayed_work(qdev->workqueue,
4279 &qdev->mpi_port_cfg_work, 3*HZ);
4280
746079da
BL
4281 ndev->mtu = new_mtu;
4282
7c734359 4283 if (!netif_running(qdev->ndev)) {
7c734359
RM
4284 return 0;
4285 }
4286
7c734359
RM
4287 status = ql_change_rx_buffers(qdev);
4288 if (status) {
ae9540f7
JP
4289 netif_err(qdev, ifup, qdev->ndev,
4290 "Changing MTU failed.\n");
7c734359
RM
4291 }
4292
4293 return status;
c4e84bde
RM
4294}
4295
4296static struct net_device_stats *qlge_get_stats(struct net_device
4297 *ndev)
4298{
885ee398
RM
4299 struct ql_adapter *qdev = netdev_priv(ndev);
4300 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4301 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4302 unsigned long pkts, mcast, dropped, errors, bytes;
4303 int i;
4304
4305 /* Get RX stats. */
4306 pkts = mcast = dropped = errors = bytes = 0;
4307 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4308 pkts += rx_ring->rx_packets;
4309 bytes += rx_ring->rx_bytes;
4310 dropped += rx_ring->rx_dropped;
4311 errors += rx_ring->rx_errors;
4312 mcast += rx_ring->rx_multicast;
4313 }
4314 ndev->stats.rx_packets = pkts;
4315 ndev->stats.rx_bytes = bytes;
4316 ndev->stats.rx_dropped = dropped;
4317 ndev->stats.rx_errors = errors;
4318 ndev->stats.multicast = mcast;
4319
4320 /* Get TX stats. */
4321 pkts = errors = bytes = 0;
4322 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4323 pkts += tx_ring->tx_packets;
4324 bytes += tx_ring->tx_bytes;
4325 errors += tx_ring->tx_errors;
4326 }
4327 ndev->stats.tx_packets = pkts;
4328 ndev->stats.tx_bytes = bytes;
4329 ndev->stats.tx_errors = errors;
bcc90f55 4330 return &ndev->stats;
c4e84bde
RM
4331}
4332
ac409215 4333static void qlge_set_multicast_list(struct net_device *ndev)
c4e84bde 4334{
b16fed0a 4335 struct ql_adapter *qdev = netdev_priv(ndev);
22bedad3 4336 struct netdev_hw_addr *ha;
cc288f54 4337 int i, status;
c4e84bde 4338
cc288f54
RM
4339 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4340 if (status)
4341 return;
c4e84bde
RM
4342 /*
4343 * Set or clear promiscuous mode if a
4344 * transition is taking place.
4345 */
4346 if (ndev->flags & IFF_PROMISC) {
4347 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4348 if (ql_set_routing_reg
4349 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
ae9540f7 4350 netif_err(qdev, hw, qdev->ndev,
25985edc 4351 "Failed to set promiscuous mode.\n");
c4e84bde
RM
4352 } else {
4353 set_bit(QL_PROMISCUOUS, &qdev->flags);
4354 }
4355 }
4356 } else {
4357 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4358 if (ql_set_routing_reg
4359 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
ae9540f7 4360 netif_err(qdev, hw, qdev->ndev,
25985edc 4361 "Failed to clear promiscuous mode.\n");
c4e84bde
RM
4362 } else {
4363 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4364 }
4365 }
4366 }
4367
4368 /*
4369 * Set or clear all multicast mode if a
4370 * transition is taking place.
4371 */
4372 if ((ndev->flags & IFF_ALLMULTI) ||
4cd24eaf 4373 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
c4e84bde
RM
4374 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4375 if (ql_set_routing_reg
4376 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
ae9540f7
JP
4377 netif_err(qdev, hw, qdev->ndev,
4378 "Failed to set all-multi mode.\n");
c4e84bde
RM
4379 } else {
4380 set_bit(QL_ALLMULTI, &qdev->flags);
4381 }
4382 }
4383 } else {
4384 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4385 if (ql_set_routing_reg
4386 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
ae9540f7
JP
4387 netif_err(qdev, hw, qdev->ndev,
4388 "Failed to clear all-multi mode.\n");
c4e84bde
RM
4389 } else {
4390 clear_bit(QL_ALLMULTI, &qdev->flags);
4391 }
4392 }
4393 }
4394
4cd24eaf 4395 if (!netdev_mc_empty(ndev)) {
cc288f54
RM
4396 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4397 if (status)
4398 goto exit;
f9dcbcc9 4399 i = 0;
22bedad3
JP
4400 netdev_for_each_mc_addr(ha, ndev) {
4401 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
c4e84bde 4402 MAC_ADDR_TYPE_MULTI_MAC, i)) {
ae9540f7
JP
4403 netif_err(qdev, hw, qdev->ndev,
4404 "Failed to loadmulticast address.\n");
cc288f54 4405 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4406 goto exit;
4407 }
f9dcbcc9
JP
4408 i++;
4409 }
cc288f54 4410 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4411 if (ql_set_routing_reg
4412 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
ae9540f7
JP
4413 netif_err(qdev, hw, qdev->ndev,
4414 "Failed to set multicast match mode.\n");
c4e84bde
RM
4415 } else {
4416 set_bit(QL_ALLMULTI, &qdev->flags);
4417 }
4418 }
4419exit:
8587ea35 4420 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
4421}
4422
4423static int qlge_set_mac_address(struct net_device *ndev, void *p)
4424{
b16fed0a 4425 struct ql_adapter *qdev = netdev_priv(ndev);
c4e84bde 4426 struct sockaddr *addr = p;
cc288f54 4427 int status;
c4e84bde 4428
c4e84bde
RM
4429 if (!is_valid_ether_addr(addr->sa_data))
4430 return -EADDRNOTAVAIL;
4431 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
801e9096
RM
4432 /* Update local copy of current mac address. */
4433 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
c4e84bde 4434
cc288f54
RM
4435 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4436 if (status)
4437 return status;
cc288f54
RM
4438 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4439 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54 4440 if (status)
ae9540f7 4441 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
cc288f54
RM
4442 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4443 return status;
c4e84bde
RM
4444}
4445
4446static void qlge_tx_timeout(struct net_device *ndev)
4447{
b16fed0a 4448 struct ql_adapter *qdev = netdev_priv(ndev);
6497b607 4449 ql_queue_asic_error(qdev);
c4e84bde
RM
4450}
4451
4452static void ql_asic_reset_work(struct work_struct *work)
4453{
4454 struct ql_adapter *qdev =
4455 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 4456 int status;
f2c0d8df 4457 rtnl_lock();
db98812f
RM
4458 status = ql_adapter_down(qdev);
4459 if (status)
4460 goto error;
4461
4462 status = ql_adapter_up(qdev);
4463 if (status)
4464 goto error;
2cd6dbaa
RM
4465
4466 /* Restore rx mode. */
4467 clear_bit(QL_ALLMULTI, &qdev->flags);
4468 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4469 qlge_set_multicast_list(qdev->ndev);
4470
f2c0d8df 4471 rtnl_unlock();
db98812f
RM
4472 return;
4473error:
ae9540f7
JP
4474 netif_alert(qdev, ifup, qdev->ndev,
4475 "Driver up/down cycle failed, closing device\n");
f2c0d8df 4476
db98812f
RM
4477 set_bit(QL_ADAPTER_UP, &qdev->flags);
4478 dev_close(qdev->ndev);
4479 rtnl_unlock();
c4e84bde
RM
4480}
4481
ef9c7ab4 4482static const struct nic_operations qla8012_nic_ops = {
b0c2aadf
RM
4483 .get_flash = ql_get_8012_flash_params,
4484 .port_initialize = ql_8012_port_initialize,
4485};
4486
ef9c7ab4 4487static const struct nic_operations qla8000_nic_ops = {
cdca8d02
RM
4488 .get_flash = ql_get_8000_flash_params,
4489 .port_initialize = ql_8000_port_initialize,
4490};
4491
e4552f51
RM
4492/* Find the pcie function number for the other NIC
4493 * on this chip. Since both NIC functions share a
4494 * common firmware we have the lowest enabled function
4495 * do any common work. Examples would be resetting
4496 * after a fatal firmware error, or doing a firmware
4497 * coredump.
4498 */
4499static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4500{
4501 int status = 0;
4502 u32 temp;
4503 u32 nic_func1, nic_func2;
4504
4505 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4506 &temp);
4507 if (status)
4508 return status;
4509
4510 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4511 MPI_TEST_NIC_FUNC_MASK);
4512 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4513 MPI_TEST_NIC_FUNC_MASK);
4514
4515 if (qdev->func == nic_func1)
4516 qdev->alt_func = nic_func2;
4517 else if (qdev->func == nic_func2)
4518 qdev->alt_func = nic_func1;
4519 else
4520 status = -EIO;
4521
4522 return status;
4523}
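A small standalone illustration of how the alternate function is picked here and how ql_get_board_info() below then derives the port index; the bit layout of the config word is invented for the example (the real MPI_TEST_* shifts and mask live in qlge.h), and only the selection logic mirrors the driver.

#include <stdio.h>

int main(void)
{
	/* Pretend the word packs two 3-bit function numbers at bits 0 and 4. */
	unsigned int cfg = (0x2 << 4) | 0x1;	/* nic_func1 = 1, nic_func2 = 2 */
	unsigned int mask = 0x7;
	unsigned int nic_func1 = cfg & mask;
	unsigned int nic_func2 = (cfg >> 4) & mask;
	unsigned int func = 1;			/* this device's PCIe function */
	unsigned int alt_func = (func == nic_func1) ? nic_func2 : nic_func1;
	unsigned int port = (func < alt_func) ? 0 : 1;	/* lower func owns port 0 */

	printf("func %u, alt_func %u -> port %u\n", func, alt_func, port);
	return 0;
}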
b0c2aadf 4524
e4552f51 4525static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 4526{
e4552f51 4527 int status;
c4e84bde
RM
4528 qdev->func =
4529 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
4530 if (qdev->func > 3)
4531 return -EIO;
4532
4533 status = ql_get_alt_pcie_func(qdev);
4534 if (status)
4535 return status;
4536
4537 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4538 if (qdev->port) {
c4e84bde
RM
4539 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4540 qdev->port_link_up = STS_PL1;
4541 qdev->port_init = STS_PI1;
4542 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4543 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4544 } else {
4545 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4546 qdev->port_link_up = STS_PL0;
4547 qdev->port_init = STS_PI0;
4548 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4549 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4550 }
4551 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
4552 qdev->device_id = qdev->pdev->device;
4553 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4554 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
4555 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4556 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 4557 return status;
c4e84bde
RM
4558}
4559
4560static void ql_release_all(struct pci_dev *pdev)
4561{
4562 struct net_device *ndev = pci_get_drvdata(pdev);
4563 struct ql_adapter *qdev = netdev_priv(ndev);
4564
4565 if (qdev->workqueue) {
4566 destroy_workqueue(qdev->workqueue);
4567 qdev->workqueue = NULL;
4568 }
39aa8165 4569
c4e84bde 4570 if (qdev->reg_base)
8668ae92 4571 iounmap(qdev->reg_base);
c4e84bde
RM
4572 if (qdev->doorbell_area)
4573 iounmap(qdev->doorbell_area);
8aae2600 4574 vfree(qdev->mpi_coredump);
c4e84bde 4575 pci_release_regions(pdev);
c4e84bde
RM
4576}
4577
1dd06ae8
GKH
4578static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4579 int cards_found)
c4e84bde
RM
4580{
4581 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 4582 int err = 0;
c4e84bde 4583
e332471c 4584 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
4585 err = pci_enable_device(pdev);
4586 if (err) {
4587 dev_err(&pdev->dev, "PCI device enable failed.\n");
4588 return err;
4589 }
4590
ebd6e774
RM
4591 qdev->ndev = ndev;
4592 qdev->pdev = pdev;
4593 pci_set_drvdata(pdev, ndev);
c4e84bde 4594
bc9167f3
RM
4595 /* Set PCIe read request size */
4596 err = pcie_set_readrq(pdev, 4096);
4597 if (err) {
4598 dev_err(&pdev->dev, "Set readrq failed.\n");
4f9a91c8 4599 goto err_out1;
bc9167f3
RM
4600 }
4601
c4e84bde
RM
4602 err = pci_request_regions(pdev, DRV_NAME);
4603 if (err) {
4604 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 4605 return err;
c4e84bde
RM
4606 }
4607
4608 pci_set_master(pdev);
6a35528a 4609 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 4610 set_bit(QL_DMA64, &qdev->flags);
6a35528a 4611 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 4612 } else {
284901a9 4613 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 4614 if (!err)
284901a9 4615 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
4616 }
4617
4618 if (err) {
4619 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4f9a91c8 4620 goto err_out2;
c4e84bde
RM
4621 }
4622
73475339
RM
4623 /* Set PCIe reset type for EEH to fundamental. */
4624 pdev->needs_freset = 1;
6d190c6e 4625 pci_save_state(pdev);
c4e84bde
RM
4626 qdev->reg_base =
4627 ioremap_nocache(pci_resource_start(pdev, 1),
4628 pci_resource_len(pdev, 1));
4629 if (!qdev->reg_base) {
4630 dev_err(&pdev->dev, "Register mapping failed.\n");
4631 err = -ENOMEM;
4f9a91c8 4632 goto err_out2;
c4e84bde
RM
4633 }
4634
4635 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4636 qdev->doorbell_area =
4637 ioremap_nocache(pci_resource_start(pdev, 3),
4638 pci_resource_len(pdev, 3));
4639 if (!qdev->doorbell_area) {
4640 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4641 err = -ENOMEM;
4f9a91c8 4642 goto err_out2;
c4e84bde
RM
4643 }
4644
e4552f51
RM
4645 err = ql_get_board_info(qdev);
4646 if (err) {
4647 dev_err(&pdev->dev, "Register access failed.\n");
4648 err = -EIO;
4f9a91c8 4649 goto err_out2;
e4552f51 4650 }
c4e84bde
RM
4651 qdev->msg_enable = netif_msg_init(debug, default_msg);
4652 spin_lock_init(&qdev->hw_lock);
4653 spin_lock_init(&qdev->stats_lock);
4654
8aae2600
RM
4655 if (qlge_mpi_coredump) {
4656 qdev->mpi_coredump =
4657 vmalloc(sizeof(struct ql_mpi_coredump));
4658 if (qdev->mpi_coredump == NULL) {
8aae2600 4659 err = -ENOMEM;
ce96bc86 4660 goto err_out2;
8aae2600 4661 }
d5c1da56
RM
4662 if (qlge_force_coredump)
4663 set_bit(QL_FRC_COREDUMP, &qdev->flags);
8aae2600 4664 }
c4e84bde 4665 /* make sure the EEPROM is good */
b0c2aadf 4666 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
4667 if (err) {
4668 dev_err(&pdev->dev, "Invalid FLASH.\n");
4f9a91c8 4669 goto err_out2;
c4e84bde
RM
4670 }
4671
801e9096
RM
4672 /* Keep local copy of current mac address. */
4673 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
c4e84bde
RM
4674
4675 /* Set up the default ring sizes. */
4676 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4677 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4678
4679 /* Set up the coalescing parameters. */
4680 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4681 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4682 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4683 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4684
4685 /*
4686 * Set up the operating parameters.
4687 */
df656bf6
KC
4688 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4689 ndev->name);
c4e84bde
RM
4690 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4691 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4692 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 4693 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 4694 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
8aae2600 4695 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
bcc2cb3b 4696 init_completion(&qdev->ide_completion);
4d7b6b5d 4697 mutex_init(&qdev->mpi_mutex);
c4e84bde
RM
4698
4699 if (!cards_found) {
4700 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4701 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4702 DRV_NAME, DRV_VERSION);
4703 }
4704 return 0;
4f9a91c8 4705err_out2:
c4e84bde 4706 ql_release_all(pdev);
4f9a91c8 4707err_out1:
c4e84bde
RM
4708 pci_disable_device(pdev);
4709 return err;
4710}
4711
25ed7849
SH
4712static const struct net_device_ops qlge_netdev_ops = {
4713 .ndo_open = qlge_open,
4714 .ndo_stop = qlge_close,
4715 .ndo_start_xmit = qlge_send,
4716 .ndo_change_mtu = qlge_change_mtu,
4717 .ndo_get_stats = qlge_get_stats,
afc4b13d 4718 .ndo_set_rx_mode = qlge_set_multicast_list,
25ed7849
SH
4719 .ndo_set_mac_address = qlge_set_mac_address,
4720 .ndo_validate_addr = eth_validate_addr,
4721 .ndo_tx_timeout = qlge_tx_timeout,
18c49b91
JP
4722 .ndo_fix_features = qlge_fix_features,
4723 .ndo_set_features = qlge_set_features,
01e6b953
RM
4724 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4725 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
25ed7849
SH
4726};
4727
df7e828c 4728static void ql_timer(struct timer_list *t)
15c052fc 4729{
df7e828c 4730 struct ql_adapter *qdev = from_timer(qdev, t, timer);
15c052fc
RM
4731 u32 var = 0;
4732
4733 var = ql_read32(qdev, STS);
4734 if (pci_channel_offline(qdev->pdev)) {
ae9540f7 4735 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
15c052fc
RM
4736 return;
4737 }
4738
72046d84 4739 mod_timer(&qdev->timer, jiffies + (5*HZ));
15c052fc
RM
4740}
4741
5d8e8726 4742static int qlge_probe(struct pci_dev *pdev,
1dd06ae8 4743 const struct pci_device_id *pci_entry)
c4e84bde
RM
4744{
4745 struct net_device *ndev = NULL;
4746 struct ql_adapter *qdev = NULL;
4747 static int cards_found = 0;
4748 int err = 0;
4749
1e213303 4750 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
9eb8738d 4751 min(MAX_CPUS, netif_get_num_default_rss_queues()));
c4e84bde
RM
4752 if (!ndev)
4753 return -ENOMEM;
4754
4755 err = ql_init_device(pdev, ndev, cards_found);
4756 if (err < 0) {
4757 free_netdev(ndev);
4758 return err;
4759 }
4760
4761 qdev = netdev_priv(ndev);
4762 SET_NETDEV_DEV(ndev, &pdev->dev);
a45adbe8
JK
4763 ndev->hw_features = NETIF_F_SG |
4764 NETIF_F_IP_CSUM |
4765 NETIF_F_TSO |
4766 NETIF_F_TSO_ECN |
4767 NETIF_F_HW_VLAN_CTAG_TX |
4768 NETIF_F_HW_VLAN_CTAG_RX |
4769 NETIF_F_HW_VLAN_CTAG_FILTER |
4770 NETIF_F_RXCSUM;
4771 ndev->features = ndev->hw_features;
1a0150a9 4772 ndev->vlan_features = ndev->hw_features;
51bb352f 4773 /* vlan gets same features (except vlan filter) */
f6d1ac4b
VY
4774 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4775 NETIF_F_HW_VLAN_CTAG_TX |
4776 NETIF_F_HW_VLAN_CTAG_RX);
c4e84bde
RM
4777
4778 if (test_bit(QL_DMA64, &qdev->flags))
4779 ndev->features |= NETIF_F_HIGHDMA;
4780
4781 /*
4782 * Set up net_device structure.
4783 */
4784 ndev->tx_queue_len = qdev->tx_ring_size;
4785 ndev->irq = pdev->irq;
25ed7849
SH
4786
4787 ndev->netdev_ops = &qlge_netdev_ops;
7ad24ea4 4788 ndev->ethtool_ops = &qlge_ethtool_ops;
c4e84bde 4789 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4790
d894be57
JW
4791 /* MTU range: this driver only supports 1500 or 9000, so this only
4792 * filters out values above or below, and we'll rely on
4793 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4794 */
4795 ndev->min_mtu = ETH_DATA_LEN;
4796 ndev->max_mtu = 9000;
4797
c4e84bde
RM
4798 err = register_netdev(ndev);
4799 if (err) {
4800 dev_err(&pdev->dev, "net device registration failed.\n");
4801 ql_release_all(pdev);
4802 pci_disable_device(pdev);
4d2593cc 4803 free_netdev(ndev);
c4e84bde
RM
4804 return err;
4805 }
15c052fc
RM
4806 /* Start up the timer to trigger EEH if
4807 * the bus goes dead
4808 */
df7e828c
KC
4809 timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4810 mod_timer(&qdev->timer, jiffies + (5*HZ));
6a473308 4811 ql_link_off(qdev);
c4e84bde 4812 ql_display_dev_info(ndev);
9dfbbaa6 4813 atomic_set(&qdev->lb_count, 0);
c4e84bde
RM
4814 cards_found++;
4815 return 0;
4816}
4817
9dfbbaa6
RM
4818netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4819{
4820 return qlge_send(skb, ndev);
4821}
4822
4823int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4824{
4825 return ql_clean_inbound_rx_ring(rx_ring, budget);
4826}
4827
5d8e8726 4828static void qlge_remove(struct pci_dev *pdev)
c4e84bde
RM
4829{
4830 struct net_device *ndev = pci_get_drvdata(pdev);
15c052fc
RM
4831 struct ql_adapter *qdev = netdev_priv(ndev);
4832 del_timer_sync(&qdev->timer);
c5dadddb 4833 ql_cancel_all_work_sync(qdev);
c4e84bde
RM
4834 unregister_netdev(ndev);
4835 ql_release_all(pdev);
4836 pci_disable_device(pdev);
4837 free_netdev(ndev);
4838}
4839
6d190c6e
RM
4840/* Clean up resources without touching hardware. */
4841static void ql_eeh_close(struct net_device *ndev)
4842{
4843 int i;
4844 struct ql_adapter *qdev = netdev_priv(ndev);
4845
4846 if (netif_carrier_ok(ndev)) {
4847 netif_carrier_off(ndev);
4848 netif_stop_queue(ndev);
4849 }
4850
7ae80abd 4851 /* Disabling the timer */
c5dadddb 4852 ql_cancel_all_work_sync(qdev);
6d190c6e
RM
4853
4854 for (i = 0; i < qdev->rss_ring_count; i++)
4855 netif_napi_del(&qdev->rx_ring[i].napi);
4856
4857 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4858 ql_tx_ring_clean(qdev);
4859 ql_free_rx_buffers(qdev);
4860 ql_release_adapter_resources(qdev);
4861}
4862
c4e84bde
RM
4863/*
4864 * This callback is called by the PCI subsystem whenever
4865 * a PCI bus error is detected.
4866 */
4867static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4868 enum pci_channel_state state)
4869{
4870 struct net_device *ndev = pci_get_drvdata(pdev);
4bbd1a19 4871 struct ql_adapter *qdev = netdev_priv(ndev);
fbc663ce 4872
6d190c6e
RM
4873 switch (state) {
4874 case pci_channel_io_normal:
4875 return PCI_ERS_RESULT_CAN_RECOVER;
4876 case pci_channel_io_frozen:
4877 netif_device_detach(ndev);
3275c0c6 4878 del_timer_sync(&qdev->timer);
6d190c6e
RM
4879 if (netif_running(ndev))
4880 ql_eeh_close(ndev);
4881 pci_disable_device(pdev);
4882 return PCI_ERS_RESULT_NEED_RESET;
4883 case pci_channel_io_perm_failure:
4884 dev_err(&pdev->dev,
4885 "%s: pci_channel_io_perm_failure.\n", __func__);
3275c0c6 4886 del_timer_sync(&qdev->timer);
4bbd1a19
RM
4887 ql_eeh_close(ndev);
4888 set_bit(QL_EEH_FATAL, &qdev->flags);
fbc663ce 4889 return PCI_ERS_RESULT_DISCONNECT;
6d190c6e 4890 }
c4e84bde
RM
4891
4892 /* Request a slot reset. */
4893 return PCI_ERS_RESULT_NEED_RESET;
4894}
4895
4896/*
4897 * This callback is called after the PCI bus has been reset.
4898 * Basically, this tries to restart the card from scratch.
4899 * This is a shortened version of the device probe/discovery code,
4900 * it resembles the first-half of the () routine.
4901 */
4902static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4903{
4904 struct net_device *ndev = pci_get_drvdata(pdev);
4905 struct ql_adapter *qdev = netdev_priv(ndev);
4906
6d190c6e
RM
4907 pdev->error_state = pci_channel_io_normal;
4908
4909 pci_restore_state(pdev);
c4e84bde 4910 if (pci_enable_device(pdev)) {
ae9540f7
JP
4911 netif_err(qdev, ifup, qdev->ndev,
4912 "Cannot re-enable PCI device after reset.\n");
c4e84bde
RM
4913 return PCI_ERS_RESULT_DISCONNECT;
4914 }
c4e84bde 4915 pci_set_master(pdev);
a112fd4c
RM
4916
4917 if (ql_adapter_reset(qdev)) {
ae9540f7 4918 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4bbd1a19 4919 set_bit(QL_EEH_FATAL, &qdev->flags);
a112fd4c
RM
4920 return PCI_ERS_RESULT_DISCONNECT;
4921 }
4922
c4e84bde
RM
4923 return PCI_ERS_RESULT_RECOVERED;
4924}
4925
4926static void qlge_io_resume(struct pci_dev *pdev)
4927{
4928 struct net_device *ndev = pci_get_drvdata(pdev);
4929 struct ql_adapter *qdev = netdev_priv(ndev);
6d190c6e 4930 int err = 0;
c4e84bde 4931
c4e84bde 4932 if (netif_running(ndev)) {
6d190c6e
RM
4933 err = qlge_open(ndev);
4934 if (err) {
ae9540f7
JP
4935 netif_err(qdev, ifup, qdev->ndev,
4936 "Device initialization failed after reset.\n");
c4e84bde
RM
4937 return;
4938 }
6d190c6e 4939 } else {
ae9540f7
JP
4940 netif_err(qdev, ifup, qdev->ndev,
4941 "Device was not running prior to EEH.\n");
c4e84bde 4942 }
72046d84 4943 mod_timer(&qdev->timer, jiffies + (5*HZ));
c4e84bde
RM
4944 netif_device_attach(ndev);
4945}
4946
3646f0e5 4947static const struct pci_error_handlers qlge_err_handler = {
c4e84bde
RM
4948 .error_detected = qlge_io_error_detected,
4949 .slot_reset = qlge_io_slot_reset,
4950 .resume = qlge_io_resume,
4951};
4952
4953static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4954{
4955 struct net_device *ndev = pci_get_drvdata(pdev);
4956 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4957 int err;
c4e84bde
RM
4958
4959 netif_device_detach(ndev);
15c052fc 4960 del_timer_sync(&qdev->timer);
c4e84bde
RM
4961
4962 if (netif_running(ndev)) {
4963 err = ql_adapter_down(qdev);
4964 if (!err)
4965 return err;
4966 }
4967
bc083ce9 4968 ql_wol(qdev);
c4e84bde
RM
4969 err = pci_save_state(pdev);
4970 if (err)
4971 return err;
4972
4973 pci_disable_device(pdev);
4974
4975 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4976
4977 return 0;
4978}
4979
04da2cf9 4980#ifdef CONFIG_PM
c4e84bde
RM
4981static int qlge_resume(struct pci_dev *pdev)
4982{
4983 struct net_device *ndev = pci_get_drvdata(pdev);
4984 struct ql_adapter *qdev = netdev_priv(ndev);
4985 int err;
4986
4987 pci_set_power_state(pdev, PCI_D0);
4988 pci_restore_state(pdev);
4989 err = pci_enable_device(pdev);
4990 if (err) {
ae9540f7 4991 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
c4e84bde
RM
4992 return err;
4993 }
4994 pci_set_master(pdev);
4995
4996 pci_enable_wake(pdev, PCI_D3hot, 0);
4997 pci_enable_wake(pdev, PCI_D3cold, 0);
4998
4999 if (netif_running(ndev)) {
5000 err = ql_adapter_up(qdev);
5001 if (err)
5002 return err;
5003 }
5004
72046d84 5005 mod_timer(&qdev->timer, jiffies + (5*HZ));
c4e84bde
RM
5006 netif_device_attach(ndev);
5007
5008 return 0;
5009}
04da2cf9 5010#endif /* CONFIG_PM */
c4e84bde
RM
5011
5012static void qlge_shutdown(struct pci_dev *pdev)
5013{
5014 qlge_suspend(pdev, PMSG_SUSPEND);
5015}
5016
5017static struct pci_driver qlge_driver = {
5018 .name = DRV_NAME,
5019 .id_table = qlge_pci_tbl,
5020 .probe = qlge_probe,
5d8e8726 5021 .remove = qlge_remove,
c4e84bde
RM
5022#ifdef CONFIG_PM
5023 .suspend = qlge_suspend,
5024 .resume = qlge_resume,
5025#endif
5026 .shutdown = qlge_shutdown,
5027 .err_handler = &qlge_err_handler
5028};
5029
70a611de 5030module_pci_driver(qlge_driver);