1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/if_vlan.h>
38#include <linux/delay.h>
39#include <linux/mm.h>
40#include <linux/vmalloc.h>
41#include <net/ip6_checksum.h>
42
43#include "qlge.h"
44
45char qlge_driver_name[] = DRV_NAME;
46const char qlge_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49MODULE_DESCRIPTION(DRV_STRING " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg =
54 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55/* NETIF_MSG_TIMER | */
56 NETIF_MSG_IFDOWN |
57 NETIF_MSG_IFUP |
58 NETIF_MSG_RX_ERR |
59 NETIF_MSG_TX_ERR |
60/* NETIF_MSG_TX_QUEUED | */
61/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65static int debug = 0x00007fff; /* defaults above */
66module_param(debug, int, 0);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69#define MSIX_IRQ 0
70#define MSI_IRQ 1
71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76static int qlge_mpi_coredump;
77module_param(qlge_mpi_coredump, int, 0);
78MODULE_PARM_DESC(qlge_mpi_coredump,
79 "Option to enable MPI firmware dump. "
80 "Default is OFF - Do Not allocate memory. ");
81
82static int qlge_force_coredump;
83module_param(qlge_force_coredump, int, 0);
84MODULE_PARM_DESC(qlge_force_coredump,
85 "Option to allow force of firmware core dump. "
86 "Default is OFF - Do not allow.");
87
88static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
89 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
91 /* required last entry */
92 {0,}
93};
94
95MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
96
97static int ql_wol(struct ql_adapter *qdev);
98static void qlge_set_multicast_list(struct net_device *ndev);
99
100/* This hardware semaphore causes exclusive access to
101 * resources shared between the NIC driver, MPI firmware,
102 * FCOE firmware and the FC driver.
103 */
104static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
105{
106 u32 sem_bits = 0;
107
108 switch (sem_mask) {
109 case SEM_XGMAC0_MASK:
110 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
111 break;
112 case SEM_XGMAC1_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
114 break;
115 case SEM_ICB_MASK:
116 sem_bits = SEM_SET << SEM_ICB_SHIFT;
117 break;
118 case SEM_MAC_ADDR_MASK:
119 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
120 break;
121 case SEM_FLASH_MASK:
122 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
123 break;
124 case SEM_PROBE_MASK:
125 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
126 break;
127 case SEM_RT_IDX_MASK:
128 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
129 break;
130 case SEM_PROC_REG_MASK:
131 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
132 break;
133 default:
134 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
135 return -EINVAL;
136 }
137
138 ql_write32(qdev, SEM, sem_bits | sem_mask);
139 return !(ql_read32(qdev, SEM) & sem_bits);
140}
141
142int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
143{
144 unsigned int wait_count = 30;
145 do {
146 if (!ql_sem_trylock(qdev, sem_mask))
147 return 0;
148 udelay(100);
149 } while (--wait_count);
150 return -ETIMEDOUT;
151}
152
153void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
154{
155 ql_write32(qdev, SEM, sem_mask);
156 ql_read32(qdev, SEM); /* flush */
157}
158
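/* Editor's note (illustrative, not part of the original source): callers
 * bracket access to a shared resource with the semaphore helpers above,
 * mirroring the flash-read path later in this file:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...read or write the protected registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */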
159/* This function waits for a specific bit to come ready
160 * in a given register. It is used mostly by the initialize
161 * process, but is also used in kernel thread API such as
162 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
163 */
164int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
165{
166 u32 temp;
167 int count = UDELAY_COUNT;
168
169 while (count) {
170 temp = ql_read32(qdev, reg);
171
172 /* check for errors */
173 if (temp & err_bit) {
174 netif_alert(qdev, probe, qdev->ndev,
175 "register 0x%.08x access error, value = 0x%.08x!.\n",
176 reg, temp);
177 return -EIO;
178 } else if (temp & bit)
179 return 0;
180 udelay(UDELAY_DELAY);
181 count--;
182 }
183 netif_alert(qdev, probe, qdev->ndev,
184 "Timed out waiting for reg %x to come ready.\n", reg);
185 return -ETIMEDOUT;
186}
187
188/* The CFG register is used to download TX and RX control blocks
189 * to the chip. This function waits for an operation to complete.
190 */
191static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
192{
193 int count = UDELAY_COUNT;
194 u32 temp;
195
196 while (count) {
197 temp = ql_read32(qdev, CFG);
198 if (temp & CFG_LE)
199 return -EIO;
200 if (!(temp & bit))
201 return 0;
202 udelay(UDELAY_DELAY);
203 count--;
204 }
205 return -ETIMEDOUT;
206}
207
208
209/* Used to issue init control blocks to hw. Maps control block,
210 * sets address, triggers download, waits for completion.
211 */
212int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
213 u16 q_id)
214{
215 u64 map;
216 int status = 0;
217 int direction;
218 u32 mask;
219 u32 value;
220
221 direction =
222 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
223 PCI_DMA_FROMDEVICE;
224
225 map = pci_map_single(qdev->pdev, ptr, size, direction);
226 if (pci_dma_mapping_error(qdev->pdev, map)) {
227 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
228 return -ENOMEM;
229 }
230
231 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
232 if (status)
233 return status;
234
235 status = ql_wait_cfg(qdev, bit);
236 if (status) {
237 netif_err(qdev, ifup, qdev->ndev,
238 "Timed out waiting for CFG to come ready.\n");
239 goto exit;
240 }
241
242 ql_write32(qdev, ICB_L, (u32) map);
243 ql_write32(qdev, ICB_H, (u32) (map >> 32));
244
245 mask = CFG_Q_MASK | (bit << 16);
246 value = bit | (q_id << CFG_Q_SHIFT);
247 ql_write32(qdev, CFG, (mask | value));
248
249 /*
250 * Wait for the bit to clear after signaling hw.
251 */
252 status = ql_wait_cfg(qdev, bit);
253exit:
254 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
255 pci_unmap_single(qdev->pdev, map, size, direction);
256 return status;
257}
258
259/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
260int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
261 u32 *value)
262{
263 u32 offset = 0;
264 int status;
265
266 switch (type) {
267 case MAC_ADDR_TYPE_MULTI_MAC:
268 case MAC_ADDR_TYPE_CAM_MAC:
269 {
270 status =
271 ql_wait_reg_rdy(qdev,
272 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
273 if (status)
274 goto exit;
275 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
276 (index << MAC_ADDR_IDX_SHIFT) | /* index */
277 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
278 status =
279 ql_wait_reg_rdy(qdev,
280 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
281 if (status)
282 goto exit;
283 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
284 status =
285 ql_wait_reg_rdy(qdev,
286 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287 if (status)
288 goto exit;
289 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290 (index << MAC_ADDR_IDX_SHIFT) | /* index */
291 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292 status =
293 ql_wait_reg_rdy(qdev,
294 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
295 if (status)
296 goto exit;
297 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298 if (type == MAC_ADDR_TYPE_CAM_MAC) {
299 status =
300 ql_wait_reg_rdy(qdev,
301 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
302 if (status)
303 goto exit;
304 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
305 (index << MAC_ADDR_IDX_SHIFT) | /* index */
306 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
307 status =
308 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
309 MAC_ADDR_MR, 0);
310 if (status)
311 goto exit;
312 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
313 }
314 break;
315 }
316 case MAC_ADDR_TYPE_VLAN:
317 case MAC_ADDR_TYPE_MULTI_FLTR:
318 default:
319 netif_crit(qdev, ifup, qdev->ndev,
320 "Address type %d not yet supported.\n", type);
321 status = -EPERM;
322 }
323exit:
324 return status;
325}
326
327/* Set up a MAC, multicast or VLAN address for the
328 * inbound frame matching.
329 */
330static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
331 u16 index)
332{
333 u32 offset = 0;
334 int status = 0;
335
336 switch (type) {
337 case MAC_ADDR_TYPE_MULTI_MAC:
338 {
339 u32 upper = (addr[0] << 8) | addr[1];
340 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
341 (addr[4] << 8) | (addr[5]);
342
343 status =
344 ql_wait_reg_rdy(qdev,
345 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
346 if (status)
347 goto exit;
348 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
349 (index << MAC_ADDR_IDX_SHIFT) |
350 type | MAC_ADDR_E);
351 ql_write32(qdev, MAC_ADDR_DATA, lower);
352 status =
353 ql_wait_reg_rdy(qdev,
354 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
355 if (status)
356 goto exit;
357 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
358 (index << MAC_ADDR_IDX_SHIFT) |
359 type | MAC_ADDR_E);
360
361 ql_write32(qdev, MAC_ADDR_DATA, upper);
362 status =
363 ql_wait_reg_rdy(qdev,
364 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
365 if (status)
366 goto exit;
367 break;
368 }
369 case MAC_ADDR_TYPE_CAM_MAC:
370 {
371 u32 cam_output;
372 u32 upper = (addr[0] << 8) | addr[1];
373 u32 lower =
374 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
375 (addr[5]);
376
377 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
378 "Adding %s address %pM at index %d in the CAM.\n",
379 type == MAC_ADDR_TYPE_MULTI_MAC ?
380 "MULTICAST" : "UNICAST",
381 addr, index);
382
383 status =
384 ql_wait_reg_rdy(qdev,
385 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
386 if (status)
387 goto exit;
388 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
389 (index << MAC_ADDR_IDX_SHIFT) | /* index */
390 type); /* type */
391 ql_write32(qdev, MAC_ADDR_DATA, lower);
392 status =
393 ql_wait_reg_rdy(qdev,
394 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
395 if (status)
396 goto exit;
397 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
398 (index << MAC_ADDR_IDX_SHIFT) | /* index */
399 type); /* type */
400 ql_write32(qdev, MAC_ADDR_DATA, upper);
401 status =
402 ql_wait_reg_rdy(qdev,
403 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
404 if (status)
405 goto exit;
406 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
407 (index << MAC_ADDR_IDX_SHIFT) | /* index */
408 type); /* type */
409 /* This field should also include the queue id
410 and possibly the function id. Right now we hardcode
411 the route field to NIC core.
412 */
413 cam_output = (CAM_OUT_ROUTE_NIC |
414 (qdev->
415 func << CAM_OUT_FUNC_SHIFT) |
416 (0 << CAM_OUT_CQ_ID_SHIFT));
417 if (qdev->vlgrp)
418 cam_output |= CAM_OUT_RV;
419 /* route to NIC core */
420 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
421 break;
422 }
423 case MAC_ADDR_TYPE_VLAN:
424 {
425 u32 enable_bit = *((u32 *) &addr[0]);
426 /* For VLAN, the addr actually holds a bit that
427 * either enables or disables the vlan id we are
428 * addressing. It's either MAC_ADDR_E on or off.
429 * That's bit-27 we're talking about.
430 */
431 netif_info(qdev, ifup, qdev->ndev,
432 "%s VLAN ID %d %s the CAM.\n",
433 enable_bit ? "Adding" : "Removing",
434 index,
435 enable_bit ? "to" : "from");
436
437 status =
438 ql_wait_reg_rdy(qdev,
439 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
440 if (status)
441 goto exit;
442 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
443 (index << MAC_ADDR_IDX_SHIFT) | /* index */
444 type | /* type */
445 enable_bit); /* enable/disable */
446 break;
447 }
448 case MAC_ADDR_TYPE_MULTI_FLTR:
449 default:
450 netif_crit(qdev, ifup, qdev->ndev,
451 "Address type %d not yet supported.\n", type);
452 status = -EPERM;
453 }
454exit:
455 return status;
456}
457
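/* Editor's note (summary inferred from ql_set_mac_addr_reg() above, added
 * for clarity): a CAM MAC entry is programmed with three MAC_ADDR_IDX /
 * MAC_ADDR_DATA write pairs -- the lower 32 bits of the address, then the
 * upper 16 bits, then the cam_output word that routes matching frames to
 * the NIC core queue.
 */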
458/* Set or clear MAC address in hardware. We sometimes
459 * have to clear it to prevent wrong frame routing
460 * especially in a bonding environment.
461 */
462static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
463{
464 int status;
465 char zero_mac_addr[ETH_ALEN];
466 char *addr;
467
468 if (set) {
469 addr = &qdev->current_mac_addr[0];
470 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
471 "Set Mac addr %pM\n", addr);
472 } else {
473 memset(zero_mac_addr, 0, ETH_ALEN);
474 addr = &zero_mac_addr[0];
475 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
476 "Clearing MAC address\n");
477 }
478 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
479 if (status)
480 return status;
481 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
482 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
483 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
484 if (status)
485 netif_err(qdev, ifup, qdev->ndev,
486 "Failed to init mac address.\n");
487 return status;
488}
489
490void ql_link_on(struct ql_adapter *qdev)
491{
492 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
493 netif_carrier_on(qdev->ndev);
494 ql_set_mac_addr(qdev, 1);
495}
496
497void ql_link_off(struct ql_adapter *qdev)
498{
499 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
500 netif_carrier_off(qdev->ndev);
501 ql_set_mac_addr(qdev, 0);
502}
503
504/* Get a specific frame routing value from the CAM.
505 * Used for debug and reg dump.
506 */
507int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
508{
509 int status = 0;
510
511 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
512 if (status)
513 goto exit;
514
515 ql_write32(qdev, RT_IDX,
516 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
517 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
518 if (status)
519 goto exit;
520 *value = ql_read32(qdev, RT_DATA);
521exit:
522 return status;
523}
524
525/* The NIC function for this chip has 16 routing indexes. Each one can be used
526 * to route different frame types to various inbound queues. We send broadcast/
527 * multicast/error frames to the default queue for slow handling,
528 * and CAM hit/RSS frames to the fast handling queues.
529 */
530static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
531 int enable)
532{
533 int status = -EINVAL; /* Return error if no mask match. */
534 u32 value = 0;
535
536 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
537 "%s %s mask %s the routing reg.\n",
538 enable ? "Adding" : "Removing",
539 index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
540 index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
541 index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
542 index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
543 index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
544 index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
545 index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
546 index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
547 index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
548 index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
549 index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
550 index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
551 index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
552 index == RT_IDX_UNUSED013 ? "UNUSED13" :
553 index == RT_IDX_UNUSED014 ? "UNUSED14" :
554 index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
555 "(Bad index != RT_IDX)",
556 enable ? "to" : "from");
557
558 switch (mask) {
559 case RT_IDX_CAM_HIT:
560 {
561 value = RT_IDX_DST_CAM_Q | /* dest */
562 RT_IDX_TYPE_NICQ | /* type */
563 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
564 break;
565 }
566 case RT_IDX_VALID: /* Promiscuous Mode frames. */
567 {
568 value = RT_IDX_DST_DFLT_Q | /* dest */
569 RT_IDX_TYPE_NICQ | /* type */
570 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
571 break;
572 }
573 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
574 {
575 value = RT_IDX_DST_DFLT_Q | /* dest */
576 RT_IDX_TYPE_NICQ | /* type */
577 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
578 break;
579 }
580 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
581 {
582 value = RT_IDX_DST_DFLT_Q | /* dest */
583 RT_IDX_TYPE_NICQ | /* type */
584 (RT_IDX_IP_CSUM_ERR_SLOT <<
585 RT_IDX_IDX_SHIFT); /* index */
586 break;
587 }
588 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
589 {
590 value = RT_IDX_DST_DFLT_Q | /* dest */
591 RT_IDX_TYPE_NICQ | /* type */
592 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
593 RT_IDX_IDX_SHIFT); /* index */
594 break;
595 }
596 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
597 {
598 value = RT_IDX_DST_DFLT_Q | /* dest */
599 RT_IDX_TYPE_NICQ | /* type */
600 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
601 break;
602 }
603 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
604 {
605 value = RT_IDX_DST_DFLT_Q | /* dest */
606 RT_IDX_TYPE_NICQ | /* type */
607 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
608 break;
609 }
610 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
611 {
612 value = RT_IDX_DST_DFLT_Q | /* dest */
613 RT_IDX_TYPE_NICQ | /* type */
614 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
615 break;
616 }
617 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
618 {
619 value = RT_IDX_DST_RSS | /* dest */
620 RT_IDX_TYPE_NICQ | /* type */
621 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
622 break;
623 }
624 case 0: /* Clear the E-bit on an entry. */
625 {
626 value = RT_IDX_DST_DFLT_Q | /* dest */
627 RT_IDX_TYPE_NICQ | /* type */
628 (index << RT_IDX_IDX_SHIFT);/* index */
629 break;
630 }
631 default:
632 netif_err(qdev, ifup, qdev->ndev,
633 "Mask type %d not yet supported.\n", mask);
634 status = -EPERM;
635 goto exit;
636 }
637
638 if (value) {
639 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
640 if (status)
641 goto exit;
642 value |= (enable ? RT_IDX_E : 0);
643 ql_write32(qdev, RT_IDX, value);
644 ql_write32(qdev, RT_DATA, enable ? mask : 0);
645 }
646exit:
647 return status;
648}
649
650static void ql_enable_interrupts(struct ql_adapter *qdev)
651{
652 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
653}
654
655static void ql_disable_interrupts(struct ql_adapter *qdev)
656{
657 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
658}
659
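/* Editor's note (inferred from usage, not stated in the original comments):
 * the INTR_EN writes above use the "(bits << 16) | bits" form, where the
 * upper half-word appears to act as a write-enable mask selecting which of
 * the lower bits take effect; the STS write in ql_8012_port_initialize()
 * follows the same pattern.
 */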
660/* If we're running with multiple MSI-X vectors then we enable on the fly.
661 * Otherwise, we may have multiple outstanding workers and don't want to
662 * enable until the last one finishes. In this case, the irq_cnt gets
663 * incremented every time we queue a worker and decremented every time
664 * a worker finishes. Once it hits zero we enable the interrupt.
665 */
666u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
667{
668 u32 var = 0;
669 unsigned long hw_flags = 0;
670 struct intr_context *ctx = qdev->intr_context + intr;
671
672 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
673 /* Always enable if we're MSIX multi interrupts and
674 * it's not the default (zeroeth) interrupt.
675 */
676 ql_write32(qdev, INTR_EN,
677 ctx->intr_en_mask);
678 var = ql_read32(qdev, STS);
679 return var;
680 }
681
682 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
683 if (atomic_dec_and_test(&ctx->irq_cnt)) {
684 ql_write32(qdev, INTR_EN,
685 ctx->intr_en_mask);
686 var = ql_read32(qdev, STS);
687 }
688 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
689 return var;
690}
691
692static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
693{
694 u32 var = 0;
695 struct intr_context *ctx;
696
697 /* HW disables for us if we're MSIX multi interrupts and
698 * it's not the default (zeroeth) interrupt.
699 */
700 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
701 return 0;
702
703 ctx = qdev->intr_context + intr;
704 spin_lock(&qdev->hw_lock);
705 if (!atomic_read(&ctx->irq_cnt)) {
706 ql_write32(qdev, INTR_EN,
707 ctx->intr_dis_mask);
708 var = ql_read32(qdev, STS);
709 }
710 atomic_inc(&ctx->irq_cnt);
711 spin_unlock(&qdev->hw_lock);
712 return var;
713}
714
715static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
716{
717 int i;
718 for (i = 0; i < qdev->intr_count; i++) {
719 /* The enable call does a atomic_dec_and_test
720 * and enables only if the result is zero.
721 * So we precharge it here.
722 */
723 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
724 i == 0))
725 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
726 ql_enable_completion_interrupt(qdev, i);
727 }
728
729}
730
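/* Editor's note (descriptive comment added for clarity): ql_validate_flash()
 * below checks the four-character signature at the start of the flash image
 * ("8012" or "8000") and then sums it as little-endian 16-bit words; a
 * non-zero sum indicates a bad checksum and is returned to the caller.
 */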
731static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
732{
733 int status, i;
734 u16 csum = 0;
735 __le16 *flash = (__le16 *)&qdev->flash;
736
737 status = strncmp((char *)&qdev->flash, str, 4);
738 if (status) {
739 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
740 return status;
741 }
742
743 for (i = 0; i < size; i++)
744 csum += le16_to_cpu(*flash++);
745
746 if (csum)
747 netif_err(qdev, ifup, qdev->ndev,
748 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
749
750 return csum;
751}
752
753static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
754{
755 int status = 0;
756 /* wait for reg to come ready */
757 status = ql_wait_reg_rdy(qdev,
758 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
759 if (status)
760 goto exit;
761 /* set up for reg read */
762 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
763 /* wait for reg to come ready */
764 status = ql_wait_reg_rdy(qdev,
765 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
766 if (status)
767 goto exit;
768 /* This data is stored on flash as an array of
769 * __le32. Since ql_read32() returns cpu endian
770 * we need to swap it back.
771 */
772 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
773exit:
774 return status;
775}
776
777static int ql_get_8000_flash_params(struct ql_adapter *qdev)
778{
779 u32 i, size;
780 int status;
781 __le32 *p = (__le32 *)&qdev->flash;
782 u32 offset;
783 u8 mac_addr[6];
784
785 /* Get flash offset for function and adjust
786 * for dword access.
787 */
788 if (!qdev->port)
789 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
790 else
791 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
792
793 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
794 return -ETIMEDOUT;
795
796 size = sizeof(struct flash_params_8000) / sizeof(u32);
797 for (i = 0; i < size; i++, p++) {
798 status = ql_read_flash_word(qdev, i+offset, p);
799 if (status) {
800 netif_err(qdev, ifup, qdev->ndev,
801 "Error reading flash.\n");
802 goto exit;
803 }
804 }
805
806 status = ql_validate_flash(qdev,
807 sizeof(struct flash_params_8000) / sizeof(u16),
808 "8000");
809 if (status) {
810 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
811 status = -EINVAL;
812 goto exit;
813 }
814
815 /* Extract either manufacturer or BOFM modified
816 * MAC address.
817 */
818 if (qdev->flash.flash_params_8000.data_type1 == 2)
819 memcpy(mac_addr,
820 qdev->flash.flash_params_8000.mac_addr1,
821 qdev->ndev->addr_len);
822 else
823 memcpy(mac_addr,
824 qdev->flash.flash_params_8000.mac_addr,
825 qdev->ndev->addr_len);
826
827 if (!is_valid_ether_addr(mac_addr)) {
828 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
829 status = -EINVAL;
830 goto exit;
831 }
832
833 memcpy(qdev->ndev->dev_addr,
834 mac_addr,
835 qdev->ndev->addr_len);
836
837exit:
838 ql_sem_unlock(qdev, SEM_FLASH_MASK);
839 return status;
840}
841
842static int ql_get_8012_flash_params(struct ql_adapter *qdev)
843{
844 int i;
845 int status;
846 __le32 *p = (__le32 *)&qdev->flash;
847 u32 offset = 0;
848 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
849
850 /* Second function's parameters follow the first
851 * function's.
852 */
853 if (qdev->port)
854 offset = size;
855
856 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
857 return -ETIMEDOUT;
858
859 for (i = 0; i < size; i++, p++) {
860 status = ql_read_flash_word(qdev, i+offset, p);
861 if (status) {
862 netif_err(qdev, ifup, qdev->ndev,
863 "Error reading flash.\n");
864 goto exit;
865 }
866
867 }
868
869 status = ql_validate_flash(qdev,
870 sizeof(struct flash_params_8012) / sizeof(u16),
871 "8012");
872 if (status) {
873 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
874 status = -EINVAL;
875 goto exit;
876 }
877
878 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
879 status = -EINVAL;
880 goto exit;
881 }
882
883 memcpy(qdev->ndev->dev_addr,
884 qdev->flash.flash_params_8012.mac_addr,
885 qdev->ndev->addr_len);
886
887exit:
888 ql_sem_unlock(qdev, SEM_FLASH_MASK);
889 return status;
890}
891
892/* xgmac register are located behind the xgmac_addr and xgmac_data
893 * register pair. Each read/write requires us to wait for the ready
894 * bit before reading/writing the data.
895 */
896static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
897{
898 int status;
899 /* wait for reg to come ready */
900 status = ql_wait_reg_rdy(qdev,
901 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
902 if (status)
903 return status;
904 /* write the data to the data reg */
905 ql_write32(qdev, XGMAC_DATA, data);
906 /* trigger the write */
907 ql_write32(qdev, XGMAC_ADDR, reg);
908 return status;
909}
910
911/* xgmac register are located behind the xgmac_addr and xgmac_data
912 * register pair. Each read/write requires us to wait for the ready
913 * bit before reading/writing the data.
914 */
915int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
916{
917 int status = 0;
918 /* wait for reg to come ready */
919 status = ql_wait_reg_rdy(qdev,
920 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
921 if (status)
922 goto exit;
923 /* set up for reg read */
924 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
925 /* wait for reg to come ready */
926 status = ql_wait_reg_rdy(qdev,
927 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
928 if (status)
929 goto exit;
930 /* get the data */
931 *data = ql_read32(qdev, XGMAC_DATA);
932exit:
933 return status;
934}
935
936/* This is used for reading the 64-bit statistics regs. */
937int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
938{
939 int status = 0;
940 u32 hi = 0;
941 u32 lo = 0;
942
943 status = ql_read_xgmac_reg(qdev, reg, &lo);
944 if (status)
945 goto exit;
946
947 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
948 if (status)
949 goto exit;
950
951 *data = (u64) lo | ((u64) hi << 32);
952
953exit:
954 return status;
955}
956
957static int ql_8000_port_initialize(struct ql_adapter *qdev)
958{
959 int status;
960 /*
961 * Get MPI firmware version for driver banner
962 * and ethtool info.
963 */
964 status = ql_mb_about_fw(qdev);
965 if (status)
966 goto exit;
967 status = ql_mb_get_fw_state(qdev);
968 if (status)
969 goto exit;
970 /* Wake up a worker to get/set the TX/RX frame sizes. */
971 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
972exit:
973 return status;
974}
975
976/* Take the MAC Core out of reset.
977 * Enable statistics counting.
978 * Take the transmitter/receiver out of reset.
979 * This functionality may be done in the MPI firmware at a
980 * later date.
981 */
982static int ql_8012_port_initialize(struct ql_adapter *qdev)
983{
984 int status = 0;
985 u32 data;
986
987 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
988 /* Another function has the semaphore, so
989 * wait for the port init bit to come ready.
990 */
991 netif_info(qdev, link, qdev->ndev,
992 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
993 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
994 if (status) {
995 netif_crit(qdev, link, qdev->ndev,
996 "Port initialize timed out.\n");
997 }
998 return status;
999 }
1000
1001 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1002 /* Set the core reset. */
1003 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1004 if (status)
1005 goto end;
1006 data |= GLOBAL_CFG_RESET;
1007 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1008 if (status)
1009 goto end;
1010
1011 /* Clear the core reset and turn on jumbo for receiver. */
1012 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
1013 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
1014 data |= GLOBAL_CFG_TX_STAT_EN;
1015 data |= GLOBAL_CFG_RX_STAT_EN;
1016 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1017 if (status)
1018 goto end;
1019
1020 /* Enable transmitter, and clear its reset. */
1021 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1022 if (status)
1023 goto end;
1024 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
1025 data |= TX_CFG_EN; /* Enable the transmitter. */
1026 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1027 if (status)
1028 goto end;
1029
1030 /* Enable receiver and clear its reset. */
1031 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1032 if (status)
1033 goto end;
1034 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1035 data |= RX_CFG_EN; /* Enable the receiver. */
1036 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1037 if (status)
1038 goto end;
1039
1040 /* Turn on jumbo. */
1041 status =
1042 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1043 if (status)
1044 goto end;
1045 status =
1046 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1047 if (status)
1048 goto end;
1049
1050 /* Signal to the world that the port is enabled. */
1051 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1052end:
1053 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1054 return status;
1055}
1056
1057static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1058{
1059 return PAGE_SIZE << qdev->lbq_buf_order;
1060}
1061
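/* Editor's note (descriptive comment added for clarity): large receive
 * buffers are carved from one higher-order "master" page of
 * ql_lbq_block_size() bytes. Each lbq descriptor takes an lbq_buf_size
 * chunk plus an extra page reference; when the chunk that ends the block is
 * consumed, ql_get_curr_lchunk() unmaps the whole page (see
 * ql_get_next_chunk() below, which also marks that final chunk with
 * last_flag).
 */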
1062/* Get the next large buffer. */
1063static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1064{
1065 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1066 rx_ring->lbq_curr_idx++;
1067 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1068 rx_ring->lbq_curr_idx = 0;
1069 rx_ring->lbq_free_cnt++;
1070 return lbq_desc;
1071}
1072
1073static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1074 struct rx_ring *rx_ring)
1075{
1076 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1077
1078 pci_dma_sync_single_for_cpu(qdev->pdev,
1079 dma_unmap_addr(lbq_desc, mapaddr),
1080 rx_ring->lbq_buf_size,
1081 PCI_DMA_FROMDEVICE);
1082
1083 /* If it's the last chunk of our master page then
1084 * we unmap it.
1085 */
1086 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1087 == ql_lbq_block_size(qdev))
1088 pci_unmap_page(qdev->pdev,
1089 lbq_desc->p.pg_chunk.map,
1090 ql_lbq_block_size(qdev),
1091 PCI_DMA_FROMDEVICE);
1092 return lbq_desc;
1093}
1094
1095/* Get the next small buffer. */
1096static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1097{
1098 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1099 rx_ring->sbq_curr_idx++;
1100 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1101 rx_ring->sbq_curr_idx = 0;
1102 rx_ring->sbq_free_cnt++;
1103 return sbq_desc;
1104}
1105
1106/* Update an rx ring index. */
1107static void ql_update_cq(struct rx_ring *rx_ring)
1108{
1109 rx_ring->cnsmr_idx++;
1110 rx_ring->curr_entry++;
1111 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1112 rx_ring->cnsmr_idx = 0;
1113 rx_ring->curr_entry = rx_ring->cq_base;
1114 }
1115}
1116
1117static void ql_write_cq_idx(struct rx_ring *rx_ring)
1118{
1119 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1120}
1121
1122static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1123 struct bq_desc *lbq_desc)
1124{
1125 if (!rx_ring->pg_chunk.page) {
1126 u64 map;
1127 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1128 GFP_ATOMIC,
1129 qdev->lbq_buf_order);
1130 if (unlikely(!rx_ring->pg_chunk.page)) {
1131 netif_err(qdev, drv, qdev->ndev,
1132 "page allocation failed.\n");
1133 return -ENOMEM;
1134 }
1135 rx_ring->pg_chunk.offset = 0;
1136 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1137 0, ql_lbq_block_size(qdev),
1138 PCI_DMA_FROMDEVICE);
1139 if (pci_dma_mapping_error(qdev->pdev, map)) {
1140 __free_pages(rx_ring->pg_chunk.page,
1141 qdev->lbq_buf_order);
1142 netif_err(qdev, drv, qdev->ndev,
1143 "PCI mapping failed.\n");
1144 return -ENOMEM;
1145 }
1146 rx_ring->pg_chunk.map = map;
1147 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1148 }
1149
1150 /* Copy the current master pg_chunk info
1151 * to the current descriptor.
1152 */
1153 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1154
1155 /* Adjust the master page chunk for next
1156 * buffer get.
1157 */
1158 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1159 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1160 rx_ring->pg_chunk.page = NULL;
1161 lbq_desc->p.pg_chunk.last_flag = 1;
1162 } else {
1163 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1164 get_page(rx_ring->pg_chunk.page);
1165 lbq_desc->p.pg_chunk.last_flag = 0;
1166 }
1167 return 0;
1168}
1169/* Process (refill) a large buffer queue. */
1170static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1171{
1172 u32 clean_idx = rx_ring->lbq_clean_idx;
1173 u32 start_idx = clean_idx;
1174 struct bq_desc *lbq_desc;
1175 u64 map;
1176 int i;
1177
1178 while (rx_ring->lbq_free_cnt > 32) {
1179 for (i = 0; i < 16; i++) {
1180 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1181 "lbq: try cleaning clean_idx = %d.\n",
1182 clean_idx);
1183 lbq_desc = &rx_ring->lbq[clean_idx];
1184 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1185 netif_err(qdev, ifup, qdev->ndev,
1186 "Could not get a page chunk.\n");
1187 return;
1188 }
1189
1190 map = lbq_desc->p.pg_chunk.map +
1191 lbq_desc->p.pg_chunk.offset;
1192 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1193 dma_unmap_len_set(lbq_desc, maplen,
1194 rx_ring->lbq_buf_size);
1195 *lbq_desc->addr = cpu_to_le64(map);
1196
1197 pci_dma_sync_single_for_device(qdev->pdev, map,
1198 rx_ring->lbq_buf_size,
1199 PCI_DMA_FROMDEVICE);
1200 clean_idx++;
1201 if (clean_idx == rx_ring->lbq_len)
1202 clean_idx = 0;
1203 }
1204
1205 rx_ring->lbq_clean_idx = clean_idx;
1206 rx_ring->lbq_prod_idx += 16;
1207 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1208 rx_ring->lbq_prod_idx = 0;
1209 rx_ring->lbq_free_cnt -= 16;
1210 }
1211
1212 if (start_idx != clean_idx) {
1213 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1214 "lbq: updating prod idx = %d.\n",
1215 rx_ring->lbq_prod_idx);
1216 ql_write_db_reg(rx_ring->lbq_prod_idx,
1217 rx_ring->lbq_prod_idx_db_reg);
1218 }
1219}
1220
1221/* Process (refill) a small buffer queue. */
1222static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1223{
1224 u32 clean_idx = rx_ring->sbq_clean_idx;
1225 u32 start_idx = clean_idx;
1226 struct bq_desc *sbq_desc;
1227 u64 map;
1228 int i;
1229
1230 while (rx_ring->sbq_free_cnt > 16) {
1231 for (i = 0; i < 16; i++) {
1232 sbq_desc = &rx_ring->sbq[clean_idx];
1233 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1234 "sbq: try cleaning clean_idx = %d.\n",
1235 clean_idx);
1236 if (sbq_desc->p.skb == NULL) {
1237 netif_printk(qdev, rx_status, KERN_DEBUG,
1238 qdev->ndev,
1239 "sbq: getting new skb for index %d.\n",
1240 sbq_desc->index);
1241 sbq_desc->p.skb =
1242 netdev_alloc_skb(qdev->ndev,
1243 SMALL_BUFFER_SIZE);
1244 if (sbq_desc->p.skb == NULL) {
1245 netif_err(qdev, probe, qdev->ndev,
1246 "Couldn't get an skb.\n");
1247 rx_ring->sbq_clean_idx = clean_idx;
1248 return;
1249 }
1250 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1251 map = pci_map_single(qdev->pdev,
1252 sbq_desc->p.skb->data,
1253 rx_ring->sbq_buf_size,
1254 PCI_DMA_FROMDEVICE);
1255 if (pci_dma_mapping_error(qdev->pdev, map)) {
1256 netif_err(qdev, ifup, qdev->ndev,
1257 "PCI mapping failed.\n");
1258 rx_ring->sbq_clean_idx = clean_idx;
1259 dev_kfree_skb_any(sbq_desc->p.skb);
1260 sbq_desc->p.skb = NULL;
1261 return;
1262 }
1263 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1264 dma_unmap_len_set(sbq_desc, maplen,
1265 rx_ring->sbq_buf_size);
1266 *sbq_desc->addr = cpu_to_le64(map);
1267 }
1268
1269 clean_idx++;
1270 if (clean_idx == rx_ring->sbq_len)
1271 clean_idx = 0;
1272 }
1273 rx_ring->sbq_clean_idx = clean_idx;
1274 rx_ring->sbq_prod_idx += 16;
1275 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1276 rx_ring->sbq_prod_idx = 0;
1277 rx_ring->sbq_free_cnt -= 16;
1278 }
1279
1280 if (start_idx != clean_idx) {
1281 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1282 "sbq: updating prod idx = %d.\n",
1283 rx_ring->sbq_prod_idx);
1284 ql_write_db_reg(rx_ring->sbq_prod_idx,
1285 rx_ring->sbq_prod_idx_db_reg);
1286 }
1287}
1288
1289static void ql_update_buffer_queues(struct ql_adapter *qdev,
1290 struct rx_ring *rx_ring)
1291{
1292 ql_update_sbq(qdev, rx_ring);
1293 ql_update_lbq(qdev, rx_ring);
1294}
1295
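/* Editor's note (descriptive comment added for clarity): the two refill
 * routines above post buffers in batches of 16 and only write the
 * producer-index doorbell when at least one batch was actually queued,
 * keeping MMIO writes off the per-buffer path.
 */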
1296/* Unmaps tx buffers. Can be called from send() if a pci mapping
1297 * fails at some stage, or from the interrupt when a tx completes.
1298 */
1299static void ql_unmap_send(struct ql_adapter *qdev,
1300 struct tx_ring_desc *tx_ring_desc, int mapped)
1301{
1302 int i;
1303 for (i = 0; i < mapped; i++) {
1304 if (i == 0 || (i == 7 && mapped > 7)) {
1305 /*
1306 * Unmap the skb->data area, or the
1307 * external sglist (AKA the Outbound
1308 * Address List (OAL)).
1309 * If it's the zeroeth element, then it's
1310 * the skb->data area. If it's the 7th
1311 * element and there are more than 6 frags,
1312 * then it's an OAL.
1313 */
1314 if (i == 7) {
1315 netif_printk(qdev, tx_done, KERN_DEBUG,
1316 qdev->ndev,
1317 "unmapping OAL area.\n");
1318 }
1319 pci_unmap_single(qdev->pdev,
1320 dma_unmap_addr(&tx_ring_desc->map[i],
1321 mapaddr),
1322 dma_unmap_len(&tx_ring_desc->map[i],
1323 maplen),
1324 PCI_DMA_TODEVICE);
1325 } else {
1326 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1327 "unmapping frag %d.\n", i);
1328 pci_unmap_page(qdev->pdev,
1329 dma_unmap_addr(&tx_ring_desc->map[i],
1330 mapaddr),
1331 dma_unmap_len(&tx_ring_desc->map[i],
1332 maplen), PCI_DMA_TODEVICE);
1333 }
1334 }
1335
1336}
1337
1338/* Map the buffers for this transmit. This will return
1339 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1340 */
1341static int ql_map_send(struct ql_adapter *qdev,
1342 struct ob_mac_iocb_req *mac_iocb_ptr,
1343 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1344{
1345 int len = skb_headlen(skb);
1346 dma_addr_t map;
1347 int frag_idx, err, map_idx = 0;
1348 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1349 int frag_cnt = skb_shinfo(skb)->nr_frags;
1350
1351 if (frag_cnt) {
1352 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1353 "frag_cnt = %d.\n", frag_cnt);
1354 }
1355 /*
1356 * Map the skb buffer first.
1357 */
1358 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1359
1360 err = pci_dma_mapping_error(qdev->pdev, map);
1361 if (err) {
1362 netif_err(qdev, tx_queued, qdev->ndev,
1363 "PCI mapping failed with error: %d\n", err);
1364
1365 return NETDEV_TX_BUSY;
1366 }
1367
1368 tbd->len = cpu_to_le32(len);
1369 tbd->addr = cpu_to_le64(map);
1370 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1371 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1372 map_idx++;
1373
1374 /*
1375 * This loop fills the remainder of the 8 address descriptors
1376 * in the IOCB. If there are more than 7 fragments, then the
1377 * eighth address desc will point to an external list (OAL).
1378 * When this happens, the remainder of the frags will be stored
1379 * in this list.
1380 */
1381 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1382 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1383 tbd++;
1384 if (frag_idx == 6 && frag_cnt > 7) {
1385 /* Let's tack on an sglist.
1386 * Our control block will now
1387 * look like this:
1388 * iocb->seg[0] = skb->data
1389 * iocb->seg[1] = frag[0]
1390 * iocb->seg[2] = frag[1]
1391 * iocb->seg[3] = frag[2]
1392 * iocb->seg[4] = frag[3]
1393 * iocb->seg[5] = frag[4]
1394 * iocb->seg[6] = frag[5]
1395 * iocb->seg[7] = ptr to OAL (external sglist)
1396 * oal->seg[0] = frag[6]
1397 * oal->seg[1] = frag[7]
1398 * oal->seg[2] = frag[8]
1399 * oal->seg[3] = frag[9]
1400 * oal->seg[4] = frag[10]
1401 * etc...
1402 */
1403 /* Tack on the OAL in the eighth segment of IOCB. */
1404 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1405 sizeof(struct oal),
1406 PCI_DMA_TODEVICE);
1407 err = pci_dma_mapping_error(qdev->pdev, map);
1408 if (err) {
1409 netif_err(qdev, tx_queued, qdev->ndev,
1410 "PCI mapping outbound address list with error: %d\n",
1411 err);
1412 goto map_error;
1413 }
1414
1415 tbd->addr = cpu_to_le64(map);
1416 /*
1417 * The length is the number of fragments
1418 * that remain to be mapped times the length
1419 * of our sglist (OAL).
1420 */
1421 tbd->len =
1422 cpu_to_le32((sizeof(struct tx_buf_desc) *
1423 (frag_cnt - frag_idx)) | TX_DESC_C);
1424 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1425 map);
1426 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1427 sizeof(struct oal));
1428 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1429 map_idx++;
1430 }
1431
1432 map =
1433 pci_map_page(qdev->pdev, frag->page,
1434 frag->page_offset, frag->size,
1435 PCI_DMA_TODEVICE);
1436
1437 err = pci_dma_mapping_error(qdev->pdev, map);
1438 if (err) {
1439 netif_err(qdev, tx_queued, qdev->ndev,
1440 "PCI mapping frags failed with error: %d.\n",
1441 err);
1442 goto map_error;
1443 }
1444
1445 tbd->addr = cpu_to_le64(map);
1446 tbd->len = cpu_to_le32(frag->size);
1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 frag->size);
1450
1451 }
1452 /* Save the number of segments we've mapped. */
1453 tx_ring_desc->map_cnt = map_idx;
1454 /* Terminate the last segment. */
1455 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 return NETDEV_TX_OK;
1457
1458map_error:
1459 /*
1460 * If the first frag mapping failed, then i will be zero.
1461 * This causes the unmap of the skb->data area. Otherwise
1462 * we pass in the number of frags that mapped successfully
1463 * so they can be unmapped.
1464 */
1465 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 return NETDEV_TX_BUSY;
1467}
1468
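/* Editor's note (descriptive comment added for clarity): ql_map_send()
 * above places skb->data and the leading fragments directly in the IOCB's
 * eight tbd slots; when a frame has more than seven fragments, the eighth
 * slot instead points at the per-descriptor OAL (outbound address list)
 * that holds the remainder, as laid out in the comment inside the function.
 */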
1469/* Process an inbound completion from an rx ring. */
1470static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 struct rx_ring *rx_ring,
1472 struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 u32 length,
1474 u16 vlan_id)
1475{
1476 struct sk_buff *skb;
1477 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 struct skb_frag_struct *rx_frag;
1479 int nr_frags;
1480 struct napi_struct *napi = &rx_ring->napi;
1481
1482 napi->dev = qdev->ndev;
1483
1484 skb = napi_get_frags(napi);
1485 if (!skb) {
1486 netif_err(qdev, drv, qdev->ndev,
1487 "Couldn't get an skb, exiting.\n");
1488 rx_ring->rx_dropped++;
1489 put_page(lbq_desc->p.pg_chunk.page);
1490 return;
1491 }
1492 prefetch(lbq_desc->p.pg_chunk.va);
1493 rx_frag = skb_shinfo(skb)->frags;
1494 nr_frags = skb_shinfo(skb)->nr_frags;
1495 rx_frag += nr_frags;
1496 rx_frag->page = lbq_desc->p.pg_chunk.page;
1497 rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
1498 rx_frag->size = length;
1499
1500 skb->len += length;
1501 skb->data_len += length;
1502 skb->truesize += length;
1503 skb_shinfo(skb)->nr_frags++;
1504
1505 rx_ring->rx_packets++;
1506 rx_ring->rx_bytes += length;
1507 skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 skb_record_rx_queue(skb, rx_ring->cq_id);
1509 if (qdev->vlgrp && (vlan_id != 0xffff))
1510 vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
1511 else
1512 napi_gro_frags(napi);
1513}
1514
1515/* Process an inbound completion from an rx ring. */
1516static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517 struct rx_ring *rx_ring,
1518 struct ib_mac_iocb_rsp *ib_mac_rsp,
1519 u32 length,
1520 u16 vlan_id)
1521{
1522 struct net_device *ndev = qdev->ndev;
1523 struct sk_buff *skb = NULL;
1524 void *addr;
1525 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526 struct napi_struct *napi = &rx_ring->napi;
1527
1528 skb = netdev_alloc_skb(ndev, length);
1529 if (!skb) {
1530 netif_err(qdev, drv, qdev->ndev,
1531 "Couldn't get an skb, need to unwind!.\n");
1532 rx_ring->rx_dropped++;
1533 put_page(lbq_desc->p.pg_chunk.page);
1534 return;
1535 }
1536
1537 addr = lbq_desc->p.pg_chunk.va;
1538 prefetch(addr);
1539
1540
1541 /* Frame error, so drop the packet. */
1542 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1543 netif_info(qdev, drv, qdev->ndev,
1544 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1545 rx_ring->rx_errors++;
1546 goto err_out;
1547 }
1548
1549 /* The max framesize filter on this chip is set higher than
1550 * MTU since FCoE uses 2k frames.
1551 */
1552 if (skb->len > ndev->mtu + ETH_HLEN) {
1553 netif_err(qdev, drv, qdev->ndev,
1554 "Segment too small, dropping.\n");
1555 rx_ring->rx_dropped++;
1556 goto err_out;
1557 }
1558 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1561 length);
1562 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1563 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1564 length-ETH_HLEN);
1565 skb->len += length-ETH_HLEN;
1566 skb->data_len += length-ETH_HLEN;
1567 skb->truesize += length-ETH_HLEN;
1568
1569 rx_ring->rx_packets++;
1570 rx_ring->rx_bytes += skb->len;
1571 skb->protocol = eth_type_trans(skb, ndev);
1572 skb_checksum_none_assert(skb);
1573
1574 if (qdev->rx_csum &&
1575 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1576 /* TCP frame. */
1577 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1578 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1579 "TCP checksum done!\n");
1580 skb->ip_summed = CHECKSUM_UNNECESSARY;
1581 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1582 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1583 /* Unfragmented ipv4 UDP frame. */
1584 struct iphdr *iph = (struct iphdr *) skb->data;
1585 if (!(iph->frag_off &
1586 cpu_to_be16(IP_MF|IP_OFFSET))) {
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 netif_printk(qdev, rx_status, KERN_DEBUG,
1589 qdev->ndev,
1590 "TCP checksum done!\n");
1591 }
1592 }
1593 }
1594
1595 skb_record_rx_queue(skb, rx_ring->cq_id);
1596 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1597 if (qdev->vlgrp && (vlan_id != 0xffff))
1598 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1599 else
1600 napi_gro_receive(napi, skb);
1601 } else {
1602 if (qdev->vlgrp && (vlan_id != 0xffff))
1603 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1604 else
1605 netif_receive_skb(skb);
1606 }
1607 return;
1608err_out:
1609 dev_kfree_skb_any(skb);
1610 put_page(lbq_desc->p.pg_chunk.page);
1611}
1612
1613/* Process an inbound completion from an rx ring. */
1614static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1615 struct rx_ring *rx_ring,
1616 struct ib_mac_iocb_rsp *ib_mac_rsp,
1617 u32 length,
1618 u16 vlan_id)
1619{
1620 struct net_device *ndev = qdev->ndev;
1621 struct sk_buff *skb = NULL;
1622 struct sk_buff *new_skb = NULL;
1623 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1624
1625 skb = sbq_desc->p.skb;
1626 /* Allocate new_skb and copy */
1627 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1628 if (new_skb == NULL) {
1629 netif_err(qdev, probe, qdev->ndev,
1630 "No skb available, drop the packet.\n");
1631 rx_ring->rx_dropped++;
1632 return;
1633 }
1634 skb_reserve(new_skb, NET_IP_ALIGN);
1635 memcpy(skb_put(new_skb, length), skb->data, length);
1636 skb = new_skb;
1637
1638 /* Frame error, so drop the packet. */
1639 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1640 netif_info(qdev, drv, qdev->ndev,
1641 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1642 dev_kfree_skb_any(skb);
1643 rx_ring->rx_errors++;
1644 return;
1645 }
1646
1647 /* loopback self test for ethtool */
1648 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1649 ql_check_lb_frame(qdev, skb);
1650 dev_kfree_skb_any(skb);
1651 return;
1652 }
1653
1654 /* The max framesize filter on this chip is set higher than
1655 * MTU since FCoE uses 2k frames.
1656 */
1657 if (skb->len > ndev->mtu + ETH_HLEN) {
1658 dev_kfree_skb_any(skb);
1659 rx_ring->rx_dropped++;
1660 return;
1661 }
1662
1663 prefetch(skb->data);
1664 skb->dev = ndev;
1665 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1666 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 "%s Multicast.\n",
1668 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1669 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1670 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1671 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1672 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1673 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1674 }
1675 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1676 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677 "Promiscuous Packet.\n");
1678
1679 rx_ring->rx_packets++;
1680 rx_ring->rx_bytes += skb->len;
1681 skb->protocol = eth_type_trans(skb, ndev);
1682 skb_checksum_none_assert(skb);
1683
1684 /* If rx checksum is on, and there are no
1685 * csum or frame errors.
1686 */
1687 if (qdev->rx_csum &&
1688 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1689 /* TCP frame. */
1690 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1691 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1692 "TCP checksum done!\n");
1693 skb->ip_summed = CHECKSUM_UNNECESSARY;
1694 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1695 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1696 /* Unfragmented ipv4 UDP frame. */
1697 struct iphdr *iph = (struct iphdr *) skb->data;
1698 if (!(iph->frag_off &
1699 ntohs(IP_MF|IP_OFFSET))) {
1700 skb->ip_summed = CHECKSUM_UNNECESSARY;
1701 netif_printk(qdev, rx_status, KERN_DEBUG,
1702 qdev->ndev,
1703 "TCP checksum done!\n");
1704 }
1705 }
1706 }
1707
1708 skb_record_rx_queue(skb, rx_ring->cq_id);
1709 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1710 if (qdev->vlgrp && (vlan_id != 0xffff))
1711 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1712 vlan_id, skb);
1713 else
1714 napi_gro_receive(&rx_ring->napi, skb);
1715 } else {
1716 if (qdev->vlgrp && (vlan_id != 0xffff))
1717 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1718 else
1719 netif_receive_skb(skb);
1720 }
1721}
1722
1723static void ql_realign_skb(struct sk_buff *skb, int len)
1724{
1725 void *temp_addr = skb->data;
1726
1727 /* Undo the skb_reserve(skb,32) we did before
1728 * giving to hardware, and realign data on
1729 * a 2-byte boundary.
1730 */
1731 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1732 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1733 skb_copy_to_linear_data(skb, temp_addr,
1734 (unsigned int)len);
1735}
1736
1737/*
1738 * This function builds an skb for the given inbound
1739 * completion. It will be rewritten for readability in the near
1740 * future, but for now it works well.
1741 */
1742static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1743 struct rx_ring *rx_ring,
1744 struct ib_mac_iocb_rsp *ib_mac_rsp)
1745{
1746 struct bq_desc *lbq_desc;
1747 struct bq_desc *sbq_desc;
1748 struct sk_buff *skb = NULL;
1749 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1750 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1751
1752 /*
1753 * Handle the header buffer if present.
1754 */
1755 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1756 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1757 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1758 "Header of %d bytes in small buffer.\n", hdr_len);
1759 /*
1760 * Headers fit nicely into a small buffer.
1761 */
1762 sbq_desc = ql_get_curr_sbuf(rx_ring);
1763 pci_unmap_single(qdev->pdev,
1764 dma_unmap_addr(sbq_desc, mapaddr),
1765 dma_unmap_len(sbq_desc, maplen),
1766 PCI_DMA_FROMDEVICE);
1767 skb = sbq_desc->p.skb;
1768 ql_realign_skb(skb, hdr_len);
1769 skb_put(skb, hdr_len);
1770 sbq_desc->p.skb = NULL;
1771 }
1772
1773 /*
1774 * Handle the data buffer(s).
1775 */
1776 if (unlikely(!length)) { /* Is there data too? */
1777 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1778 "No Data buffer in this packet.\n");
1779 return skb;
1780 }
1781
1782 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1783 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1784 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1785 "Headers in small, data of %d bytes in small, combine them.\n",
1786 length);
1787 /*
1788 * Data is less than small buffer size so it's
1789 * stuffed in a small buffer.
1790 * For this case we append the data
1791 * from the "data" small buffer to the "header" small
1792 * buffer.
1793 */
1794 sbq_desc = ql_get_curr_sbuf(rx_ring);
1795 pci_dma_sync_single_for_cpu(qdev->pdev,
64b9b41d 1796 dma_unmap_addr
c4e84bde 1797 (sbq_desc, mapaddr),
64b9b41d 1798 dma_unmap_len
c4e84bde
RM
1799 (sbq_desc, maplen),
1800 PCI_DMA_FROMDEVICE);
1801 memcpy(skb_put(skb, length),
1802 sbq_desc->p.skb->data, length);
1803 pci_dma_sync_single_for_device(qdev->pdev,
64b9b41d 1804 dma_unmap_addr
c4e84bde
RM
1805 (sbq_desc,
1806 mapaddr),
64b9b41d 1807 dma_unmap_len
c4e84bde
RM
1808 (sbq_desc,
1809 maplen),
1810 PCI_DMA_FROMDEVICE);
1811 } else {
ae9540f7
JP
1812 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1813 "%d bytes in a single small buffer.\n",
1814 length);
c4e84bde
RM
1815 sbq_desc = ql_get_curr_sbuf(rx_ring);
1816 skb = sbq_desc->p.skb;
1817 ql_realign_skb(skb, length);
1818 skb_put(skb, length);
1819 pci_unmap_single(qdev->pdev,
64b9b41d 1820 dma_unmap_addr(sbq_desc,
c4e84bde 1821 mapaddr),
64b9b41d 1822 dma_unmap_len(sbq_desc,
c4e84bde
RM
1823 maplen),
1824 PCI_DMA_FROMDEVICE);
1825 sbq_desc->p.skb = NULL;
1826 }
1827 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1828 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
ae9540f7
JP
1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1830 "Header in small, %d bytes in large. Chain large to small!\n",
1831 length);
c4e84bde
RM
1832 /*
1833 * The data is in a single large buffer. We
1834 * chain it to the header buffer's skb and let
1835 * it rip.
1836 */
7c734359 1837 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
ae9540f7
JP
1838 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1839 "Chaining page at offset = %d, for %d bytes to skb.\n",
1840 lbq_desc->p.pg_chunk.offset, length);
7c734359
RM
1841 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1842 lbq_desc->p.pg_chunk.offset,
1843 length);
c4e84bde
RM
1844 skb->len += length;
1845 skb->data_len += length;
1846 skb->truesize += length;
c4e84bde
RM
1847 } else {
1848 /*
1849 * The headers and data are in a single large buffer. We
1850 * copy it to a new skb and let it go. This can happen with
1851 * jumbo mtu on a non-TCP/UDP frame.
1852 */
7c734359 1853 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
c4e84bde
RM
1854 skb = netdev_alloc_skb(qdev->ndev, length);
1855 if (skb == NULL) {
ae9540f7
JP
1856 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1857 "No skb available, drop the packet.\n");
c4e84bde
RM
1858 return NULL;
1859 }
4055c7d4 1860 pci_unmap_page(qdev->pdev,
64b9b41d 1861 dma_unmap_addr(lbq_desc,
4055c7d4 1862 mapaddr),
64b9b41d 1863 dma_unmap_len(lbq_desc, maplen),
4055c7d4 1864 PCI_DMA_FROMDEVICE);
c4e84bde 1865 skb_reserve(skb, NET_IP_ALIGN);
ae9540f7
JP
1866 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1867 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1868 length);
7c734359
RM
1869 skb_fill_page_desc(skb, 0,
1870 lbq_desc->p.pg_chunk.page,
1871 lbq_desc->p.pg_chunk.offset,
1872 length);
c4e84bde
RM
1873 skb->len += length;
1874 skb->data_len += length;
1875 skb->truesize += length;
1876 length -= length;
c4e84bde
RM
1877 __pskb_pull_tail(skb,
1878 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1879 VLAN_ETH_HLEN : ETH_HLEN);
1880 }
1881 } else {
1882 /*
1883 * The data is in a chain of large buffers
1884 * pointed to by a small buffer. We loop
1885		 * through and chain them to our small header
1886 * buffer's skb.
1887 * frags: There are 18 max frags and our small
1888 * buffer will hold 32 of them. The thing is,
1889 * we'll use 3 max for our 9000 byte jumbo
1890 * frames. If the MTU goes up we could
1891 * eventually be in trouble.
1892 */
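		/* Illustrative arithmetic, not part of the original source:
		 * assuming a large-buffer chunk of about 4 KB (the actual
		 * lbq_buf_size is chosen elsewhere from the MTU), a 9000-byte
		 * jumbo frame spans DIV_ROUND_UP(9000, 4096) = 3 chunks --
		 * the "3 max" figure above -- well below the 18-frag limit.
		 */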
7c734359 1893 int size, i = 0;
c4e84bde
RM
1894 sbq_desc = ql_get_curr_sbuf(rx_ring);
1895 pci_unmap_single(qdev->pdev,
64b9b41d
FT
1896 dma_unmap_addr(sbq_desc, mapaddr),
1897 dma_unmap_len(sbq_desc, maplen),
c4e84bde
RM
1898 PCI_DMA_FROMDEVICE);
1899 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1900 /*
1901			 * This is a non-TCP/UDP IP frame, so
1902 * the headers aren't split into a small
1903 * buffer. We have to use the small buffer
1904 * that contains our sg list as our skb to
1905 * send upstairs. Copy the sg list here to
1906 * a local buffer and use it to find the
1907 * pages to chain.
1908 */
ae9540f7
JP
1909 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1910 "%d bytes of headers & data in chain of large.\n",
1911 length);
c4e84bde 1912 skb = sbq_desc->p.skb;
c4e84bde
RM
1913 sbq_desc->p.skb = NULL;
1914 skb_reserve(skb, NET_IP_ALIGN);
c4e84bde
RM
1915 }
1916 while (length > 0) {
7c734359
RM
1917 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1918 size = (length < rx_ring->lbq_buf_size) ? length :
1919 rx_ring->lbq_buf_size;
c4e84bde 1920
ae9540f7
JP
1921 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1922 "Adding page %d to skb for %d bytes.\n",
1923 i, size);
7c734359
RM
1924 skb_fill_page_desc(skb, i,
1925 lbq_desc->p.pg_chunk.page,
1926 lbq_desc->p.pg_chunk.offset,
1927 size);
c4e84bde
RM
1928 skb->len += size;
1929 skb->data_len += size;
1930 skb->truesize += size;
1931 length -= size;
c4e84bde
RM
1932 i++;
1933 }
1934 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1935 VLAN_ETH_HLEN : ETH_HLEN);
1936 }
1937 return skb;
1938}
1939
1940/* Process an inbound completion from an rx ring. */
4f848c0a 1941static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
c4e84bde 1942 struct rx_ring *rx_ring,
4f848c0a
RM
1943 struct ib_mac_iocb_rsp *ib_mac_rsp,
1944 u16 vlan_id)
c4e84bde
RM
1945{
1946 struct net_device *ndev = qdev->ndev;
1947 struct sk_buff *skb = NULL;
1948
1949 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1950
1951 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1952 if (unlikely(!skb)) {
ae9540f7
JP
1953 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1954 "No skb available, drop packet.\n");
885ee398 1955 rx_ring->rx_dropped++;
c4e84bde
RM
1956 return;
1957 }
1958
a32959cd
RM
1959 /* Frame error, so drop the packet. */
1960 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
3b11d36e 1961 netif_info(qdev, drv, qdev->ndev,
ae9540f7 1962 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
a32959cd 1963 dev_kfree_skb_any(skb);
885ee398 1964 rx_ring->rx_errors++;
a32959cd
RM
1965 return;
1966 }
ec33a491
RM
1967
1968 /* The max framesize filter on this chip is set higher than
1969 * MTU since FCoE uses 2k frames.
1970 */
1971 if (skb->len > ndev->mtu + ETH_HLEN) {
1972 dev_kfree_skb_any(skb);
885ee398 1973 rx_ring->rx_dropped++;
ec33a491
RM
1974 return;
1975 }
1976
9dfbbaa6
RM
1977 /* loopback self test for ethtool */
1978 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1979 ql_check_lb_frame(qdev, skb);
1980 dev_kfree_skb_any(skb);
1981 return;
1982 }
1983
c4e84bde
RM
1984 prefetch(skb->data);
1985 skb->dev = ndev;
1986 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
ae9540f7
JP
1987 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1988 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1989 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1990 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1991 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1992 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1993 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
885ee398 1994 rx_ring->rx_multicast++;
c4e84bde
RM
1995 }
1996 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
ae9540f7
JP
1997 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998 "Promiscuous Packet.\n");
c4e84bde 1999 }
d555f592 2000
d555f592 2001 skb->protocol = eth_type_trans(skb, ndev);
bc8acf2c 2002 skb_checksum_none_assert(skb);
d555f592
RM
2003
2004 /* If rx checksum is on, and there are no
2005 * csum or frame errors.
2006 */
2007 if (qdev->rx_csum &&
d555f592
RM
2008 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2009 /* TCP frame. */
2010 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
ae9540f7
JP
2011 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2012 "TCP checksum done!\n");
d555f592
RM
2013 skb->ip_summed = CHECKSUM_UNNECESSARY;
2014 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2015 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2016 /* Unfragmented ipv4 UDP frame. */
2017 struct iphdr *iph = (struct iphdr *) skb->data;
2018 if (!(iph->frag_off &
6d29b1ef 2019 ntohs(IP_MF|IP_OFFSET))) {
d555f592 2020 skb->ip_summed = CHECKSUM_UNNECESSARY;
ae9540f7
JP
2021 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2022 "TCP checksum done!\n");
d555f592
RM
2023 }
2024 }
c4e84bde 2025 }
d555f592 2026
885ee398
RM
2027 rx_ring->rx_packets++;
2028 rx_ring->rx_bytes += skb->len;
b2014ff8 2029 skb_record_rx_queue(skb, rx_ring->cq_id);
22bdd4f5
RM
2030 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2031 if (qdev->vlgrp &&
2032 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2033 (vlan_id != 0))
2034 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2035 vlan_id, skb);
2036 else
2037 napi_gro_receive(&rx_ring->napi, skb);
c4e84bde 2038 } else {
22bdd4f5
RM
2039 if (qdev->vlgrp &&
2040 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2041 (vlan_id != 0))
2042 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2043 else
2044 netif_receive_skb(skb);
c4e84bde 2045 }
c4e84bde
RM
2046}
2047
4f848c0a
RM
2048/* Process an inbound completion from an rx ring. */
2049static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2050 struct rx_ring *rx_ring,
2051 struct ib_mac_iocb_rsp *ib_mac_rsp)
2052{
2053 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2054 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2055 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2056 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2057
2058 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2059
2060 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2061 /* The data and headers are split into
2062 * separate buffers.
2063 */
2064 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2065 vlan_id);
2066 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2067 /* The data fit in a single small buffer.
2068 * Allocate a new skb, copy the data and
2069 * return the buffer to the free pool.
2070 */
2071 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2072 length, vlan_id);
63526713
RM
2073 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2074 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2075 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2076 /* TCP packet in a page chunk that's been checksummed.
2077 * Tack it on to our GRO skb and let it go.
2078 */
2079 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2080 length, vlan_id);
4f848c0a
RM
2081 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2082 /* Non-TCP packet in a page chunk. Allocate an
2083		 * skb, tack the page onto its frags, and send it up.
2084 */
2085 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2086 length, vlan_id);
2087 } else {
c0c56955
RM
2088 /* Non-TCP/UDP large frames that span multiple buffers
2089		 * can be processed correctly by the split frame logic.
2090 */
2091 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2092 vlan_id);
4f848c0a
RM
2093 }
2094
2095 return (unsigned long)length;
2096}
2097
c4e84bde
RM
2098/* Process an outbound completion from an rx ring. */
2099static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2100 struct ob_mac_iocb_rsp *mac_rsp)
2101{
2102 struct tx_ring *tx_ring;
2103 struct tx_ring_desc *tx_ring_desc;
2104
2105 QL_DUMP_OB_MAC_RSP(mac_rsp);
2106 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2107 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2108 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
885ee398
RM
2109 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2110 tx_ring->tx_packets++;
c4e84bde
RM
2111 dev_kfree_skb(tx_ring_desc->skb);
2112 tx_ring_desc->skb = NULL;
2113
2114 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2115 OB_MAC_IOCB_RSP_S |
2116 OB_MAC_IOCB_RSP_L |
2117 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2118 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
ae9540f7
JP
2119 netif_warn(qdev, tx_done, qdev->ndev,
2120 "Total descriptor length did not match transfer length.\n");
c4e84bde
RM
2121 }
2122 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
ae9540f7
JP
2123 netif_warn(qdev, tx_done, qdev->ndev,
2124 "Frame too short to be valid, not sent.\n");
c4e84bde
RM
2125 }
2126 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
ae9540f7
JP
2127 netif_warn(qdev, tx_done, qdev->ndev,
2128 "Frame too long, but sent anyway.\n");
c4e84bde
RM
2129 }
2130 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
ae9540f7
JP
2131 netif_warn(qdev, tx_done, qdev->ndev,
2132 "PCI backplane error. Frame not sent.\n");
c4e84bde
RM
2133 }
2134 }
2135 atomic_inc(&tx_ring->tx_count);
2136}
2137
2138/* Fire up a handler to reset the MPI processor. */
2139void ql_queue_fw_error(struct ql_adapter *qdev)
2140{
6a473308 2141 ql_link_off(qdev);
c4e84bde
RM
2142 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2143}
2144
2145void ql_queue_asic_error(struct ql_adapter *qdev)
2146{
6a473308 2147 ql_link_off(qdev);
c4e84bde 2148 ql_disable_interrupts(qdev);
6497b607
RM
2149 /* Clear adapter up bit to signal the recovery
2150 * process that it shouldn't kill the reset worker
2151 * thread
2152 */
2153 clear_bit(QL_ADAPTER_UP, &qdev->flags);
c4e84bde
RM
2154 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2155}
2156
2157static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2158 struct ib_ae_iocb_rsp *ib_ae_rsp)
2159{
2160 switch (ib_ae_rsp->event) {
2161 case MGMT_ERR_EVENT:
ae9540f7
JP
2162 netif_err(qdev, rx_err, qdev->ndev,
2163 "Management Processor Fatal Error.\n");
c4e84bde
RM
2164 ql_queue_fw_error(qdev);
2165 return;
2166
2167 case CAM_LOOKUP_ERR_EVENT:
ae9540f7
JP
2168 netif_err(qdev, link, qdev->ndev,
2169 "Multiple CAM hits lookup occurred.\n");
2170 netif_err(qdev, drv, qdev->ndev,
2171 "This event shouldn't occur.\n");
c4e84bde
RM
2172 ql_queue_asic_error(qdev);
2173 return;
2174
2175 case SOFT_ECC_ERROR_EVENT:
ae9540f7
JP
2176 netif_err(qdev, rx_err, qdev->ndev,
2177 "Soft ECC error detected.\n");
c4e84bde
RM
2178 ql_queue_asic_error(qdev);
2179 break;
2180
2181 case PCI_ERR_ANON_BUF_RD:
ae9540f7
JP
2182 netif_err(qdev, rx_err, qdev->ndev,
2183 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2184 ib_ae_rsp->q_id);
c4e84bde
RM
2185 ql_queue_asic_error(qdev);
2186 break;
2187
2188 default:
ae9540f7
JP
2189 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2190 ib_ae_rsp->event);
c4e84bde
RM
2191 ql_queue_asic_error(qdev);
2192 break;
2193 }
2194}
2195
2196static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2197{
2198 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2199 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2200 struct ob_mac_iocb_rsp *net_rsp = NULL;
2201 int count = 0;
2202
1e213303 2203 struct tx_ring *tx_ring;
c4e84bde
RM
2204 /* While there are entries in the completion queue. */
2205 while (prod != rx_ring->cnsmr_idx) {
2206
ae9540f7
JP
2207 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2208 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2209 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
c4e84bde
RM
2210
2211 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2212 rmb();
2213 switch (net_rsp->opcode) {
2214
2215 case OPCODE_OB_MAC_TSO_IOCB:
2216 case OPCODE_OB_MAC_IOCB:
2217 ql_process_mac_tx_intr(qdev, net_rsp);
2218 break;
2219 default:
ae9540f7
JP
2220 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2221 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2222 net_rsp->opcode);
c4e84bde
RM
2223 }
2224 count++;
2225 ql_update_cq(rx_ring);
ba7cd3ba 2226 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde 2227 }
4da79504
DC
2228 if (!net_rsp)
2229 return 0;
c4e84bde 2230 ql_write_cq_idx(rx_ring);
1e213303 2231 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
4da79504 2232 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
c4e84bde
RM
2233 if (atomic_read(&tx_ring->queue_stopped) &&
2234 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2235 /*
2236 * The queue got stopped because the tx_ring was full.
2237 * Wake it up, because it's now at least 25% empty.
2238 */
1e213303 2239 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
c4e84bde
RM
2240 }
2241
2242 return count;
2243}
2244
2245static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2246{
2247 struct ql_adapter *qdev = rx_ring->qdev;
ba7cd3ba 2248 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2249 struct ql_net_rsp_iocb *net_rsp;
2250 int count = 0;
2251
2252 /* While there are entries in the completion queue. */
2253 while (prod != rx_ring->cnsmr_idx) {
2254
ae9540f7
JP
2255 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2256 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2257 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
c4e84bde
RM
2258
2259 net_rsp = rx_ring->curr_entry;
2260 rmb();
2261 switch (net_rsp->opcode) {
2262 case OPCODE_IB_MAC_IOCB:
2263 ql_process_mac_rx_intr(qdev, rx_ring,
2264 (struct ib_mac_iocb_rsp *)
2265 net_rsp);
2266 break;
2267
2268 case OPCODE_IB_AE_IOCB:
2269 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2270 net_rsp);
2271 break;
2272 default:
ae9540f7
JP
2273 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2274 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2275 net_rsp->opcode);
2276 break;
c4e84bde
RM
2277 }
2278 count++;
2279 ql_update_cq(rx_ring);
ba7cd3ba 2280 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
c4e84bde
RM
2281 if (count == budget)
2282 break;
2283 }
2284 ql_update_buffer_queues(qdev, rx_ring);
2285 ql_write_cq_idx(rx_ring);
2286 return count;
2287}
2288
2289static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2290{
2291 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2292 struct ql_adapter *qdev = rx_ring->qdev;
39aa8165
RM
2293 struct rx_ring *trx_ring;
2294 int i, work_done = 0;
2295 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
c4e84bde 2296
ae9540f7
JP
2297 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2298 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
c4e84bde 2299
39aa8165
RM
2300 /* Service the TX rings first. They start
2301 * right after the RSS rings. */
2302 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2303 trx_ring = &qdev->rx_ring[i];
2304 /* If this TX completion ring belongs to this vector and
2305 * it's not empty then service it.
2306 */
2307 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2308 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2309 trx_ring->cnsmr_idx)) {
ae9540f7
JP
2310 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2311 "%s: Servicing TX completion ring %d.\n",
2312 __func__, trx_ring->cq_id);
39aa8165
RM
2313 ql_clean_outbound_rx_ring(trx_ring);
2314 }
2315 }
2316
2317 /*
2318 * Now service the RSS ring if it's active.
2319 */
2320 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2321 rx_ring->cnsmr_idx) {
ae9540f7
JP
2322 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2323 "%s: Servicing RX completion ring %d.\n",
2324 __func__, rx_ring->cq_id);
39aa8165
RM
2325 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2326 }
2327
c4e84bde 2328 if (work_done < budget) {
22bdd4f5 2329 napi_complete(napi);
c4e84bde
RM
2330 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2331 }
2332 return work_done;
2333}
2334
01e6b953 2335static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
c4e84bde
RM
2336{
2337 struct ql_adapter *qdev = netdev_priv(ndev);
2338
2339 qdev->vlgrp = grp;
2340 if (grp) {
ae9540f7
JP
2341 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2342 "Turning on VLAN in NIC_RCV_CFG.\n");
c4e84bde
RM
2343 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2344 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2345 } else {
ae9540f7
JP
2346 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2347 "Turning off VLAN in NIC_RCV_CFG.\n");
c4e84bde
RM
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2349 }
2350}
2351
01e6b953 2352static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2353{
2354 struct ql_adapter *qdev = netdev_priv(ndev);
2355 u32 enable_bit = MAC_ADDR_E;
cc288f54 2356 int status;
c4e84bde 2357
cc288f54
RM
2358 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359 if (status)
2360 return;
c4e84bde
RM
2361 if (ql_set_mac_addr_reg
2362 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
ae9540f7
JP
2363 netif_err(qdev, ifup, qdev->ndev,
2364 "Failed to init vlan address.\n");
c4e84bde 2365 }
cc288f54 2366 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2367}
2368
01e6b953 2369static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
c4e84bde
RM
2370{
2371 struct ql_adapter *qdev = netdev_priv(ndev);
2372 u32 enable_bit = 0;
cc288f54
RM
2373 int status;
2374
2375 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2376 if (status)
2377 return;
c4e84bde 2378
c4e84bde
RM
2379 if (ql_set_mac_addr_reg
2380 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
ae9540f7
JP
2381 netif_err(qdev, ifup, qdev->ndev,
2382 "Failed to clear vlan address.\n");
c4e84bde 2383 }
cc288f54 2384 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
2385
2386}
2387
c1b60092
RM
2388static void qlge_restore_vlan(struct ql_adapter *qdev)
2389{
2390 qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
2391
2392 if (qdev->vlgrp) {
2393 u16 vid;
2394 for (vid = 0; vid < VLAN_N_VID; vid++) {
2395 if (!vlan_group_get_device(qdev->vlgrp, vid))
2396 continue;
2397 qlge_vlan_rx_add_vid(qdev->ndev, vid);
2398 }
2399 }
2400}
2401
c4e84bde
RM
2402/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2403static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2404{
2405 struct rx_ring *rx_ring = dev_id;
288379f0 2406 napi_schedule(&rx_ring->napi);
c4e84bde
RM
2407 return IRQ_HANDLED;
2408}
2409
c4e84bde
RM
2410/* This handles a fatal error, MPI activity, and the default
2411 * rx_ring in an MSI-X multiple vector environment.
2413 * In an MSI/Legacy environment it also processes the rest of
2413 * the rx_rings.
2414 */
2415static irqreturn_t qlge_isr(int irq, void *dev_id)
2416{
2417 struct rx_ring *rx_ring = dev_id;
2418 struct ql_adapter *qdev = rx_ring->qdev;
2419 struct intr_context *intr_context = &qdev->intr_context[0];
2420 u32 var;
c4e84bde
RM
2421 int work_done = 0;
2422
bb0d215c
RM
2423 spin_lock(&qdev->hw_lock);
2424 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
ae9540f7
JP
2425 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2426 "Shared Interrupt, Not ours!\n");
bb0d215c
RM
2427 spin_unlock(&qdev->hw_lock);
2428 return IRQ_NONE;
c4e84bde 2429 }
bb0d215c 2430 spin_unlock(&qdev->hw_lock);
c4e84bde 2431
bb0d215c 2432 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2433
2434 /*
2435 * Check for fatal error.
2436 */
2437 if (var & STS_FE) {
2438 ql_queue_asic_error(qdev);
ae9540f7
JP
2439 netif_err(qdev, intr, qdev->ndev,
2440 "Got fatal error, STS = %x.\n", var);
c4e84bde 2441 var = ql_read32(qdev, ERR_STS);
ae9540f7
JP
2442 netif_err(qdev, intr, qdev->ndev,
2443 "Resetting chip. Error Status Register = 0x%x\n", var);
c4e84bde
RM
2444 return IRQ_HANDLED;
2445 }
2446
2447 /*
2448 * Check MPI processor activity.
2449 */
5ee22a5a
RM
2450 if ((var & STS_PI) &&
2451 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
c4e84bde
RM
2452 /*
2453 * We've got an async event or mailbox completion.
2454 * Handle it and clear the source of the interrupt.
2455 */
ae9540f7
JP
2456 netif_err(qdev, intr, qdev->ndev,
2457 "Got MPI processor interrupt.\n");
c4e84bde 2458 ql_disable_completion_interrupt(qdev, intr_context->intr);
5ee22a5a
RM
2459 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2460 queue_delayed_work_on(smp_processor_id(),
2461 qdev->workqueue, &qdev->mpi_work, 0);
c4e84bde
RM
2462 work_done++;
2463 }
2464
2465 /*
39aa8165
RM
2466 * Get the bit-mask that shows the active queues for this
2467 * pass. Compare it to the queues that this irq services
2468 * and call napi if there's a match.
c4e84bde 2469 */
39aa8165
RM
2470 var = ql_read32(qdev, ISR1);
2471 if (var & intr_context->irq_mask) {
ae9540f7
JP
2472 netif_info(qdev, intr, qdev->ndev,
2473 "Waking handler for rx_ring[0].\n");
39aa8165 2474 ql_disable_completion_interrupt(qdev, intr_context->intr);
32a5b2a0
RM
2475 napi_schedule(&rx_ring->napi);
2476 work_done++;
2477 }
bb0d215c 2478 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
2479 return work_done ? IRQ_HANDLED : IRQ_NONE;
2480}
2481
2482static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2483{
2484
2485 if (skb_is_gso(skb)) {
2486 int err;
2487 if (skb_header_cloned(skb)) {
2488 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2489 if (err)
2490 return err;
2491 }
2492
2493 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2494 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2495 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2496 mac_iocb_ptr->total_hdrs_len =
2497 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2498 mac_iocb_ptr->net_trans_offset =
2499 cpu_to_le16(skb_network_offset(skb) |
2500 skb_transport_offset(skb)
2501 << OB_MAC_TRANSPORT_HDR_SHIFT);
2502 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2503 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2504 if (likely(skb->protocol == htons(ETH_P_IP))) {
2505 struct iphdr *iph = ip_hdr(skb);
2506 iph->check = 0;
2507 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2508 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2509 iph->daddr, 0,
2510 IPPROTO_TCP,
2511 0);
2512 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2513 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2514 tcp_hdr(skb)->check =
2515 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2516 &ipv6_hdr(skb)->daddr,
2517 0, IPPROTO_TCP, 0);
2518 }
2519 return 1;
2520 }
2521 return 0;
2522}
2523
2524static void ql_hw_csum_setup(struct sk_buff *skb,
2525 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2526{
2527 int len;
2528 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 2529 __sum16 *check;
c4e84bde
RM
2530 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2531 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2532 mac_iocb_ptr->net_trans_offset =
2533 cpu_to_le16(skb_network_offset(skb) |
2534 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2535
2536 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2537 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2538 if (likely(iph->protocol == IPPROTO_TCP)) {
2539 check = &(tcp_hdr(skb)->check);
2540 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2541 mac_iocb_ptr->total_hdrs_len =
2542 cpu_to_le16(skb_transport_offset(skb) +
2543 (tcp_hdr(skb)->doff << 2));
2544 } else {
2545 check = &(udp_hdr(skb)->check);
2546 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2547 mac_iocb_ptr->total_hdrs_len =
2548 cpu_to_le16(skb_transport_offset(skb) +
2549 sizeof(struct udphdr));
2550 }
2551 *check = ~csum_tcpudp_magic(iph->saddr,
2552 iph->daddr, len, iph->protocol, 0);
2553}
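/* Note, not part of the original source: seeding *check with
 * ~csum_tcpudp_magic() stores the one's-complement pseudo-header sum in
 * the TCP/UDP checksum field, a common offload handshake for NICs that
 * compute only the payload portion of the checksum; the hardware adds the
 * payload sum and folds the final result back into the header.
 */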
2554
61357325 2555static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
c4e84bde
RM
2556{
2557 struct tx_ring_desc *tx_ring_desc;
2558 struct ob_mac_iocb_req *mac_iocb_ptr;
2559 struct ql_adapter *qdev = netdev_priv(ndev);
2560 int tso;
2561 struct tx_ring *tx_ring;
1e213303 2562 u32 tx_ring_idx = (u32) skb->queue_mapping;
c4e84bde
RM
2563
2564 tx_ring = &qdev->tx_ring[tx_ring_idx];
2565
74c50b4b
RM
2566 if (skb_padto(skb, ETH_ZLEN))
2567 return NETDEV_TX_OK;
2568
c4e84bde 2569 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
ae9540f7
JP
2570 netif_info(qdev, tx_queued, qdev->ndev,
2571 "%s: shutting down tx queue %d du to lack of resources.\n",
2572 __func__, tx_ring_idx);
1e213303 2573 netif_stop_subqueue(ndev, tx_ring->wq_id);
c4e84bde 2574 atomic_inc(&tx_ring->queue_stopped);
885ee398 2575 tx_ring->tx_errors++;
c4e84bde
RM
2576 return NETDEV_TX_BUSY;
2577 }
2578 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2579 mac_iocb_ptr = tx_ring_desc->queue_entry;
e332471c 2580 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
2581
2582 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2583 mac_iocb_ptr->tid = tx_ring_desc->index;
2584 /* We use the upper 32-bits to store the tx queue for this IO.
2585 * When we get the completion we can use it to establish the context.
2586 */
2587 mac_iocb_ptr->txq_idx = tx_ring_idx;
2588 tx_ring_desc->skb = skb;
2589
2590 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2591
eab6d18d 2592 if (vlan_tx_tag_present(skb)) {
ae9540f7
JP
2593 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2594 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
c4e84bde
RM
2595 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2596 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2597 }
2598 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2599 if (tso < 0) {
2600 dev_kfree_skb_any(skb);
2601 return NETDEV_TX_OK;
2602 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2603 ql_hw_csum_setup(skb,
2604 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2605 }
0d979f74
RM
2606 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2607 NETDEV_TX_OK) {
ae9540f7
JP
2608 netif_err(qdev, tx_queued, qdev->ndev,
2609 "Could not map the segments.\n");
885ee398 2610 tx_ring->tx_errors++;
0d979f74
RM
2611 return NETDEV_TX_BUSY;
2612 }
c4e84bde
RM
2613 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2614 tx_ring->prod_idx++;
2615 if (tx_ring->prod_idx == tx_ring->wq_len)
2616 tx_ring->prod_idx = 0;
2617 wmb();
2618
2619 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
ae9540f7
JP
2620 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2621 "tx queued, slot %d, len %d\n",
2622 tx_ring->prod_idx, skb->len);
c4e84bde
RM
2623
2624 atomic_dec(&tx_ring->tx_count);
2625 return NETDEV_TX_OK;
2626}
2627
9dfbbaa6 2628
c4e84bde
RM
2629static void ql_free_shadow_space(struct ql_adapter *qdev)
2630{
2631 if (qdev->rx_ring_shadow_reg_area) {
2632 pci_free_consistent(qdev->pdev,
2633 PAGE_SIZE,
2634 qdev->rx_ring_shadow_reg_area,
2635 qdev->rx_ring_shadow_reg_dma);
2636 qdev->rx_ring_shadow_reg_area = NULL;
2637 }
2638 if (qdev->tx_ring_shadow_reg_area) {
2639 pci_free_consistent(qdev->pdev,
2640 PAGE_SIZE,
2641 qdev->tx_ring_shadow_reg_area,
2642 qdev->tx_ring_shadow_reg_dma);
2643 qdev->tx_ring_shadow_reg_area = NULL;
2644 }
2645}
2646
2647static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2648{
2649 qdev->rx_ring_shadow_reg_area =
2650 pci_alloc_consistent(qdev->pdev,
2651 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2652 if (qdev->rx_ring_shadow_reg_area == NULL) {
ae9540f7
JP
2653 netif_err(qdev, ifup, qdev->ndev,
2654 "Allocation of RX shadow space failed.\n");
c4e84bde
RM
2655 return -ENOMEM;
2656 }
b25215d0 2657 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2658 qdev->tx_ring_shadow_reg_area =
2659 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2660 &qdev->tx_ring_shadow_reg_dma);
2661 if (qdev->tx_ring_shadow_reg_area == NULL) {
ae9540f7
JP
2662 netif_err(qdev, ifup, qdev->ndev,
2663 "Allocation of TX shadow space failed.\n");
c4e84bde
RM
2664 goto err_wqp_sh_area;
2665 }
b25215d0 2666 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
c4e84bde
RM
2667 return 0;
2668
2669err_wqp_sh_area:
2670 pci_free_consistent(qdev->pdev,
2671 PAGE_SIZE,
2672 qdev->rx_ring_shadow_reg_area,
2673 qdev->rx_ring_shadow_reg_dma);
2674 return -ENOMEM;
2675}
2676
2677static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2678{
2679 struct tx_ring_desc *tx_ring_desc;
2680 int i;
2681 struct ob_mac_iocb_req *mac_iocb_ptr;
2682
2683 mac_iocb_ptr = tx_ring->wq_base;
2684 tx_ring_desc = tx_ring->q;
2685 for (i = 0; i < tx_ring->wq_len; i++) {
2686 tx_ring_desc->index = i;
2687 tx_ring_desc->skb = NULL;
2688 tx_ring_desc->queue_entry = mac_iocb_ptr;
2689 mac_iocb_ptr++;
2690 tx_ring_desc++;
2691 }
2692 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2693 atomic_set(&tx_ring->queue_stopped, 0);
2694}
2695
2696static void ql_free_tx_resources(struct ql_adapter *qdev,
2697 struct tx_ring *tx_ring)
2698{
2699 if (tx_ring->wq_base) {
2700 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2701 tx_ring->wq_base, tx_ring->wq_base_dma);
2702 tx_ring->wq_base = NULL;
2703 }
2704 kfree(tx_ring->q);
2705 tx_ring->q = NULL;
2706}
2707
2708static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2709 struct tx_ring *tx_ring)
2710{
2711 tx_ring->wq_base =
2712 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2713 &tx_ring->wq_base_dma);
2714
8e95a202
JP
2715 if ((tx_ring->wq_base == NULL) ||
2716 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
ae9540f7 2717 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
c4e84bde
RM
2718 return -ENOMEM;
2719 }
2720 tx_ring->q =
2721 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2722 if (tx_ring->q == NULL)
2723 goto err;
2724
2725 return 0;
2726err:
2727 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2728 tx_ring->wq_base, tx_ring->wq_base_dma);
2729 return -ENOMEM;
2730}
2731
8668ae92 2732static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde 2733{
c4e84bde
RM
2734 struct bq_desc *lbq_desc;
2735
7c734359
RM
2736 uint32_t curr_idx, clean_idx;
2737
2738 curr_idx = rx_ring->lbq_curr_idx;
2739 clean_idx = rx_ring->lbq_clean_idx;
2740 while (curr_idx != clean_idx) {
2741 lbq_desc = &rx_ring->lbq[curr_idx];
2742
2743 if (lbq_desc->p.pg_chunk.last_flag) {
c4e84bde 2744 pci_unmap_page(qdev->pdev,
7c734359
RM
2745 lbq_desc->p.pg_chunk.map,
2746 ql_lbq_block_size(qdev),
c4e84bde 2747 PCI_DMA_FROMDEVICE);
7c734359 2748 lbq_desc->p.pg_chunk.last_flag = 0;
c4e84bde 2749 }
7c734359
RM
2750
2751 put_page(lbq_desc->p.pg_chunk.page);
2752 lbq_desc->p.pg_chunk.page = NULL;
2753
2754 if (++curr_idx == rx_ring->lbq_len)
2755 curr_idx = 0;
2756
c4e84bde
RM
2757 }
2758}
2759
8668ae92 2760static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2761{
2762 int i;
2763 struct bq_desc *sbq_desc;
2764
2765 for (i = 0; i < rx_ring->sbq_len; i++) {
2766 sbq_desc = &rx_ring->sbq[i];
2767 if (sbq_desc == NULL) {
ae9540f7
JP
2768 netif_err(qdev, ifup, qdev->ndev,
2769 "sbq_desc %d is NULL.\n", i);
c4e84bde
RM
2770 return;
2771 }
2772 if (sbq_desc->p.skb) {
2773 pci_unmap_single(qdev->pdev,
64b9b41d
FT
2774 dma_unmap_addr(sbq_desc, mapaddr),
2775 dma_unmap_len(sbq_desc, maplen),
c4e84bde
RM
2776 PCI_DMA_FROMDEVICE);
2777 dev_kfree_skb(sbq_desc->p.skb);
2778 sbq_desc->p.skb = NULL;
2779 }
c4e84bde
RM
2780 }
2781}
2782
4545a3f2
RM
2783/* Free all large and small rx buffers associated
2784 * with the completion queues for this device.
2785 */
2786static void ql_free_rx_buffers(struct ql_adapter *qdev)
2787{
2788 int i;
2789 struct rx_ring *rx_ring;
2790
2791 for (i = 0; i < qdev->rx_ring_count; i++) {
2792 rx_ring = &qdev->rx_ring[i];
2793 if (rx_ring->lbq)
2794 ql_free_lbq_buffers(qdev, rx_ring);
2795 if (rx_ring->sbq)
2796 ql_free_sbq_buffers(qdev, rx_ring);
2797 }
2798}
2799
2800static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2801{
2802 struct rx_ring *rx_ring;
2803 int i;
2804
2805 for (i = 0; i < qdev->rx_ring_count; i++) {
2806 rx_ring = &qdev->rx_ring[i];
2807 if (rx_ring->type != TX_Q)
2808 ql_update_buffer_queues(qdev, rx_ring);
2809 }
2810}
2811
2812static void ql_init_lbq_ring(struct ql_adapter *qdev,
2813 struct rx_ring *rx_ring)
2814{
2815 int i;
2816 struct bq_desc *lbq_desc;
2817 __le64 *bq = rx_ring->lbq_base;
2818
2819 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2820 for (i = 0; i < rx_ring->lbq_len; i++) {
2821 lbq_desc = &rx_ring->lbq[i];
2822 memset(lbq_desc, 0, sizeof(*lbq_desc));
2823 lbq_desc->index = i;
2824 lbq_desc->addr = bq;
2825 bq++;
2826 }
2827}
2828
2829static void ql_init_sbq_ring(struct ql_adapter *qdev,
c4e84bde
RM
2830 struct rx_ring *rx_ring)
2831{
2832 int i;
2833 struct bq_desc *sbq_desc;
2c9a0d41 2834 __le64 *bq = rx_ring->sbq_base;
c4e84bde 2835
4545a3f2 2836 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
c4e84bde
RM
2837 for (i = 0; i < rx_ring->sbq_len; i++) {
2838 sbq_desc = &rx_ring->sbq[i];
4545a3f2 2839 memset(sbq_desc, 0, sizeof(*sbq_desc));
c4e84bde 2840 sbq_desc->index = i;
2c9a0d41 2841 sbq_desc->addr = bq;
c4e84bde
RM
2842 bq++;
2843 }
c4e84bde
RM
2844}
2845
2846static void ql_free_rx_resources(struct ql_adapter *qdev,
2847 struct rx_ring *rx_ring)
2848{
c4e84bde
RM
2849 /* Free the small buffer queue. */
2850 if (rx_ring->sbq_base) {
2851 pci_free_consistent(qdev->pdev,
2852 rx_ring->sbq_size,
2853 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2854 rx_ring->sbq_base = NULL;
2855 }
2856
2857 /* Free the small buffer queue control blocks. */
2858 kfree(rx_ring->sbq);
2859 rx_ring->sbq = NULL;
2860
2861 /* Free the large buffer queue. */
2862 if (rx_ring->lbq_base) {
2863 pci_free_consistent(qdev->pdev,
2864 rx_ring->lbq_size,
2865 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2866 rx_ring->lbq_base = NULL;
2867 }
2868
2869 /* Free the large buffer queue control blocks. */
2870 kfree(rx_ring->lbq);
2871 rx_ring->lbq = NULL;
2872
2873 /* Free the rx queue. */
2874 if (rx_ring->cq_base) {
2875 pci_free_consistent(qdev->pdev,
2876 rx_ring->cq_size,
2877 rx_ring->cq_base, rx_ring->cq_base_dma);
2878 rx_ring->cq_base = NULL;
2879 }
2880}
2881
2882/* Allocate queues and buffers for this completions queue based
2883 * on the values in the parameter structure. */
2884static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2885 struct rx_ring *rx_ring)
2886{
2887
2888 /*
2889 * Allocate the completion queue for this rx_ring.
2890 */
2891 rx_ring->cq_base =
2892 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2893 &rx_ring->cq_base_dma);
2894
2895 if (rx_ring->cq_base == NULL) {
ae9540f7 2896 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
c4e84bde
RM
2897 return -ENOMEM;
2898 }
2899
2900 if (rx_ring->sbq_len) {
2901 /*
2902 * Allocate small buffer queue.
2903 */
2904 rx_ring->sbq_base =
2905 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2906 &rx_ring->sbq_base_dma);
2907
2908 if (rx_ring->sbq_base == NULL) {
ae9540f7
JP
2909 netif_err(qdev, ifup, qdev->ndev,
2910 "Small buffer queue allocation failed.\n");
c4e84bde
RM
2911 goto err_mem;
2912 }
2913
2914 /*
2915 * Allocate small buffer queue control blocks.
2916 */
2917 rx_ring->sbq =
2918 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2919 GFP_KERNEL);
2920 if (rx_ring->sbq == NULL) {
ae9540f7
JP
2921 netif_err(qdev, ifup, qdev->ndev,
2922 "Small buffer queue control block allocation failed.\n");
c4e84bde
RM
2923 goto err_mem;
2924 }
2925
4545a3f2 2926 ql_init_sbq_ring(qdev, rx_ring);
c4e84bde
RM
2927 }
2928
2929 if (rx_ring->lbq_len) {
2930 /*
2931 * Allocate large buffer queue.
2932 */
2933 rx_ring->lbq_base =
2934 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2935 &rx_ring->lbq_base_dma);
2936
2937 if (rx_ring->lbq_base == NULL) {
ae9540f7
JP
2938 netif_err(qdev, ifup, qdev->ndev,
2939 "Large buffer queue allocation failed.\n");
c4e84bde
RM
2940 goto err_mem;
2941 }
2942 /*
2943 * Allocate large buffer queue control blocks.
2944 */
2945 rx_ring->lbq =
2946 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2947 GFP_KERNEL);
2948 if (rx_ring->lbq == NULL) {
ae9540f7
JP
2949 netif_err(qdev, ifup, qdev->ndev,
2950 "Large buffer queue control block allocation failed.\n");
c4e84bde
RM
2951 goto err_mem;
2952 }
2953
4545a3f2 2954 ql_init_lbq_ring(qdev, rx_ring);
c4e84bde
RM
2955 }
2956
2957 return 0;
2958
2959err_mem:
2960 ql_free_rx_resources(qdev, rx_ring);
2961 return -ENOMEM;
2962}
2963
2964static void ql_tx_ring_clean(struct ql_adapter *qdev)
2965{
2966 struct tx_ring *tx_ring;
2967 struct tx_ring_desc *tx_ring_desc;
2968 int i, j;
2969
2970 /*
2971 * Loop through all queues and free
2972 * any resources.
2973 */
2974 for (j = 0; j < qdev->tx_ring_count; j++) {
2975 tx_ring = &qdev->tx_ring[j];
2976 for (i = 0; i < tx_ring->wq_len; i++) {
2977 tx_ring_desc = &tx_ring->q[i];
2978 if (tx_ring_desc && tx_ring_desc->skb) {
ae9540f7
JP
2979 netif_err(qdev, ifdown, qdev->ndev,
2980 "Freeing lost SKB %p, from queue %d, index %d.\n",
2981 tx_ring_desc->skb, j,
2982 tx_ring_desc->index);
c4e84bde
RM
2983 ql_unmap_send(qdev, tx_ring_desc,
2984 tx_ring_desc->map_cnt);
2985 dev_kfree_skb(tx_ring_desc->skb);
2986 tx_ring_desc->skb = NULL;
2987 }
2988 }
2989 }
2990}
2991
c4e84bde
RM
2992static void ql_free_mem_resources(struct ql_adapter *qdev)
2993{
2994 int i;
2995
2996 for (i = 0; i < qdev->tx_ring_count; i++)
2997 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2998 for (i = 0; i < qdev->rx_ring_count; i++)
2999 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3000 ql_free_shadow_space(qdev);
3001}
3002
3003static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3004{
3005 int i;
3006
3007 /* Allocate space for our shadow registers and such. */
3008 if (ql_alloc_shadow_space(qdev))
3009 return -ENOMEM;
3010
3011 for (i = 0; i < qdev->rx_ring_count; i++) {
3012 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
ae9540f7
JP
3013 netif_err(qdev, ifup, qdev->ndev,
3014 "RX resource allocation failed.\n");
c4e84bde
RM
3015 goto err_mem;
3016 }
3017 }
3018 /* Allocate tx queue resources */
3019 for (i = 0; i < qdev->tx_ring_count; i++) {
3020 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
ae9540f7
JP
3021 netif_err(qdev, ifup, qdev->ndev,
3022 "TX resource allocation failed.\n");
c4e84bde
RM
3023 goto err_mem;
3024 }
3025 }
3026 return 0;
3027
3028err_mem:
3029 ql_free_mem_resources(qdev);
3030 return -ENOMEM;
3031}
3032
3033/* Set up the rx ring control block and pass it to the chip.
3034 * The control block is defined as
3035 * "Completion Queue Initialization Control Block", or cqicb.
3036 */
3037static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3038{
3039 struct cqicb *cqicb = &rx_ring->cqicb;
3040 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
b8facca0 3041 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde 3042 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
b8facca0 3043 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
c4e84bde
RM
3044 void __iomem *doorbell_area =
3045 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3046 int err = 0;
3047 u16 bq_len;
d4a4aba6 3048 u64 tmp;
b8facca0
RM
3049 __le64 *base_indirect_ptr;
3050 int page_entries;
c4e84bde
RM
3051
3052 /* Set up the shadow registers for this ring. */
3053 rx_ring->prod_idx_sh_reg = shadow_reg;
3054 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
7c734359 3055 *rx_ring->prod_idx_sh_reg = 0;
c4e84bde
RM
3056 shadow_reg += sizeof(u64);
3057 shadow_reg_dma += sizeof(u64);
3058 rx_ring->lbq_base_indirect = shadow_reg;
3059 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
b8facca0
RM
3060 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3061 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
c4e84bde
RM
3062 rx_ring->sbq_base_indirect = shadow_reg;
3063 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3064
3065 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 3066 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3067 rx_ring->cnsmr_idx = 0;
3068 rx_ring->curr_entry = rx_ring->cq_base;
3069
3070 /* PCI doorbell mem area + 0x04 for valid register */
3071 rx_ring->valid_db_reg = doorbell_area + 0x04;
3072
3073 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 3074 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
3075
3076 /* PCI doorbell mem area + 0x1c */
8668ae92 3077 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
3078
3079 memset((void *)cqicb, 0, sizeof(struct cqicb));
3080 cqicb->msix_vect = rx_ring->irq;
3081
459caf5a
RM
3082 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3083 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 3084
97345524 3085 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 3086
97345524 3087 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
3088
3089 /*
3090 * Set up the control block load flags.
3091 */
3092 cqicb->flags = FLAGS_LC | /* Load queue base address */
3093 FLAGS_LV | /* Load MSI-X vector */
3094 FLAGS_LI; /* Load irq delay values */
3095 if (rx_ring->lbq_len) {
3096 cqicb->flags |= FLAGS_LL; /* Load lbq values */
a419aef8 3097 tmp = (u64)rx_ring->lbq_base_dma;
b8facca0
RM
3098 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3099 page_entries = 0;
3100 do {
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3104 page_entries++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
97345524
RM
3106 cqicb->lbq_addr =
3107 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
3108 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3109 (u16) rx_ring->lbq_buf_size;
3110 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3111 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3112 (u16) rx_ring->lbq_len;
c4e84bde 3113 cqicb->lbq_len = cpu_to_le16(bq_len);
4545a3f2 3114 rx_ring->lbq_prod_idx = 0;
c4e84bde 3115 rx_ring->lbq_curr_idx = 0;
4545a3f2
RM
3116 rx_ring->lbq_clean_idx = 0;
3117 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
c4e84bde
RM
3118 }
3119 if (rx_ring->sbq_len) {
3120 cqicb->flags |= FLAGS_LS; /* Load sbq values */
a419aef8 3121 tmp = (u64)rx_ring->sbq_base_dma;
b8facca0
RM
3122 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3123 page_entries = 0;
3124 do {
3125 *base_indirect_ptr = cpu_to_le64(tmp);
3126 tmp += DB_PAGE_SIZE;
3127 base_indirect_ptr++;
3128 page_entries++;
3129 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
97345524
RM
3130 cqicb->sbq_addr =
3131 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde 3132 cqicb->sbq_buf_size =
52e55f3c 3133 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
459caf5a
RM
3134 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3135 (u16) rx_ring->sbq_len;
c4e84bde 3136 cqicb->sbq_len = cpu_to_le16(bq_len);
4545a3f2 3137 rx_ring->sbq_prod_idx = 0;
c4e84bde 3138 rx_ring->sbq_curr_idx = 0;
4545a3f2
RM
3139 rx_ring->sbq_clean_idx = 0;
3140 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
c4e84bde
RM
3141 }
3142 switch (rx_ring->type) {
3143 case TX_Q:
c4e84bde
RM
3144 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3145 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3146 break;
c4e84bde
RM
3147 case RX_Q:
3148 /* Inbound completion handling rx_rings run in
3149 * separate NAPI contexts.
3150 */
3151 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3152 64);
3153 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3154 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3155 break;
3156 default:
ae9540f7
JP
3157 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3158 "Invalid rx_ring->type = %d.\n", rx_ring->type);
c4e84bde 3159 }
ae9540f7
JP
3160 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3161 "Initializing rx work queue.\n");
c4e84bde
RM
3162 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3163 CFG_LCQ, rx_ring->cq_id);
3164 if (err) {
ae9540f7 3165 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
c4e84bde
RM
3166 return err;
3167 }
c4e84bde
RM
3168 return err;
3169}
3170
3171static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3172{
3173 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3174 void __iomem *doorbell_area =
3175 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3176 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3177 (tx_ring->wq_id * sizeof(u64));
3178 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3179 (tx_ring->wq_id * sizeof(u64));
3180 int err = 0;
3181
3182 /*
3183 * Assign doorbell registers for this tx_ring.
3184 */
3185 /* TX PCI doorbell mem area for tx producer index */
8668ae92 3186 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
3187 tx_ring->prod_idx = 0;
3188 /* TX PCI doorbell mem area + 0x04 */
3189 tx_ring->valid_db_reg = doorbell_area + 0x04;
3190
3191 /*
3192 * Assign shadow registers for this tx_ring.
3193 */
3194 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3195 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3196
3197 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3198 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3199 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3200 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3201 wqicb->rid = 0;
97345524 3202 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 3203
97345524 3204 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
3205
3206 ql_init_tx_ring(qdev, tx_ring);
3207
e332471c 3208 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
c4e84bde
RM
3209 (u16) tx_ring->wq_id);
3210 if (err) {
ae9540f7 3211 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
c4e84bde
RM
3212 return err;
3213 }
ae9540f7
JP
3214 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3215 "Successfully loaded WQICB.\n");
c4e84bde
RM
3216 return err;
3217}
3218
3219static void ql_disable_msix(struct ql_adapter *qdev)
3220{
3221 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3222 pci_disable_msix(qdev->pdev);
3223 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3224 kfree(qdev->msi_x_entry);
3225 qdev->msi_x_entry = NULL;
3226 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3227 pci_disable_msi(qdev->pdev);
3228 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3229 }
3230}
3231
a4ab6137
RM
3232/* We start by trying to get the number of vectors
3233 * stored in qdev->intr_count. If we don't get that
3234 * many then we reduce the count and try again.
3235 */
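/* Note on the retry loop below, not part of the original source: in
 * kernels of this era pci_enable_msix() returns 0 on success, a negative
 * errno on failure, or a positive count of the vectors actually available
 * when fewer than requested can be granted; the do/while loop keeps
 * retrying with that returned count until it gets 0 or an error.
 */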
c4e84bde
RM
3236static void ql_enable_msix(struct ql_adapter *qdev)
3237{
a4ab6137 3238 int i, err;
c4e84bde 3239
c4e84bde 3240 /* Get the MSIX vectors. */
a5a62a1c 3241 if (qlge_irq_type == MSIX_IRQ) {
c4e84bde
RM
3242 /* Try to alloc space for the msix struct,
3243 * if it fails then go to MSI/legacy.
3244 */
a4ab6137 3245 qdev->msi_x_entry = kcalloc(qdev->intr_count,
c4e84bde
RM
3246 sizeof(struct msix_entry),
3247 GFP_KERNEL);
3248 if (!qdev->msi_x_entry) {
a5a62a1c 3249 qlge_irq_type = MSI_IRQ;
c4e84bde
RM
3250 goto msi;
3251 }
3252
a4ab6137 3253 for (i = 0; i < qdev->intr_count; i++)
c4e84bde
RM
3254 qdev->msi_x_entry[i].entry = i;
3255
a4ab6137
RM
3256 /* Loop to get our vectors. We start with
3257 * what we want and settle for what we get.
3258 */
3259 do {
3260 err = pci_enable_msix(qdev->pdev,
3261 qdev->msi_x_entry, qdev->intr_count);
3262 if (err > 0)
3263 qdev->intr_count = err;
3264 } while (err > 0);
3265
3266 if (err < 0) {
c4e84bde
RM
3267 kfree(qdev->msi_x_entry);
3268 qdev->msi_x_entry = NULL;
ae9540f7
JP
3269 netif_warn(qdev, ifup, qdev->ndev,
3270 "MSI-X Enable failed, trying MSI.\n");
a4ab6137 3271 qdev->intr_count = 1;
a5a62a1c 3272 qlge_irq_type = MSI_IRQ;
a4ab6137
RM
3273 } else if (err == 0) {
3274 set_bit(QL_MSIX_ENABLED, &qdev->flags);
ae9540f7
JP
3275 netif_info(qdev, ifup, qdev->ndev,
3276 "MSI-X Enabled, got %d vectors.\n",
3277 qdev->intr_count);
a4ab6137 3278 return;
c4e84bde
RM
3279 }
3280 }
3281msi:
a4ab6137 3282 qdev->intr_count = 1;
a5a62a1c 3283 if (qlge_irq_type == MSI_IRQ) {
c4e84bde
RM
3284 if (!pci_enable_msi(qdev->pdev)) {
3285 set_bit(QL_MSI_ENABLED, &qdev->flags);
ae9540f7
JP
3286 netif_info(qdev, ifup, qdev->ndev,
3287 "Running with MSI interrupts.\n");
c4e84bde
RM
3288 return;
3289 }
3290 }
a5a62a1c 3291 qlge_irq_type = LEG_IRQ;
ae9540f7
JP
3292 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3293 "Running with legacy interrupts.\n");
c4e84bde
RM
3294}
3295
39aa8165
RM
3296/* Each vector services 1 RSS ring and 1 or more
3297 * TX completion rings. This function loops through
3298 * the TX completion rings and assigns the vector that
3299 * will service it. An example would be if there are
3300 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3301 * This would mean that vector 0 would service RSS ring 0
3302 * and TX completion rings 0,1,2 and 3. Vector 1 would
3303 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3304 */
3305static void ql_set_tx_vect(struct ql_adapter *qdev)
3306{
3307 int i, j, vect;
3308 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3309
3310 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3311 /* Assign irq vectors to TX rx_rings.*/
3312 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3313 i < qdev->rx_ring_count; i++) {
3314 if (j == tx_rings_per_vector) {
3315 vect++;
3316 j = 0;
3317 }
3318 qdev->rx_ring[i].irq = vect;
3319 j++;
3320 }
3321 } else {
3322 /* For single vector all rings have an irq
3323 * of zero.
3324 */
3325 for (i = 0; i < qdev->rx_ring_count; i++)
3326 qdev->rx_ring[i].irq = 0;
3327 }
3328}
3329
3330/* Set the interrupt mask for this vector. Each vector
3331 * will service 1 RSS ring and 1 or more TX completion
3332 * rings. This function sets up a bit mask per vector
3333 * that indicates which rings it services.
3334 */
3335static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3336{
3337 int j, vect = ctx->intr;
3338 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3339
3340 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3341 /* Add the RSS ring serviced by this vector
3342 * to the mask.
3343 */
3344 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3345 /* Add the TX ring(s) serviced by this vector
3346 * to the mask. */
3347 for (j = 0; j < tx_rings_per_vector; j++) {
3348 ctx->irq_mask |=
3349 (1 << qdev->rx_ring[qdev->rss_ring_count +
3350 (vect * tx_rings_per_vector) + j].cq_id);
3351 }
3352 } else {
3353 /* For single vector we just shift each queue's
3354 * ID into the mask.
3355 */
3356 for (j = 0; j < qdev->rx_ring_count; j++)
3357 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3358 }
3359}
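/* Illustrative example, not part of the original source: with 2 MSI-X
 * vectors, 2 RSS rings (cq_id 0 and 1) and 4 TX completion rings
 * (cq_id 2..5), and assuming cq_id matches the ring index as configured
 * elsewhere in this driver, tx_rings_per_vector = 4 / 2 = 2, so
 *   vector 0: irq_mask = (1 << 0) | (1 << 2) | (1 << 3) = 0x0d
 *   vector 1: irq_mask = (1 << 1) | (1 << 4) | (1 << 5) = 0x32
 */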
3360
c4e84bde
RM
3361/*
3362 * Here we build the intr_context structures based on
3363 * our rx_ring count and intr vector count.
3364 * The intr_context structure is used to hook each vector
3365 * to possibly different handlers.
3366 */
3367static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3368{
3369 int i = 0;
3370 struct intr_context *intr_context = &qdev->intr_context[0];
3371
c4e84bde
RM
3372 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3373		/* Each rx_ring has its
3374 * own intr_context since we have separate
3375 * vectors for each queue.
c4e84bde
RM
3376 */
3377 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3378 qdev->rx_ring[i].irq = i;
3379 intr_context->intr = i;
3380 intr_context->qdev = qdev;
39aa8165
RM
3381 /* Set up this vector's bit-mask that indicates
3382 * which queues it services.
3383 */
3384 ql_set_irq_mask(qdev, intr_context);
c4e84bde
RM
3385 /*
3386 * We set up each vectors enable/disable/read bits so
3387 * there's no bit/mask calculations in the critical path.
3388 */
3389 intr_context->intr_en_mask =
3390 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3391 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3392 | i;
3393 intr_context->intr_dis_mask =
3394 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3395 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3396 INTR_EN_IHD | i;
3397 intr_context->intr_read_mask =
3398 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3399 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3400 i;
39aa8165
RM
3401 if (i == 0) {
3402 /* The first vector/queue handles
3403 * broadcast/multicast, fatal errors,
3404 * and firmware events. This in addition
3405 * to normal inbound NAPI processing.
c4e84bde 3406 */
39aa8165 3407 intr_context->handler = qlge_isr;
b2014ff8
RM
3408 sprintf(intr_context->name, "%s-rx-%d",
3409 qdev->ndev->name, i);
3410 } else {
c4e84bde 3411 /*
39aa8165 3412 * Inbound queues handle unicast frames only.
c4e84bde 3413 */
39aa8165
RM
3414 intr_context->handler = qlge_msix_rx_isr;
3415 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde 3416 qdev->ndev->name, i);
c4e84bde
RM
3417 }
3418 }
3419 } else {
3420 /*
3421 * All rx_rings use the same intr_context since
3422 * there is only one vector.
3423 */
3424 intr_context->intr = 0;
3425 intr_context->qdev = qdev;
3426 /*
3427 * We set up each vectors enable/disable/read bits so
3428 * there's no bit/mask calculations in the critical path.
3429 */
3430 intr_context->intr_en_mask =
3431 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3432 intr_context->intr_dis_mask =
3433 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3434 INTR_EN_TYPE_DISABLE;
3435 intr_context->intr_read_mask =
3436 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3437 /*
3438 * Single interrupt means one handler for all rings.
3439 */
3440 intr_context->handler = qlge_isr;
3441 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
39aa8165
RM
3442 /* Set up this vector's bit-mask that indicates
3443 * which queues it services. In this case there is
3444 * a single vector so it will service all RSS and
3445 * TX completion rings.
3446 */
3447 ql_set_irq_mask(qdev, intr_context);
c4e84bde 3448 }
39aa8165
RM
3449 /* Tell the TX completion rings which MSIx vector
3450 * they will be using.
3451 */
3452 ql_set_tx_vect(qdev);
c4e84bde
RM
3453}
3454
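/* Illustration only -- a minimal sketch, not the driver's actual
 * interrupt helper.  Because intr_en_mask and intr_dis_mask are
 * precomputed in ql_resolve_queues_to_irqs() above, enabling or
 * disabling a vector in the hot path can be a single register write
 * with no bit/mask arithmetic; INTR_EN is assumed here to be the
 * interrupt enable register this driver writes elsewhere.
 */
static inline void example_enable_vector(struct ql_adapter *qdev,
					 struct intr_context *ctx)
{
	ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}
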
3455static void ql_free_irq(struct ql_adapter *qdev)
3456{
3457 int i;
3458 struct intr_context *intr_context = &qdev->intr_context[0];
3459
3460 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3461 if (intr_context->hooked) {
3462 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3463 free_irq(qdev->msi_x_entry[i].vector,
3464 &qdev->rx_ring[i]);
ae9540f7
JP
3465 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3466 "freeing msix interrupt %d.\n", i);
c4e84bde
RM
3467 } else {
3468 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
ae9540f7
JP
3469 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3470 "freeing msi interrupt %d.\n", i);
c4e84bde
RM
3471 }
3472 }
3473 }
3474 ql_disable_msix(qdev);
3475}
3476
3477static int ql_request_irq(struct ql_adapter *qdev)
3478{
3479 int i;
3480 int status = 0;
3481 struct pci_dev *pdev = qdev->pdev;
3482 struct intr_context *intr_context = &qdev->intr_context[0];
3483
3484 ql_resolve_queues_to_irqs(qdev);
3485
3486 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3487 atomic_set(&intr_context->irq_cnt, 0);
3488 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489 status = request_irq(qdev->msi_x_entry[i].vector,
3490 intr_context->handler,
3491 0,
3492 intr_context->name,
3493 &qdev->rx_ring[i]);
3494 if (status) {
ae9540f7
JP
3495 netif_err(qdev, ifup, qdev->ndev,
3496 "Failed request for MSIX interrupt %d.\n",
3497 i);
c4e84bde
RM
3498 goto err_irq;
3499 } else {
ae9540f7
JP
3500 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3501 "Hooked intr %d, queue type %s, with name %s.\n",
3502 i,
3503 qdev->rx_ring[i].type == DEFAULT_Q ?
3504 "DEFAULT_Q" :
3505 qdev->rx_ring[i].type == TX_Q ?
3506 "TX_Q" :
3507 qdev->rx_ring[i].type == RX_Q ?
3508 "RX_Q" : "",
3509 intr_context->name);
c4e84bde
RM
3510 }
3511 } else {
ae9540f7
JP
3512 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3513 "trying msi or legacy interrupts.\n");
3514 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3515 "%s: irq = %d.\n", __func__, pdev->irq);
3516 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 "%s: context->name = %s.\n", __func__,
3518 intr_context->name);
3519 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3520 "%s: dev_id = 0x%p.\n", __func__,
3521 &qdev->rx_ring[0]);
c4e84bde
RM
3522 status =
3523 request_irq(pdev->irq, qlge_isr,
3524 test_bit(QL_MSI_ENABLED,
3525 &qdev->
3526 flags) ? 0 : IRQF_SHARED,
3527 intr_context->name, &qdev->rx_ring[0]);
3528 if (status)
3529 goto err_irq;
3530
ae9540f7
JP
3531 netif_err(qdev, ifup, qdev->ndev,
3532 "Hooked intr %d, queue type %s, with name %s.\n",
3533 i,
3534 qdev->rx_ring[0].type == DEFAULT_Q ?
3535 "DEFAULT_Q" :
3536 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3537 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3538 intr_context->name);
c4e84bde
RM
3539 }
3540 intr_context->hooked = 1;
3541 }
3542 return status;
3543err_irq:
ae9540f7 3544 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
c4e84bde
RM
3545 ql_free_irq(qdev);
3546 return status;
3547}
3548
3549static int ql_start_rss(struct ql_adapter *qdev)
3550{
541ae28c
RM
3551 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3552 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3553 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3554 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3555 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3556 0xbe, 0xac, 0x01, 0xfa};
c4e84bde
RM
3557 struct ricb *ricb = &qdev->ricb;
3558 int status = 0;
3559 int i;
3560 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3561
e332471c 3562 memset((void *)ricb, 0, sizeof(*ricb));
c4e84bde 3563
b2014ff8 3564 ricb->base_cq = RSS_L4K;
c4e84bde 3565 ricb->flags =
541ae28c
RM
3566 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3567 ricb->mask = cpu_to_le16((u16)(0x3ff));
c4e84bde
RM
3568
3569 /*
3570 * Fill out the Indirection Table.
3571 */
541ae28c
RM
3572 for (i = 0; i < 1024; i++)
3573 hash_id[i] = (i & (qdev->rss_ring_count - 1));
c4e84bde 3574
541ae28c
RM
3575 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3576 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
c4e84bde 3577
ae9540f7 3578 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
c4e84bde 3579
e332471c 3580 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
c4e84bde 3581 if (status) {
ae9540f7 3582 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
c4e84bde
RM
3583 return status;
3584 }
ae9540f7
JP
3585 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3586 "Successfully loaded RICB.\n");
c4e84bde
RM
3587 return status;
3588}
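
/* Illustration only -- a worked example of the indirection-table fill
 * in ql_start_rss() above.  The "i & (rss_ring_count - 1)" form relies
 * on rss_ring_count being a power of two; with four RSS rings the
 * 1024-entry table repeats 0, 1, 2, 3, 0, 1, 2, 3, ... so hash results
 * are spread evenly across the four inbound completion queues.
 */
static void example_fill_indirection(u8 *table, int entries, int rings)
{
	int i;

	for (i = 0; i < entries; i++)
		table[i] = i & (rings - 1);	/* e.g. i & 3 for 4 rings */
}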
3589
a5f59dc9 3590static int ql_clear_routing_entries(struct ql_adapter *qdev)
c4e84bde 3591{
a5f59dc9 3592 int i, status = 0;
c4e84bde 3593
8587ea35
RM
3594 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3595 if (status)
3596 return status;
c4e84bde
RM
3597 /* Clear all the entries in the routing table. */
3598 for (i = 0; i < 16; i++) {
3599 status = ql_set_routing_reg(qdev, i, 0, 0);
3600 if (status) {
ae9540f7
JP
3601 netif_err(qdev, ifup, qdev->ndev,
3602 "Failed to init routing register for CAM packets.\n");
a5f59dc9 3603 break;
c4e84bde
RM
3604 }
3605 }
a5f59dc9
RM
3606 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3607 return status;
3608}
3609
3610/* Initialize the frame-to-queue routing. */
3611static int ql_route_initialize(struct ql_adapter *qdev)
3612{
3613 int status = 0;
3614
fd21cf52
RM
3615 /* Clear all the entries in the routing table. */
3616 status = ql_clear_routing_entries(qdev);
a5f59dc9
RM
3617 if (status)
3618 return status;
3619
fd21cf52 3620 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
a5f59dc9 3621 if (status)
fd21cf52 3622 return status;
c4e84bde 3623
fbc2ac33
RM
3624 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3625 RT_IDX_IP_CSUM_ERR, 1);
3626 if (status) {
3627 netif_err(qdev, ifup, qdev->ndev,
3628 "Failed to init routing register "
3629 "for IP CSUM error packets.\n");
3630 goto exit;
3631 }
3632 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3633 RT_IDX_TU_CSUM_ERR, 1);
c4e84bde 3634 if (status) {
ae9540f7 3635 netif_err(qdev, ifup, qdev->ndev,
fbc2ac33
RM
3636 "Failed to init routing register "
3637 "for TCP/UDP CSUM error packets.\n");
8587ea35 3638 goto exit;
c4e84bde
RM
3639 }
3640 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3641 if (status) {
ae9540f7
JP
3642 netif_err(qdev, ifup, qdev->ndev,
3643 "Failed to init routing register for broadcast packets.\n");
8587ea35 3644 goto exit;
c4e84bde
RM
3645 }
3646 /* If we have more than one inbound queue, then turn on RSS in the
3647 * routing block.
3648 */
3649 if (qdev->rss_ring_count > 1) {
3650 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3651 RT_IDX_RSS_MATCH, 1);
3652 if (status) {
ae9540f7
JP
3653 netif_err(qdev, ifup, qdev->ndev,
3654 "Failed to init routing register for MATCH RSS packets.\n");
8587ea35 3655 goto exit;
c4e84bde
RM
3656 }
3657 }
3658
3659 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3660 RT_IDX_CAM_HIT, 1);
8587ea35 3661 if (status)
ae9540f7
JP
3662 netif_err(qdev, ifup, qdev->ndev,
3663 "Failed to init routing register for CAM packets.\n");
8587ea35
RM
3664exit:
3665 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
3666 return status;
3667}
3668
2ee1e272 3669int ql_cam_route_initialize(struct ql_adapter *qdev)
bb58b5b6 3670{
7fab3bfe 3671 int status, set;
bb58b5b6 3672
7fab3bfe
RM
3673 /* Check if the link is up and use that to
3674 * determine if we are setting or clearing
3675 * the MAC address in the CAM.
3676 */
3677 set = ql_read32(qdev, STS);
3678 set &= qdev->port_link_up;
3679 status = ql_set_mac_addr(qdev, set);
bb58b5b6 3680 if (status) {
ae9540f7 3681 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
bb58b5b6
RM
3682 return status;
3683 }
3684
3685 status = ql_route_initialize(qdev);
3686 if (status)
ae9540f7 3687 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
bb58b5b6
RM
3688
3689 return status;
3690}
3691
c4e84bde
RM
3692static int ql_adapter_initialize(struct ql_adapter *qdev)
3693{
3694 u32 value, mask;
3695 int i;
3696 int status = 0;
3697
3698 /*
3699 * Set up the System register to halt on errors.
3700 */
3701 value = SYS_EFE | SYS_FAE;
3702 mask = value << 16;
3703 ql_write32(qdev, SYS, mask | value);
3704
c9cf0a04
RM
3705 /* Set the default queue, and VLAN behavior. */
3706 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3707 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
c4e84bde
RM
3708 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3709
3710 /* Set the MPI interrupt to enabled. */
3711 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3712
3713 /* Enable the function, set pagesize, enable error checking. */
3714 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
572c526f
RM
3715 FSC_EC | FSC_VM_PAGE_4K;
3716 value |= SPLT_SETTING;
c4e84bde
RM
3717
3718 /* Set/clear header splitting. */
3719 mask = FSC_VM_PAGESIZE_MASK |
3720 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3721 ql_write32(qdev, FSC, mask | value);
3722
572c526f 3723 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
c4e84bde 3724
a3b71939
RM
3725 /* Set RX packet routing to use port/pci function on which the
3726 * packet arrived on in addition to usual frame routing.
3727 * This is helpful on bonding where both interfaces can have
3728 * the same MAC address.
3729 */
3730 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
bc083ce9
RM
3731 /* Reroute all packets to our Interface.
3732 * They may have been routed to MPI firmware
3733 * due to WOL.
3734 */
3735 value = ql_read32(qdev, MGMT_RCV_CFG);
3736 value &= ~MGMT_RCV_CFG_RM;
3737 mask = 0xffff0000;
3738
3739 /* Sticky reg needs clearing due to WOL. */
3740 ql_write32(qdev, MGMT_RCV_CFG, mask);
3741 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3742
3743 /* Default WOL is enabled on Mezz cards */
3744 if (qdev->pdev->subsystem_device == 0x0068 ||
3745 qdev->pdev->subsystem_device == 0x0180)
3746 qdev->wol = WAKE_MAGIC;
a3b71939 3747
c4e84bde
RM
3748 /* Start up the rx queues. */
3749 for (i = 0; i < qdev->rx_ring_count; i++) {
3750 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3751 if (status) {
ae9540f7
JP
3752 netif_err(qdev, ifup, qdev->ndev,
3753 "Failed to start rx ring[%d].\n", i);
c4e84bde
RM
3754 return status;
3755 }
3756 }
3757
3758 /* If there is more than one inbound completion queue
3759 * then download a RICB to configure RSS.
3760 */
3761 if (qdev->rss_ring_count > 1) {
3762 status = ql_start_rss(qdev);
3763 if (status) {
ae9540f7 3764 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
c4e84bde
RM
3765 return status;
3766 }
3767 }
3768
3769 /* Start up the tx queues. */
3770 for (i = 0; i < qdev->tx_ring_count; i++) {
3771 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3772 if (status) {
ae9540f7
JP
3773 netif_err(qdev, ifup, qdev->ndev,
3774 "Failed to start tx ring[%d].\n", i);
c4e84bde
RM
3775 return status;
3776 }
3777 }
3778
b0c2aadf
RM
3779 /* Initialize the port and set the max framesize. */
3780 status = qdev->nic_ops->port_initialize(qdev);
80928860 3781 if (status)
ae9540f7 3782 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
c4e84bde 3783
bb58b5b6
RM
3784 /* Set up the MAC address and frame routing filter. */
3785 status = ql_cam_route_initialize(qdev);
c4e84bde 3786 if (status) {
ae9540f7
JP
3787 netif_err(qdev, ifup, qdev->ndev,
3788 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3789 return status;
3790 }
3791
3792 /* Start NAPI for the RSS queues. */
b2014ff8 3793 for (i = 0; i < qdev->rss_ring_count; i++) {
ae9540f7
JP
3794 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3795 "Enabling NAPI for rx_ring[%d].\n", i);
c4e84bde
RM
3796 napi_enable(&qdev->rx_ring[i].napi);
3797 }
3798
3799 return status;
3800}
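
/* Illustration only -- a note on the register-write convention used in
 * ql_adapter_initialize() above.  The control registers written there
 * take a write-enable mask in their upper 16 bits, so building
 * "mask = value << 16" and writing (mask | value) updates only the bits
 * named in value and leaves the rest of the register untouched.  For
 * example, to set just SYS_EFE without disturbing the other SYS bits:
 */
static void example_set_sys_efe(struct ql_adapter *qdev)
{
	ql_write32(qdev, SYS, (SYS_EFE << 16) | SYS_EFE);
}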
3801
3802/* Issue soft reset to chip. */
3803static int ql_adapter_reset(struct ql_adapter *qdev)
3804{
3805 u32 value;
c4e84bde 3806 int status = 0;
a5f59dc9 3807 unsigned long end_jiffies;
c4e84bde 3808
a5f59dc9
RM
3809 /* Clear all the entries in the routing table. */
3810 status = ql_clear_routing_entries(qdev);
3811 if (status) {
ae9540f7 3812 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
a5f59dc9
RM
3813 return status;
3814 }
3815
3816 end_jiffies = jiffies +
3817 max((unsigned long)1, usecs_to_jiffies(30));
84087f4d
RM
3818
3819 /* Stop management traffic. */
3820 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3821
3822 /* Wait for the NIC and MGMNT FIFOs to empty. */
3823 ql_wait_fifo_empty(qdev);
3824
c4e84bde 3825 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
a75ee7f1 3826
c4e84bde
RM
3827 do {
3828 value = ql_read32(qdev, RST_FO);
3829 if ((value & RST_FO_FR) == 0)
3830 break;
a75ee7f1
RM
3831 cpu_relax();
3832 } while (time_before(jiffies, end_jiffies));
c4e84bde 3833
c4e84bde 3834 if (value & RST_FO_FR) {
ae9540f7
JP
3835 netif_err(qdev, ifdown, qdev->ndev,
3836 "ETIMEDOUT!!! errored out of resetting the chip!\n");
a75ee7f1 3837 status = -ETIMEDOUT;
c4e84bde
RM
3838 }
3839
84087f4d
RM
3840 /* Resume management traffic. */
3841 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
c4e84bde
RM
3842 return status;
3843}
3844
3845static void ql_display_dev_info(struct net_device *ndev)
3846{
3847 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3848
ae9540f7
JP
3849 netif_info(qdev, probe, qdev->ndev,
3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3851 "XG Roll = %d, XG Rev = %d.\n",
3852 qdev->func,
3853 qdev->port,
3854 qdev->chip_rev_id & 0x0000000f,
3855 qdev->chip_rev_id >> 4 & 0x0000000f,
3856 qdev->chip_rev_id >> 8 & 0x0000000f,
3857 qdev->chip_rev_id >> 12 & 0x0000000f);
3858 netif_info(qdev, probe, qdev->ndev,
3859 "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3860}
3861
ac409215 3862static int ql_wol(struct ql_adapter *qdev)
bc083ce9
RM
3863{
3864 int status = 0;
3865 u32 wol = MB_WOL_DISABLE;
3866
3867 /* The CAM is still intact after a reset, but if we
3868 * are doing WOL, then we may need to program the
3869 * routing regs. We would also need to issue the mailbox
3870 * commands to instruct the MPI what to do per the ethtool
3871 * settings.
3872 */
3873
3874 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3875 WAKE_MCAST | WAKE_BCAST)) {
ae9540f7
JP
3876 netif_err(qdev, ifdown, qdev->ndev,
3877 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3878 qdev->wol);
bc083ce9
RM
3879 return -EINVAL;
3880 }
3881
3882 if (qdev->wol & WAKE_MAGIC) {
3883 status = ql_mb_wol_set_magic(qdev, 1);
3884 if (status) {
ae9540f7
JP
3885 netif_err(qdev, ifdown, qdev->ndev,
3886 "Failed to set magic packet on %s.\n",
3887 qdev->ndev->name);
bc083ce9
RM
3888 return status;
3889 } else
ae9540f7
JP
3890 netif_info(qdev, drv, qdev->ndev,
3891 "Enabled magic packet successfully on %s.\n",
3892 qdev->ndev->name);
bc083ce9
RM
3893
3894 wol |= MB_WOL_MAGIC_PKT;
3895 }
3896
3897 if (qdev->wol) {
bc083ce9
RM
3898 wol |= MB_WOL_MODE_ON;
3899 status = ql_mb_wol_mode(qdev, wol);
ae9540f7
JP
3900 netif_err(qdev, drv, qdev->ndev,
3901 "WOL %s (wol code 0x%x) on %s\n",
318ae2ed 3902 (status == 0) ? "Successfully set" : "Failed",
ae9540f7 3903 wol, qdev->ndev->name);
bc083ce9
RM
3904 }
3905
3906 return status;
3907}
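
/* Illustration only -- a hypothetical helper, not the driver's ethtool
 * hook, showing how a WOL request would be validated before ql_wol()
 * runs: per the checks above, only magic-packet wake is supported and
 * any other WAKE_* bit is rejected.
 */
static int example_validate_wol(struct ql_adapter *qdev, u32 requested)
{
	if (requested & ~WAKE_MAGIC)
		return -EINVAL;		/* only WAKE_MAGIC is honored */
	qdev->wol = requested;
	return 0;
}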
3908
c5dadddb 3909static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
c4e84bde 3910{
c4e84bde 3911
6497b607
RM
3912 /* Don't kill the reset worker thread if we
3913 * are in the process of recovery.
3914 */
3915 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3916 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3917 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3918 cancel_delayed_work_sync(&qdev->mpi_work);
2ee1e272 3919 cancel_delayed_work_sync(&qdev->mpi_idc_work);
8aae2600 3920 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
bcc2cb3b 3921 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
c5dadddb
BL
3922}
3923
3924static int ql_adapter_down(struct ql_adapter *qdev)
3925{
3926 int i, status = 0;
3927
3928 ql_link_off(qdev);
3929
3930 ql_cancel_all_work_sync(qdev);
c4e84bde 3931
39aa8165
RM
3932 for (i = 0; i < qdev->rss_ring_count; i++)
3933 napi_disable(&qdev->rx_ring[i].napi);
c4e84bde
RM
3934
3935 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3936
3937 ql_disable_interrupts(qdev);
3938
3939 ql_tx_ring_clean(qdev);
3940
6b318cb3
RM
3941 /* Call netif_napi_del() from a common point.
3942 */
b2014ff8 3943 for (i = 0; i < qdev->rss_ring_count; i++)
6b318cb3
RM
3944 netif_napi_del(&qdev->rx_ring[i].napi);
3945
c4e84bde
RM
3946 status = ql_adapter_reset(qdev);
3947 if (status)
ae9540f7
JP
3948 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3949 qdev->func);
fe5f0980
BL
3950 ql_free_rx_buffers(qdev);
3951
c4e84bde
RM
3952 return status;
3953}
3954
3955static int ql_adapter_up(struct ql_adapter *qdev)
3956{
3957 int err = 0;
3958
c4e84bde
RM
3959 err = ql_adapter_initialize(qdev);
3960 if (err) {
ae9540f7 3961 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
c4e84bde
RM
3962 goto err_init;
3963 }
c4e84bde 3964 set_bit(QL_ADAPTER_UP, &qdev->flags);
4545a3f2 3965 ql_alloc_rx_buffers(qdev);
8b007de1
RM
3966 /* If the port is initialized and the
3967 * link is up, then turn on the carrier.
3968 */
3969 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3970 (ql_read32(qdev, STS) & qdev->port_link_up))
6a473308 3971 ql_link_on(qdev);
f2c05004
RM
3972 /* Restore rx mode. */
3973 clear_bit(QL_ALLMULTI, &qdev->flags);
3974 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3975 qlge_set_multicast_list(qdev->ndev);
3976
c1b60092
RM
3977 /* Restore vlan setting. */
3978 qlge_restore_vlan(qdev);
3979
c4e84bde
RM
3980 ql_enable_interrupts(qdev);
3981 ql_enable_all_completion_interrupts(qdev);
1e213303 3982 netif_tx_start_all_queues(qdev->ndev);
c4e84bde
RM
3983
3984 return 0;
3985err_init:
3986 ql_adapter_reset(qdev);
3987 return err;
3988}
3989
c4e84bde
RM
3990static void ql_release_adapter_resources(struct ql_adapter *qdev)
3991{
3992 ql_free_mem_resources(qdev);
3993 ql_free_irq(qdev);
3994}
3995
3996static int ql_get_adapter_resources(struct ql_adapter *qdev)
3997{
3998 int status = 0;
3999
4000 if (ql_alloc_mem_resources(qdev)) {
ae9540f7 4001 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
c4e84bde
RM
4002 return -ENOMEM;
4003 }
4004 status = ql_request_irq(qdev);
c4e84bde
RM
4005 return status;
4006}
4007
4008static int qlge_close(struct net_device *ndev)
4009{
4010 struct ql_adapter *qdev = netdev_priv(ndev);
4011
4bbd1a19
RM
4012 /* If we hit the pci_channel_io_perm_failure
4013 * condition, then we have already
4014 * brought the adapter down.
4015 */
4016 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
ae9540f7 4017 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4bbd1a19
RM
4018 clear_bit(QL_EEH_FATAL, &qdev->flags);
4019 return 0;
4020 }
4021
c4e84bde
RM
4022 /*
4023 * Wait for device to recover from a reset.
4024 * (Rarely happens, but possible.)
4025 */
4026 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4027 msleep(1);
4028 ql_adapter_down(qdev);
4029 ql_release_adapter_resources(qdev);
c4e84bde
RM
4030 return 0;
4031}
4032
4033static int ql_configure_rings(struct ql_adapter *qdev)
4034{
4035 int i;
4036 struct rx_ring *rx_ring;
4037 struct tx_ring *tx_ring;
a4ab6137 4038 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
7c734359
RM
4039 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4040 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4041
4042 qdev->lbq_buf_order = get_order(lbq_buf_len);
a4ab6137
RM
4043
4044 /* In a perfect world we have one RSS ring for each CPU
4045 * and each has its own vector. To do that we ask for
4046 * cpu_cnt vectors. ql_enable_msix() will adjust the
4047 * vector count to what we actually get. We then
4048 * allocate an RSS ring for each.
4049 * Essentially, we are doing min(cpu_count, msix_vector_count).
c4e84bde 4050 */
a4ab6137
RM
4051 qdev->intr_count = cpu_cnt;
4052 ql_enable_msix(qdev);
4053 /* Adjust the RSS ring count to the actual vector count. */
4054 qdev->rss_ring_count = qdev->intr_count;
c4e84bde 4055 qdev->tx_ring_count = cpu_cnt;
b2014ff8 4056 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
c4e84bde 4057
c4e84bde
RM
4058 for (i = 0; i < qdev->tx_ring_count; i++) {
4059 tx_ring = &qdev->tx_ring[i];
e332471c 4060 memset((void *)tx_ring, 0, sizeof(*tx_ring));
c4e84bde
RM
4061 tx_ring->qdev = qdev;
4062 tx_ring->wq_id = i;
4063 tx_ring->wq_len = qdev->tx_ring_size;
4064 tx_ring->wq_size =
4065 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4066
4067 /*
4068 * The completion queue IDs for the tx rings start
39aa8165 4069 * immediately after the rss rings.
c4e84bde 4070 */
39aa8165 4071 tx_ring->cq_id = qdev->rss_ring_count + i;
c4e84bde
RM
4072 }
4073
4074 for (i = 0; i < qdev->rx_ring_count; i++) {
4075 rx_ring = &qdev->rx_ring[i];
e332471c 4076 memset((void *)rx_ring, 0, sizeof(*rx_ring));
c4e84bde
RM
4077 rx_ring->qdev = qdev;
4078 rx_ring->cq_id = i;
4079 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
b2014ff8 4080 if (i < qdev->rss_ring_count) {
39aa8165
RM
4081 /*
4082 * Inbound (RSS) queues.
4083 */
c4e84bde
RM
4084 rx_ring->cq_len = qdev->rx_ring_size;
4085 rx_ring->cq_size =
4086 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4087 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4088 rx_ring->lbq_size =
2c9a0d41 4089 rx_ring->lbq_len * sizeof(__le64);
7c734359 4090 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
ae9540f7
JP
4091 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4092 "lbq_buf_size %d, order = %d\n",
4093 rx_ring->lbq_buf_size,
4094 qdev->lbq_buf_order);
c4e84bde
RM
4095 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4096 rx_ring->sbq_size =
2c9a0d41 4097 rx_ring->sbq_len * sizeof(__le64);
52e55f3c 4098 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
b2014ff8
RM
4099 rx_ring->type = RX_Q;
4100 } else {
c4e84bde
RM
4101 /*
4102 * Outbound queue handles outbound completions only.
4103 */
4104 /* outbound cq is same size as tx_ring it services. */
4105 rx_ring->cq_len = qdev->tx_ring_size;
4106 rx_ring->cq_size =
4107 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4108 rx_ring->lbq_len = 0;
4109 rx_ring->lbq_size = 0;
4110 rx_ring->lbq_buf_size = 0;
4111 rx_ring->sbq_len = 0;
4112 rx_ring->sbq_size = 0;
4113 rx_ring->sbq_buf_size = 0;
4114 rx_ring->type = TX_Q;
c4e84bde
RM
4115 }
4116 }
4117 return 0;
4118}
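
/* Illustration only -- a worked example of the ring/vector arithmetic
 * in ql_configure_rings() above.  Suppose 8 CPUs are online but
 * ql_enable_msix() is granted only 4 vectors: intr_count and
 * rss_ring_count become 4, tx_ring_count stays 8 (one per CPU), and
 * rx_ring_count = 8 + 4 = 12.  The four RSS completion queues take
 * cq_id 0..3 and the eight TX completion queues take cq_id 4..11,
 * matching "tx_ring->cq_id = qdev->rss_ring_count + i" above.
 */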
4119
4120static int qlge_open(struct net_device *ndev)
4121{
4122 int err = 0;
4123 struct ql_adapter *qdev = netdev_priv(ndev);
4124
74e12435
RM
4125 err = ql_adapter_reset(qdev);
4126 if (err)
4127 return err;
4128
c4e84bde
RM
4129 err = ql_configure_rings(qdev);
4130 if (err)
4131 return err;
4132
4133 err = ql_get_adapter_resources(qdev);
4134 if (err)
4135 goto error_up;
4136
4137 err = ql_adapter_up(qdev);
4138 if (err)
4139 goto error_up;
4140
4141 return err;
4142
4143error_up:
4144 ql_release_adapter_resources(qdev);
c4e84bde
RM
4145 return err;
4146}
4147
7c734359
RM
4148static int ql_change_rx_buffers(struct ql_adapter *qdev)
4149{
4150 struct rx_ring *rx_ring;
4151 int i, status;
4152 u32 lbq_buf_len;
4153
4154 /* Wait for an outstanding reset to complete. */
4155 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4156 int i = 3;
4157 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
ae9540f7
JP
4158 netif_err(qdev, ifup, qdev->ndev,
4159 "Waiting for adapter UP...\n");
7c734359
RM
4160 ssleep(1);
4161 }
4162
4163 if (!i) {
ae9540f7
JP
4164 netif_err(qdev, ifup, qdev->ndev,
4165 "Timed out waiting for adapter UP\n");
7c734359
RM
4166 return -ETIMEDOUT;
4167 }
4168 }
4169
4170 status = ql_adapter_down(qdev);
4171 if (status)
4172 goto error;
4173
4174 /* Get the new rx buffer size. */
4175 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4176 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4177 qdev->lbq_buf_order = get_order(lbq_buf_len);
4178
4179 for (i = 0; i < qdev->rss_ring_count; i++) {
4180 rx_ring = &qdev->rx_ring[i];
4181 /* Set the new size. */
4182 rx_ring->lbq_buf_size = lbq_buf_len;
4183 }
4184
4185 status = ql_adapter_up(qdev);
4186 if (status)
4187 goto error;
4188
4189 return status;
4190error:
ae9540f7
JP
4191 netif_alert(qdev, ifup, qdev->ndev,
4192 "Driver up/down cycle failed, closing device.\n");
7c734359
RM
4193 set_bit(QL_ADAPTER_UP, &qdev->flags);
4194 dev_close(qdev->ndev);
4195 return status;
4196}
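
/* Illustration only -- the large-buffer sizing used above and in
 * ql_configure_rings().  Assuming LARGE_BUFFER_MIN_SIZE is 2048 and
 * LARGE_BUFFER_MAX_SIZE is 8192 (values taken as assumptions here, not
 * from this listing), a standard 1500-byte MTU gives get_order(2048)
 * == 0 (order-0, single-page allocations), while a jumbo 9000-byte MTU
 * gives get_order(8192) == 1 (order-1, 8K allocations) on 4K pages.
 */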
4197
c4e84bde
RM
4198static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4199{
4200 struct ql_adapter *qdev = netdev_priv(ndev);
7c734359 4201 int status;
c4e84bde
RM
4202
4203 if (ndev->mtu == 1500 && new_mtu == 9000) {
ae9540f7 4204 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
c4e84bde 4205 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
ae9540f7 4206 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
c4e84bde
RM
4207 } else
4208 return -EINVAL;
7c734359
RM
4209
4210 queue_delayed_work(qdev->workqueue,
4211 &qdev->mpi_port_cfg_work, 3*HZ);
4212
746079da
BL
4213 ndev->mtu = new_mtu;
4214
7c734359 4215 if (!netif_running(qdev->ndev)) {
7c734359
RM
4216 return 0;
4217 }
4218
7c734359
RM
4219 status = ql_change_rx_buffers(qdev);
4220 if (status) {
ae9540f7
JP
4221 netif_err(qdev, ifup, qdev->ndev,
4222 "Changing MTU failed.\n");
7c734359
RM
4223 }
4224
4225 return status;
c4e84bde
RM
4226}
4227
4228static struct net_device_stats *qlge_get_stats(struct net_device
4229 *ndev)
4230{
885ee398
RM
4231 struct ql_adapter *qdev = netdev_priv(ndev);
4232 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4233 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4234 unsigned long pkts, mcast, dropped, errors, bytes;
4235 int i;
4236
4237 /* Get RX stats. */
4238 pkts = mcast = dropped = errors = bytes = 0;
4239 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4240 pkts += rx_ring->rx_packets;
4241 bytes += rx_ring->rx_bytes;
4242 dropped += rx_ring->rx_dropped;
4243 errors += rx_ring->rx_errors;
4244 mcast += rx_ring->rx_multicast;
4245 }
4246 ndev->stats.rx_packets = pkts;
4247 ndev->stats.rx_bytes = bytes;
4248 ndev->stats.rx_dropped = dropped;
4249 ndev->stats.rx_errors = errors;
4250 ndev->stats.multicast = mcast;
4251
4252 /* Get TX stats. */
4253 pkts = errors = bytes = 0;
4254 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4255 pkts += tx_ring->tx_packets;
4256 bytes += tx_ring->tx_bytes;
4257 errors += tx_ring->tx_errors;
4258 }
4259 ndev->stats.tx_packets = pkts;
4260 ndev->stats.tx_bytes = bytes;
4261 ndev->stats.tx_errors = errors;
bcc90f55 4262 return &ndev->stats;
c4e84bde
RM
4263}
4264
ac409215 4265static void qlge_set_multicast_list(struct net_device *ndev)
c4e84bde
RM
4266{
4267 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
22bedad3 4268 struct netdev_hw_addr *ha;
cc288f54 4269 int i, status;
c4e84bde 4270
cc288f54
RM
4271 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4272 if (status)
4273 return;
c4e84bde
RM
4274 /*
4275 * Set or clear promiscuous mode if a
4276 * transition is taking place.
4277 */
4278 if (ndev->flags & IFF_PROMISC) {
4279 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4280 if (ql_set_routing_reg
4281 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
ae9540f7
JP
4282 netif_err(qdev, hw, qdev->ndev,
4283 "Failed to set promiscuous mode.\n");
c4e84bde
RM
4284 } else {
4285 set_bit(QL_PROMISCUOUS, &qdev->flags);
4286 }
4287 }
4288 } else {
4289 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4290 if (ql_set_routing_reg
4291 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
ae9540f7
JP
4292 netif_err(qdev, hw, qdev->ndev,
4293 "Failed to clear promiscuous mode.\n");
c4e84bde
RM
4294 } else {
4295 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4296 }
4297 }
4298 }
4299
4300 /*
4301 * Set or clear all multicast mode if a
4302 * transition is taking place.
4303 */
4304 if ((ndev->flags & IFF_ALLMULTI) ||
4cd24eaf 4305 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
c4e84bde
RM
4306 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4307 if (ql_set_routing_reg
4308 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
ae9540f7
JP
4309 netif_err(qdev, hw, qdev->ndev,
4310 "Failed to set all-multi mode.\n");
c4e84bde
RM
4311 } else {
4312 set_bit(QL_ALLMULTI, &qdev->flags);
4313 }
4314 }
4315 } else {
4316 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4317 if (ql_set_routing_reg
4318 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
ae9540f7
JP
4319 netif_err(qdev, hw, qdev->ndev,
4320 "Failed to clear all-multi mode.\n");
c4e84bde
RM
4321 } else {
4322 clear_bit(QL_ALLMULTI, &qdev->flags);
4323 }
4324 }
4325 }
4326
4cd24eaf 4327 if (!netdev_mc_empty(ndev)) {
cc288f54
RM
4328 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4329 if (status)
4330 goto exit;
f9dcbcc9 4331 i = 0;
22bedad3
JP
4332 netdev_for_each_mc_addr(ha, ndev) {
4333 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
c4e84bde 4334 MAC_ADDR_TYPE_MULTI_MAC, i)) {
ae9540f7
JP
4335 netif_err(qdev, hw, qdev->ndev,
4336 "Failed to load multicast address.\n");
cc288f54 4337 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4338 goto exit;
4339 }
f9dcbcc9
JP
4340 i++;
4341 }
cc288f54 4342 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
c4e84bde
RM
4343 if (ql_set_routing_reg
4344 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
ae9540f7
JP
4345 netif_err(qdev, hw, qdev->ndev,
4346 "Failed to set multicast match mode.\n");
c4e84bde
RM
4347 } else {
4348 set_bit(QL_ALLMULTI, &qdev->flags);
4349 }
4350 }
4351exit:
8587ea35 4352 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
c4e84bde
RM
4353}
4354
4355static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356{
4357 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4358 struct sockaddr *addr = p;
cc288f54 4359 int status;
c4e84bde 4360
c4e84bde
RM
4361 if (!is_valid_ether_addr(addr->sa_data))
4362 return -EADDRNOTAVAIL;
4363 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
801e9096
RM
4364 /* Update local copy of current mac address. */
4365 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
c4e84bde 4366
cc288f54
RM
4367 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4368 if (status)
4369 return status;
cc288f54
RM
4370 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4371 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
cc288f54 4372 if (status)
ae9540f7 4373 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
cc288f54
RM
4374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4375 return status;
c4e84bde
RM
4376}
4377
4378static void qlge_tx_timeout(struct net_device *ndev)
4379{
4380 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 4381 ql_queue_asic_error(qdev);
c4e84bde
RM
4382}
4383
4384static void ql_asic_reset_work(struct work_struct *work)
4385{
4386 struct ql_adapter *qdev =
4387 container_of(work, struct ql_adapter, asic_reset_work.work);
db98812f 4388 int status;
f2c0d8df 4389 rtnl_lock();
db98812f
RM
4390 status = ql_adapter_down(qdev);
4391 if (status)
4392 goto error;
4393
4394 status = ql_adapter_up(qdev);
4395 if (status)
4396 goto error;
2cd6dbaa
RM
4397
4398 /* Restore rx mode. */
4399 clear_bit(QL_ALLMULTI, &qdev->flags);
4400 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4401 qlge_set_multicast_list(qdev->ndev);
4402
f2c0d8df 4403 rtnl_unlock();
db98812f
RM
4404 return;
4405error:
ae9540f7
JP
4406 netif_alert(qdev, ifup, qdev->ndev,
4407 "Driver up/down cycle failed, closing device\n");
f2c0d8df 4408
db98812f
RM
4409 set_bit(QL_ADAPTER_UP, &qdev->flags);
4410 dev_close(qdev->ndev);
4411 rtnl_unlock();
c4e84bde
RM
4412}
4413
b0c2aadf
RM
4414static struct nic_operations qla8012_nic_ops = {
4415 .get_flash = ql_get_8012_flash_params,
4416 .port_initialize = ql_8012_port_initialize,
4417};
4418
cdca8d02
RM
4419static struct nic_operations qla8000_nic_ops = {
4420 .get_flash = ql_get_8000_flash_params,
4421 .port_initialize = ql_8000_port_initialize,
4422};
4423
e4552f51
RM
4424/* Find the pcie function number for the other NIC
4425 * on this chip. Since both NIC functions share a
4426 * common firmware we have the lowest enabled function
4427 * do any common work. Examples would be resetting
4428 * after a fatal firmware error, or doing a firmware
4429 * coredump.
4430 */
4431static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4432{
4433 int status = 0;
4434 u32 temp;
4435 u32 nic_func1, nic_func2;
4436
4437 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4438 &temp);
4439 if (status)
4440 return status;
4441
4442 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4443 MPI_TEST_NIC_FUNC_MASK);
4444 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4445 MPI_TEST_NIC_FUNC_MASK);
4446
4447 if (qdev->func == nic_func1)
4448 qdev->alt_func = nic_func2;
4449 else if (qdev->func == nic_func2)
4450 qdev->alt_func = nic_func1;
4451 else
4452 status = -EIO;
4453
4454 return status;
4455}
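
/* Illustration only -- a worked example of the function pairing above.
 * If MPI_TEST_FUNC_PORT_CFG reports nic_func1 == 0 and nic_func2 == 2,
 * a device probed as PCI function 2 records alt_func = 0, and
 * ql_get_board_info() below assigns it port 1 (func > alt_func), while
 * the device probed as function 0 gets port 0.
 */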
b0c2aadf 4456
e4552f51 4457static int ql_get_board_info(struct ql_adapter *qdev)
c4e84bde 4458{
e4552f51 4459 int status;
c4e84bde
RM
4460 qdev->func =
4461 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
e4552f51
RM
4462 if (qdev->func > 3)
4463 return -EIO;
4464
4465 status = ql_get_alt_pcie_func(qdev);
4466 if (status)
4467 return status;
4468
4469 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4470 if (qdev->port) {
c4e84bde
RM
4471 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4472 qdev->port_link_up = STS_PL1;
4473 qdev->port_init = STS_PI1;
4474 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4475 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4476 } else {
4477 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4478 qdev->port_link_up = STS_PL0;
4479 qdev->port_init = STS_PI0;
4480 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4481 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4482 }
4483 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
b0c2aadf
RM
4484 qdev->device_id = qdev->pdev->device;
4485 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4486 qdev->nic_ops = &qla8012_nic_ops;
cdca8d02
RM
4487 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4488 qdev->nic_ops = &qla8000_nic_ops;
e4552f51 4489 return status;
c4e84bde
RM
4490}
4491
4492static void ql_release_all(struct pci_dev *pdev)
4493{
4494 struct net_device *ndev = pci_get_drvdata(pdev);
4495 struct ql_adapter *qdev = netdev_priv(ndev);
4496
4497 if (qdev->workqueue) {
4498 destroy_workqueue(qdev->workqueue);
4499 qdev->workqueue = NULL;
4500 }
39aa8165 4501
c4e84bde 4502 if (qdev->reg_base)
8668ae92 4503 iounmap(qdev->reg_base);
c4e84bde
RM
4504 if (qdev->doorbell_area)
4505 iounmap(qdev->doorbell_area);
8aae2600 4506 vfree(qdev->mpi_coredump);
c4e84bde
RM
4507 pci_release_regions(pdev);
4508 pci_set_drvdata(pdev, NULL);
4509}
4510
4511static int __devinit ql_init_device(struct pci_dev *pdev,
4512 struct net_device *ndev, int cards_found)
4513{
4514 struct ql_adapter *qdev = netdev_priv(ndev);
1d1023d0 4515 int err = 0;
c4e84bde 4516
e332471c 4517 memset((void *)qdev, 0, sizeof(*qdev));
c4e84bde
RM
4518 err = pci_enable_device(pdev);
4519 if (err) {
4520 dev_err(&pdev->dev, "PCI device enable failed.\n");
4521 return err;
4522 }
4523
ebd6e774
RM
4524 qdev->ndev = ndev;
4525 qdev->pdev = pdev;
4526 pci_set_drvdata(pdev, ndev);
c4e84bde 4527
bc9167f3
RM
4528 /* Set PCIe read request size */
4529 err = pcie_set_readrq(pdev, 4096);
4530 if (err) {
4531 dev_err(&pdev->dev, "Set readrq failed.\n");
4f9a91c8 4532 goto err_out1;
bc9167f3
RM
4533 }
4534
c4e84bde
RM
4535 err = pci_request_regions(pdev, DRV_NAME);
4536 if (err) {
4537 dev_err(&pdev->dev, "PCI region request failed.\n");
ebd6e774 4538 return err;
c4e84bde
RM
4539 }
4540
4541 pci_set_master(pdev);
6a35528a 4542 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c4e84bde 4543 set_bit(QL_DMA64, &qdev->flags);
6a35528a 4544 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
c4e84bde 4545 } else {
284901a9 4546 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde 4547 if (!err)
284901a9 4548 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
c4e84bde
RM
4549 }
4550
4551 if (err) {
4552 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4f9a91c8 4553 goto err_out2;
c4e84bde
RM
4554 }
4555
73475339
RM
4556 /* Set PCIe reset type for EEH to fundamental. */
4557 pdev->needs_freset = 1;
6d190c6e 4558 pci_save_state(pdev);
c4e84bde
RM
4559 qdev->reg_base =
4560 ioremap_nocache(pci_resource_start(pdev, 1),
4561 pci_resource_len(pdev, 1));
4562 if (!qdev->reg_base) {
4563 dev_err(&pdev->dev, "Register mapping failed.\n");
4564 err = -ENOMEM;
4f9a91c8 4565 goto err_out2;
c4e84bde
RM
4566 }
4567
4568 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4569 qdev->doorbell_area =
4570 ioremap_nocache(pci_resource_start(pdev, 3),
4571 pci_resource_len(pdev, 3));
4572 if (!qdev->doorbell_area) {
4573 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4574 err = -ENOMEM;
4f9a91c8 4575 goto err_out2;
c4e84bde
RM
4576 }
4577
e4552f51
RM
4578 err = ql_get_board_info(qdev);
4579 if (err) {
4580 dev_err(&pdev->dev, "Register access failed.\n");
4581 err = -EIO;
4f9a91c8 4582 goto err_out2;
e4552f51 4583 }
c4e84bde
RM
4584 qdev->msg_enable = netif_msg_init(debug, default_msg);
4585 spin_lock_init(&qdev->hw_lock);
4586 spin_lock_init(&qdev->stats_lock);
4587
8aae2600
RM
4588 if (qlge_mpi_coredump) {
4589 qdev->mpi_coredump =
4590 vmalloc(sizeof(struct ql_mpi_coredump));
4591 if (qdev->mpi_coredump == NULL) {
4592 dev_err(&pdev->dev, "Coredump alloc failed.\n");
4593 err = -ENOMEM;
ce96bc86 4594 goto err_out2;
8aae2600 4595 }
d5c1da56
RM
4596 if (qlge_force_coredump)
4597 set_bit(QL_FRC_COREDUMP, &qdev->flags);
8aae2600 4598 }
c4e84bde 4599 /* make sure the EEPROM is good */
b0c2aadf 4600 err = qdev->nic_ops->get_flash(qdev);
c4e84bde
RM
4601 if (err) {
4602 dev_err(&pdev->dev, "Invalid FLASH.\n");
4f9a91c8 4603 goto err_out2;
c4e84bde
RM
4604 }
4605
c4e84bde 4606 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
801e9096
RM
4607 /* Keep local copy of current mac address. */
4608 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
c4e84bde
RM
4609
4610 /* Set up the default ring sizes. */
4611 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4612 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4613
4614 /* Set up the coalescing parameters. */
4615 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4616 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4617 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4618 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4619
4620 /*
4621 * Set up the operating parameters.
4622 */
4623 qdev->rx_csum = 1;
c4e84bde
RM
4624 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4625 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4626 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4627 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
bcc2cb3b 4628 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
2ee1e272 4629 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
8aae2600 4630 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
bcc2cb3b 4631 init_completion(&qdev->ide_completion);
c4e84bde
RM
4632
4633 if (!cards_found) {
4634 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4635 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4636 DRV_NAME, DRV_VERSION);
4637 }
4638 return 0;
4f9a91c8 4639err_out2:
c4e84bde 4640 ql_release_all(pdev);
4f9a91c8 4641err_out1:
c4e84bde
RM
4642 pci_disable_device(pdev);
4643 return err;
4644}
4645
25ed7849
SH
4646static const struct net_device_ops qlge_netdev_ops = {
4647 .ndo_open = qlge_open,
4648 .ndo_stop = qlge_close,
4649 .ndo_start_xmit = qlge_send,
4650 .ndo_change_mtu = qlge_change_mtu,
4651 .ndo_get_stats = qlge_get_stats,
4652 .ndo_set_multicast_list = qlge_set_multicast_list,
4653 .ndo_set_mac_address = qlge_set_mac_address,
4654 .ndo_validate_addr = eth_validate_addr,
4655 .ndo_tx_timeout = qlge_tx_timeout,
01e6b953
RM
4656 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4657 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4658 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
25ed7849
SH
4659};
4660
15c052fc
RM
4661static void ql_timer(unsigned long data)
4662{
4663 struct ql_adapter *qdev = (struct ql_adapter *)data;
4664 u32 var = 0;
4665
4666 var = ql_read32(qdev, STS);
4667 if (pci_channel_offline(qdev->pdev)) {
ae9540f7 4668 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
15c052fc
RM
4669 return;
4670 }
4671
72046d84 4672 mod_timer(&qdev->timer, jiffies + (5*HZ));
15c052fc
RM
4673}
4674
c4e84bde
RM
4675static int __devinit qlge_probe(struct pci_dev *pdev,
4676 const struct pci_device_id *pci_entry)
4677{
4678 struct net_device *ndev = NULL;
4679 struct ql_adapter *qdev = NULL;
4680 static int cards_found = 0;
4681 int err = 0;
4682
1e213303
RM
4683 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4684 min(MAX_CPUS, (int)num_online_cpus()));
c4e84bde
RM
4685 if (!ndev)
4686 return -ENOMEM;
4687
4688 err = ql_init_device(pdev, ndev, cards_found);
4689 if (err < 0) {
4690 free_netdev(ndev);
4691 return err;
4692 }
4693
4694 qdev = netdev_priv(ndev);
4695 SET_NETDEV_DEV(ndev, &pdev->dev);
4696 ndev->features = (0
4697 | NETIF_F_IP_CSUM
4698 | NETIF_F_SG
4699 | NETIF_F_TSO
4700 | NETIF_F_TSO6
4701 | NETIF_F_TSO_ECN
4702 | NETIF_F_HW_VLAN_TX
4703 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
22bdd4f5 4704 ndev->features |= NETIF_F_GRO;
c4e84bde
RM
4705
4706 if (test_bit(QL_DMA64, &qdev->flags))
4707 ndev->features |= NETIF_F_HIGHDMA;
4708
4709 /*
4710 * Set up net_device structure.
4711 */
4712 ndev->tx_queue_len = qdev->tx_ring_size;
4713 ndev->irq = pdev->irq;
25ed7849
SH
4714
4715 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 4716 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 4717 ndev->watchdog_timeo = 10 * HZ;
25ed7849 4718
c4e84bde
RM
4719 err = register_netdev(ndev);
4720 if (err) {
4721 dev_err(&pdev->dev, "net device registration failed.\n");
4722 ql_release_all(pdev);
4723 pci_disable_device(pdev);
4724 return err;
4725 }
15c052fc
RM
4726 /* Start up the timer to trigger EEH if
4727 * the bus goes dead
4728 */
4729 init_timer_deferrable(&qdev->timer);
4730 qdev->timer.data = (unsigned long)qdev;
4731 qdev->timer.function = ql_timer;
4732 qdev->timer.expires = jiffies + (5*HZ);
4733 add_timer(&qdev->timer);
6a473308 4734 ql_link_off(qdev);
c4e84bde 4735 ql_display_dev_info(ndev);
9dfbbaa6 4736 atomic_set(&qdev->lb_count, 0);
c4e84bde
RM
4737 cards_found++;
4738 return 0;
4739}
4740
9dfbbaa6
RM
4741netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4742{
4743 return qlge_send(skb, ndev);
4744}
4745
4746int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4747{
4748 return ql_clean_inbound_rx_ring(rx_ring, budget);
4749}
4750
c4e84bde
RM
4751static void __devexit qlge_remove(struct pci_dev *pdev)
4752{
4753 struct net_device *ndev = pci_get_drvdata(pdev);
15c052fc
RM
4754 struct ql_adapter *qdev = netdev_priv(ndev);
4755 del_timer_sync(&qdev->timer);
c5dadddb 4756 ql_cancel_all_work_sync(qdev);
c4e84bde
RM
4757 unregister_netdev(ndev);
4758 ql_release_all(pdev);
4759 pci_disable_device(pdev);
4760 free_netdev(ndev);
4761}
4762
6d190c6e
RM
4763/* Clean up resources without touching hardware. */
4764static void ql_eeh_close(struct net_device *ndev)
4765{
4766 int i;
4767 struct ql_adapter *qdev = netdev_priv(ndev);
4768
4769 if (netif_carrier_ok(ndev)) {
4770 netif_carrier_off(ndev);
4771 netif_stop_queue(ndev);
4772 }
4773
7ae80abd
BL
4774 /* Disabling the timer */
4775 del_timer_sync(&qdev->timer);
c5dadddb 4776 ql_cancel_all_work_sync(qdev);
6d190c6e
RM
4777
4778 for (i = 0; i < qdev->rss_ring_count; i++)
4779 netif_napi_del(&qdev->rx_ring[i].napi);
4780
4781 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4782 ql_tx_ring_clean(qdev);
4783 ql_free_rx_buffers(qdev);
4784 ql_release_adapter_resources(qdev);
4785}
4786
c4e84bde
RM
4787/*
4788 * This callback is called by the PCI subsystem whenever
4789 * a PCI bus error is detected.
4790 */
4791static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4792 enum pci_channel_state state)
4793{
4794 struct net_device *ndev = pci_get_drvdata(pdev);
4bbd1a19 4795 struct ql_adapter *qdev = netdev_priv(ndev);
fbc663ce 4796
6d190c6e
RM
4797 switch (state) {
4798 case pci_channel_io_normal:
4799 return PCI_ERS_RESULT_CAN_RECOVER;
4800 case pci_channel_io_frozen:
4801 netif_device_detach(ndev);
4802 if (netif_running(ndev))
4803 ql_eeh_close(ndev);
4804 pci_disable_device(pdev);
4805 return PCI_ERS_RESULT_NEED_RESET;
4806 case pci_channel_io_perm_failure:
4807 dev_err(&pdev->dev,
4808 "%s: pci_channel_io_perm_failure.\n", __func__);
4bbd1a19
RM
4809 ql_eeh_close(ndev);
4810 set_bit(QL_EEH_FATAL, &qdev->flags);
fbc663ce 4811 return PCI_ERS_RESULT_DISCONNECT;
6d190c6e 4812 }
c4e84bde
RM
4813
4814 /* Request a slot reset. */
4815 return PCI_ERS_RESULT_NEED_RESET;
4816}
4817
4818/*
4819 * This callback is called after the PCI bus has been reset.
4820 * Basically, this tries to restart the card from scratch.
4821 * This is a shortened version of the device probe/discovery code,
4822 * it resembles the first half of the () routine.
4823 */
4824static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4825{
4826 struct net_device *ndev = pci_get_drvdata(pdev);
4827 struct ql_adapter *qdev = netdev_priv(ndev);
4828
6d190c6e
RM
4829 pdev->error_state = pci_channel_io_normal;
4830
4831 pci_restore_state(pdev);
c4e84bde 4832 if (pci_enable_device(pdev)) {
ae9540f7
JP
4833 netif_err(qdev, ifup, qdev->ndev,
4834 "Cannot re-enable PCI device after reset.\n");
c4e84bde
RM
4835 return PCI_ERS_RESULT_DISCONNECT;
4836 }
c4e84bde 4837 pci_set_master(pdev);
a112fd4c
RM
4838
4839 if (ql_adapter_reset(qdev)) {
ae9540f7 4840 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4bbd1a19 4841 set_bit(QL_EEH_FATAL, &qdev->flags);
a112fd4c
RM
4842 return PCI_ERS_RESULT_DISCONNECT;
4843 }
4844
c4e84bde
RM
4845 return PCI_ERS_RESULT_RECOVERED;
4846}
4847
4848static void qlge_io_resume(struct pci_dev *pdev)
4849{
4850 struct net_device *ndev = pci_get_drvdata(pdev);
4851 struct ql_adapter *qdev = netdev_priv(ndev);
6d190c6e 4852 int err = 0;
c4e84bde 4853
c4e84bde 4854 if (netif_running(ndev)) {
6d190c6e
RM
4855 err = qlge_open(ndev);
4856 if (err) {
ae9540f7
JP
4857 netif_err(qdev, ifup, qdev->ndev,
4858 "Device initialization failed after reset.\n");
c4e84bde
RM
4859 return;
4860 }
6d190c6e 4861 } else {
ae9540f7
JP
4862 netif_err(qdev, ifup, qdev->ndev,
4863 "Device was not running prior to EEH.\n");
c4e84bde 4864 }
72046d84 4865 mod_timer(&qdev->timer, jiffies + (5*HZ));
c4e84bde
RM
4866 netif_device_attach(ndev);
4867}
4868
4869static struct pci_error_handlers qlge_err_handler = {
4870 .error_detected = qlge_io_error_detected,
4871 .slot_reset = qlge_io_slot_reset,
4872 .resume = qlge_io_resume,
4873};
4874
4875static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4876{
4877 struct net_device *ndev = pci_get_drvdata(pdev);
4878 struct ql_adapter *qdev = netdev_priv(ndev);
6b318cb3 4879 int err;
c4e84bde
RM
4880
4881 netif_device_detach(ndev);
15c052fc 4882 del_timer_sync(&qdev->timer);
c4e84bde
RM
4883
4884 if (netif_running(ndev)) {
4885 err = ql_adapter_down(qdev);
4886 if (!err)
4887 return err;
4888 }
4889
bc083ce9 4890 ql_wol(qdev);
c4e84bde
RM
4891 err = pci_save_state(pdev);
4892 if (err)
4893 return err;
4894
4895 pci_disable_device(pdev);
4896
4897 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4898
4899 return 0;
4900}
4901
04da2cf9 4902#ifdef CONFIG_PM
c4e84bde
RM
4903static int qlge_resume(struct pci_dev *pdev)
4904{
4905 struct net_device *ndev = pci_get_drvdata(pdev);
4906 struct ql_adapter *qdev = netdev_priv(ndev);
4907 int err;
4908
4909 pci_set_power_state(pdev, PCI_D0);
4910 pci_restore_state(pdev);
4911 err = pci_enable_device(pdev);
4912 if (err) {
ae9540f7 4913 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
c4e84bde
RM
4914 return err;
4915 }
4916 pci_set_master(pdev);
4917
4918 pci_enable_wake(pdev, PCI_D3hot, 0);
4919 pci_enable_wake(pdev, PCI_D3cold, 0);
4920
4921 if (netif_running(ndev)) {
4922 err = ql_adapter_up(qdev);
4923 if (err)
4924 return err;
4925 }
4926
72046d84 4927 mod_timer(&qdev->timer, jiffies + (5*HZ));
c4e84bde
RM
4928 netif_device_attach(ndev);
4929
4930 return 0;
4931}
04da2cf9 4932#endif /* CONFIG_PM */
c4e84bde
RM
4933
4934static void qlge_shutdown(struct pci_dev *pdev)
4935{
4936 qlge_suspend(pdev, PMSG_SUSPEND);
4937}
4938
4939static struct pci_driver qlge_driver = {
4940 .name = DRV_NAME,
4941 .id_table = qlge_pci_tbl,
4942 .probe = qlge_probe,
4943 .remove = __devexit_p(qlge_remove),
4944#ifdef CONFIG_PM
4945 .suspend = qlge_suspend,
4946 .resume = qlge_resume,
4947#endif
4948 .shutdown = qlge_shutdown,
4949 .err_handler = &qlge_err_handler
4950};
4951
4952static int __init qlge_init_module(void)
4953{
4954 return pci_register_driver(&qlge_driver);
4955}
4956
4957static void __exit qlge_exit(void)
4958{
4959 pci_unregister_driver(&qlge_driver);
4960}
4961
4962module_init(qlge_init_module);
4963module_exit(qlge_exit);