drivers/net/qlge/qlge_main.c
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h>
39#include <linux/delay.h>
40#include <linux/mm.h>
41#include <linux/vmalloc.h>
42#include <net/ip6_checksum.h>
43
44#include "qlge.h"
45
46char qlge_driver_name[] = DRV_NAME;
47const char qlge_driver_version[] = DRV_VERSION;
48
49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50MODULE_DESCRIPTION(DRV_STRING " ");
51MODULE_LICENSE("GPL");
52MODULE_VERSION(DRV_VERSION);
53
54static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56/* NETIF_MSG_TIMER | */
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61 NETIF_MSG_TX_QUEUED |
62 NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
63/* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66static int debug = 0x00007fff; /* defaults above */
67module_param(debug, int, 0);
68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int irq_type = MSIX_IRQ;
74module_param(irq_type, int, MSIX_IRQ);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
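/* Illustrative usage (assuming the module name follows DRV_NAME, i.e. "qlge"):
 *
 *	modprobe qlge debug=0x7fff irq_type=2
 *
 * would select the full debug mask above and force legacy interrupts.
 */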
76
77static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
78 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
79 /* required last entry */
80 {0,}
81};
82
83MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85/* This hardware semaphore provides exclusive access to
86 * resources shared between the NIC driver, MPI firmware,
87 * FCOE firmware and the FC driver.
88 */
89static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90{
91 u32 sem_bits = 0;
92
93 switch (sem_mask) {
94 case SEM_XGMAC0_MASK:
95 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96 break;
97 case SEM_XGMAC1_MASK:
98 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99 break;
100 case SEM_ICB_MASK:
101 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102 break;
103 case SEM_MAC_ADDR_MASK:
104 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105 break;
106 case SEM_FLASH_MASK:
107 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108 break;
109 case SEM_PROBE_MASK:
110 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111 break;
112 case SEM_RT_IDX_MASK:
113 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114 break;
115 case SEM_PROC_REG_MASK:
116 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117 break;
118 default:
119		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask.\n");
120 return -EINVAL;
121 }
122
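	/* Try to acquire: write the set pattern together with its mask,
	 * then read back.  If our sem_bits did not stick, another function
	 * (driver or firmware) already holds this semaphore and we return
	 * non-zero to indicate failure.
	 */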
123 ql_write32(qdev, SEM, sem_bits | sem_mask);
124 return !(ql_read32(qdev, SEM) & sem_bits);
125}
126
127int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128{
129	unsigned int wait_count = 30;
130 do {
131 if (!ql_sem_trylock(qdev, sem_mask))
132 return 0;
133 udelay(100);
134 } while (--wait_count);
135 return -ETIMEDOUT;
136}
137
138void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139{
140 ql_write32(qdev, SEM, sem_mask);
141 ql_read32(qdev, SEM); /* flush */
142}
143
144/* This function waits for a specific bit to come ready
145 * in a given register. It is used mostly by the initialization
146 * process, but is also used from kernel thread context such as
147 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
148 */
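/* For example, the MAC address helpers below call
 *	ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0)
 * to poll until the MAC_ADDR interface reports "write ready"; no error
 * bit is checked in that case, hence err_bit == 0.
 */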
149int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150{
151 u32 temp;
152 int count = UDELAY_COUNT;
153
154 while (count) {
155 temp = ql_read32(qdev, reg);
156
157 /* check for errors */
158 if (temp & err_bit) {
159 QPRINTK(qdev, PROBE, ALERT,
160 "register 0x%.08x access error, value = 0x%.08x!.\n",
161 reg, temp);
162 return -EIO;
163 } else if (temp & bit)
164 return 0;
165 udelay(UDELAY_DELAY);
166 count--;
167 }
168 QPRINTK(qdev, PROBE, ALERT,
169 "Timed out waiting for reg %x to come ready.\n", reg);
170 return -ETIMEDOUT;
171}
172
173/* The CFG register is used to download TX and RX control blocks
174 * to the chip. This function waits for an operation to complete.
175 */
176static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177{
178 int count = UDELAY_COUNT;
179 u32 temp;
180
181 while (count) {
182 temp = ql_read32(qdev, CFG);
183 if (temp & CFG_LE)
184 return -EIO;
185 if (!(temp & bit))
186 return 0;
187 udelay(UDELAY_DELAY);
188 count--;
189 }
190 return -ETIMEDOUT;
191}
192
193
194/* Used to issue init control blocks to hw. Maps control block,
195 * sets address, triggers download, waits for completion.
196 */
197int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198 u16 q_id)
199{
200 u64 map;
201 int status = 0;
202 int direction;
203 u32 mask;
204 u32 value;
205
206 direction =
207 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208 PCI_DMA_FROMDEVICE;
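	/* Load operations (CFG_LRQ/CFG_LR/CFG_LCQ) download the control
	 * block to the chip, so the buffer is mapped towards the device;
	 * anything else is a dump/read-back and is mapped from the device.
	 */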
209
210 map = pci_map_single(qdev->pdev, ptr, size, direction);
211 if (pci_dma_mapping_error(qdev->pdev, map)) {
212 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
213 return -ENOMEM;
214 }
215
216 status = ql_wait_cfg(qdev, bit);
217 if (status) {
218 QPRINTK(qdev, IFUP, ERR,
219 "Timed out waiting for CFG to come ready.\n");
220 goto exit;
221 }
222
223 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
224 if (status)
225 goto exit;
226 ql_write32(qdev, ICB_L, (u32) map);
227 ql_write32(qdev, ICB_H, (u32) (map >> 32));
228 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
229
230 mask = CFG_Q_MASK | (bit << 16);
231 value = bit | (q_id << CFG_Q_SHIFT);
232 ql_write32(qdev, CFG, (mask | value));
233
234 /*
235 * Wait for the bit to clear after signaling hw.
236 */
237 status = ql_wait_cfg(qdev, bit);
238exit:
239 pci_unmap_single(qdev->pdev, map, size, direction);
240 return status;
241}
242
243/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
244int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
245 u32 *value)
246{
247 u32 offset = 0;
248 int status;
249
250 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
251 if (status)
252 return status;
253 switch (type) {
254 case MAC_ADDR_TYPE_MULTI_MAC:
255 case MAC_ADDR_TYPE_CAM_MAC:
256 {
257 status =
258 ql_wait_reg_rdy(qdev,
259					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
260 if (status)
261 goto exit;
262 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
263 (index << MAC_ADDR_IDX_SHIFT) | /* index */
264 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
265 status =
266 ql_wait_reg_rdy(qdev,
267					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
268 if (status)
269 goto exit;
270 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
271 status =
272 ql_wait_reg_rdy(qdev,
273					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
274 if (status)
275 goto exit;
276 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
277 (index << MAC_ADDR_IDX_SHIFT) | /* index */
278 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
279 status =
280 ql_wait_reg_rdy(qdev,
281					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
282 if (status)
283 goto exit;
284 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
285 if (type == MAC_ADDR_TYPE_CAM_MAC) {
286 status =
287 ql_wait_reg_rdy(qdev,
288						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 if (status)
290 goto exit;
291 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 status =
295 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
296						MAC_ADDR_MR, 0);
297 if (status)
298 goto exit;
299 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 }
301 break;
302 }
303 case MAC_ADDR_TYPE_VLAN:
304 case MAC_ADDR_TYPE_MULTI_FLTR:
305 default:
306 QPRINTK(qdev, IFUP, CRIT,
307 "Address type %d not yet supported.\n", type);
308 status = -EPERM;
309 }
310exit:
311 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
312 return status;
313}
314
315/* Set up a MAC, multicast or VLAN address for the
316 * inbound frame matching.
317 */
318static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
319 u16 index)
320{
321 u32 offset = 0;
322 int status = 0;
323
324 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
325 if (status)
326 return status;
327 switch (type) {
328 case MAC_ADDR_TYPE_MULTI_MAC:
329 case MAC_ADDR_TYPE_CAM_MAC:
330 {
331 u32 cam_output;
332 u32 upper = (addr[0] << 8) | addr[1];
333 u32 lower =
334 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
335 (addr[5]);
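			/* Illustrative layout: for MAC 00:a0:c9:11:22:33 the
			 * CAM words become upper = 0x000000a0 and
			 * lower = 0xc9112233.
			 */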
336
337 QPRINTK(qdev, IFUP, INFO,
338				"Adding %s address %pM"
339 " at index %d in the CAM.\n",
340 ((type ==
341 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
342				"UNICAST"), addr, index);
343
344 status =
345 ql_wait_reg_rdy(qdev,
346					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
347 if (status)
348 goto exit;
349 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
350 (index << MAC_ADDR_IDX_SHIFT) | /* index */
351 type); /* type */
352 ql_write32(qdev, MAC_ADDR_DATA, lower);
353 status =
354 ql_wait_reg_rdy(qdev,
355					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
356 if (status)
357 goto exit;
358 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
359 (index << MAC_ADDR_IDX_SHIFT) | /* index */
360 type); /* type */
361 ql_write32(qdev, MAC_ADDR_DATA, upper);
362 status =
363 ql_wait_reg_rdy(qdev,
364					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
365 if (status)
366 goto exit;
367 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
368 (index << MAC_ADDR_IDX_SHIFT) | /* index */
369 type); /* type */
370 /* This field should also include the queue id
371 and possibly the function id. Right now we hardcode
372 the route field to NIC core.
373 */
374 if (type == MAC_ADDR_TYPE_CAM_MAC) {
375 cam_output = (CAM_OUT_ROUTE_NIC |
376 (qdev->
377 func << CAM_OUT_FUNC_SHIFT) |
378 (qdev->
379 rss_ring_first_cq_id <<
380 CAM_OUT_CQ_ID_SHIFT));
381 if (qdev->vlgrp)
382 cam_output |= CAM_OUT_RV;
383 /* route to NIC core */
384 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
385 }
386 break;
387 }
388 case MAC_ADDR_TYPE_VLAN:
389 {
390 u32 enable_bit = *((u32 *) &addr[0]);
391 /* For VLAN, the addr actually holds a bit that
392 * either enables or disables the vlan id we are
393 * addressing. It's either MAC_ADDR_E on or off.
394 * That's bit-27 we're talking about.
395 */
396 QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
397 (enable_bit ? "Adding" : "Removing"),
398 index, (enable_bit ? "to" : "from"));
399
400 status =
401 ql_wait_reg_rdy(qdev,
402					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 if (status)
404 goto exit;
405 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
406 (index << MAC_ADDR_IDX_SHIFT) | /* index */
407 type | /* type */
408 enable_bit); /* enable/disable */
409 break;
410 }
411 case MAC_ADDR_TYPE_MULTI_FLTR:
412 default:
413 QPRINTK(qdev, IFUP, CRIT,
414 "Address type %d not yet supported.\n", type);
415 status = -EPERM;
416 }
417exit:
418 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
419 return status;
420}
421
422/* Get a specific frame routing value from the CAM.
423 * Used for debug and reg dump.
424 */
425int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
426{
427 int status = 0;
428
429 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
430 if (status)
431 goto exit;
432
433	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
434 if (status)
435 goto exit;
436
437 ql_write32(qdev, RT_IDX,
438 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
439	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
440 if (status)
441 goto exit;
442 *value = ql_read32(qdev, RT_DATA);
443exit:
444 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
445 return status;
446}
447
448/* The NIC function for this chip has 16 routing indexes. Each one can be used
449 * to route different frame types to various inbound queues. We send broadcast/
450 * multicast/error frames to the default queue for slow handling,
451 * and CAM hit/RSS frames to the fast handling queues.
452 */
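/* For instance, a caller enabling the unicast CAM-hit route would do roughly:
 *
 *	ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, RT_IDX_CAM_HIT, 1);
 *
 * which programs that slot to steer CAM-hit frames to the CAM/RSS
 * destination queues (the RT_IDX_CAM_HIT case below).
 */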
453static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
454 int enable)
455{
456 int status;
457 u32 value = 0;
458
459 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
460 if (status)
461 return status;
462
463 QPRINTK(qdev, IFUP, DEBUG,
464 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
465 (enable ? "Adding" : "Removing"),
466 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
467 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
468 ((index ==
469 RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
470 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
471 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
472 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
473 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
474 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
475 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
476 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
477 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
478 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
479 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
480 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
481 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
482 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
483 (enable ? "to" : "from"));
484
485 switch (mask) {
486 case RT_IDX_CAM_HIT:
487 {
488 value = RT_IDX_DST_CAM_Q | /* dest */
489 RT_IDX_TYPE_NICQ | /* type */
490 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
491 break;
492 }
493 case RT_IDX_VALID: /* Promiscuous Mode frames. */
494 {
495 value = RT_IDX_DST_DFLT_Q | /* dest */
496 RT_IDX_TYPE_NICQ | /* type */
497 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
498 break;
499 }
500 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
501 {
502 value = RT_IDX_DST_DFLT_Q | /* dest */
503 RT_IDX_TYPE_NICQ | /* type */
504 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
505 break;
506 }
507 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
508 {
509 value = RT_IDX_DST_DFLT_Q | /* dest */
510 RT_IDX_TYPE_NICQ | /* type */
511 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
512 break;
513 }
514 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
515 {
516 value = RT_IDX_DST_CAM_Q | /* dest */
517 RT_IDX_TYPE_NICQ | /* type */
518 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
519 break;
520 }
521 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
522 {
523 value = RT_IDX_DST_CAM_Q | /* dest */
524 RT_IDX_TYPE_NICQ | /* type */
525 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
526 break;
527 }
528 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
529 {
530 value = RT_IDX_DST_RSS | /* dest */
531 RT_IDX_TYPE_NICQ | /* type */
532 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
533 break;
534 }
535 case 0: /* Clear the E-bit on an entry. */
536 {
537 value = RT_IDX_DST_DFLT_Q | /* dest */
538 RT_IDX_TYPE_NICQ | /* type */
539 (index << RT_IDX_IDX_SHIFT);/* index */
540 break;
541 }
542 default:
543 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
544 mask);
545 status = -EPERM;
546 goto exit;
547 }
548
549 if (value) {
550 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
551 if (status)
552 goto exit;
553 value |= (enable ? RT_IDX_E : 0);
554 ql_write32(qdev, RT_IDX, value);
555 ql_write32(qdev, RT_DATA, enable ? mask : 0);
556 }
557exit:
558 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
559 return status;
560}
561
562static void ql_enable_interrupts(struct ql_adapter *qdev)
563{
564 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
565}
566
567static void ql_disable_interrupts(struct ql_adapter *qdev)
568{
569 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
570}
571
572/* If we're running with multiple MSI-X vectors then we enable on the fly.
573 * Otherwise, we may have multiple outstanding workers and don't want to
574 * enable until the last one finishes. In this case, the irq_cnt gets
575 * incremented every time we queue a worker and decremented every time
576 * a worker finishes. Once it hits zero we enable the interrupt.
577 */
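/* For instance, with two workers outstanding on the same vector irq_cnt
 * sits at 2; each completion calls the enable routine below, and only the
 * call that drops the count to zero actually rewrites INTR_EN.
 */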
578u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
579{
580 u32 var = 0;
581 unsigned long hw_flags = 0;
582 struct intr_context *ctx = qdev->intr_context + intr;
583
584 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
585		/* Always enable if we're running multiple MSI-X interrupts and
586		 * it's not the default (zeroth) interrupt.
587 */
588		ql_write32(qdev, INTR_EN,
589 ctx->intr_en_mask);
590 var = ql_read32(qdev, STS);
591 return var;
592	}
593
594 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
595 if (atomic_dec_and_test(&ctx->irq_cnt)) {
596 ql_write32(qdev, INTR_EN,
597 ctx->intr_en_mask);
598 var = ql_read32(qdev, STS);
599 }
600 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
601 return var;
602}
603
604static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
605{
606 u32 var = 0;
607 unsigned long hw_flags;
608 struct intr_context *ctx;
609
610	/* HW disables for us if we're running multiple MSI-X interrupts and
611	 * it's not the default (zeroth) interrupt.
612 */
613 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
614 return 0;
615
616 ctx = qdev->intr_context + intr;
617 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
618 if (!atomic_read(&ctx->irq_cnt)) {
619		ql_write32(qdev, INTR_EN,
620			   ctx->intr_dis_mask);
621 var = ql_read32(qdev, STS);
622 }
623 atomic_inc(&ctx->irq_cnt);
624 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
625 return var;
626}
627
628static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
629{
630 int i;
631 for (i = 0; i < qdev->intr_count; i++) {
632		/* The enable call does an atomic_dec_and_test
633 * and enables only if the result is zero.
634 * So we precharge it here.
635 */
636 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
637 i == 0))
638 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
639 ql_enable_completion_interrupt(qdev, i);
640 }
641
642}
643
644static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
645{
646 int status = 0;
647 /* wait for reg to come ready */
648 status = ql_wait_reg_rdy(qdev,
649 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
650 if (status)
651 goto exit;
652 /* set up for reg read */
653 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
654 /* wait for reg to come ready */
655 status = ql_wait_reg_rdy(qdev,
656 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
657 if (status)
658 goto exit;
659 /* This data is stored on flash as an array of
660 * __le32. Since ql_read32() returns cpu endian
661 * we need to swap it back.
662 */
663 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
664exit:
665 return status;
666}
667
668static int ql_get_flash_params(struct ql_adapter *qdev)
669{
670 int i;
671 int status;
672	__le32 *p = (__le32 *)&qdev->flash;
673 u32 offset = 0;
674
675 /* Second function's parameters follow the first
676 * function's.
677 */
678 if (qdev->func)
679 offset = sizeof(qdev->flash) / sizeof(u32);
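	/* i.e. the second function reads flash words [N .. 2N-1] instead of
	 * [0 .. N-1], where N = sizeof(qdev->flash) / sizeof(u32).
	 */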
680
681 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
682 return -ETIMEDOUT;
683
684 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
685		status = ql_read_flash_word(qdev, i+offset, p);
686 if (status) {
687 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
688 goto exit;
689 }
690
691 }
692exit:
693 ql_sem_unlock(qdev, SEM_FLASH_MASK);
694 return status;
695}
696
697/* xgmac registers are located behind the xgmac_addr and xgmac_data
698 * register pair. Each read/write requires us to wait for the ready
699 * bit before reading/writing the data.
700 */
701static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
702{
703 int status;
704 /* wait for reg to come ready */
705 status = ql_wait_reg_rdy(qdev,
706 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
707 if (status)
708 return status;
709 /* write the data to the data reg */
710 ql_write32(qdev, XGMAC_DATA, data);
711 /* trigger the write */
712 ql_write32(qdev, XGMAC_ADDR, reg);
713 return status;
714}
715
716/* xgmac registers are located behind the xgmac_addr and xgmac_data
717 * register pair. Each read/write requires us to wait for the ready
718 * bit before reading/writing the data.
719 */
720int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
721{
722 int status = 0;
723 /* wait for reg to come ready */
724 status = ql_wait_reg_rdy(qdev,
725 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
726 if (status)
727 goto exit;
728 /* set up for reg read */
729 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
730 /* wait for reg to come ready */
731 status = ql_wait_reg_rdy(qdev,
732 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
733 if (status)
734 goto exit;
735 /* get the data */
736 *data = ql_read32(qdev, XGMAC_DATA);
737exit:
738 return status;
739}
740
741/* This is used for reading the 64-bit statistics regs. */
742int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
743{
744 int status = 0;
745 u32 hi = 0;
746 u32 lo = 0;
747
748 status = ql_read_xgmac_reg(qdev, reg, &lo);
749 if (status)
750 goto exit;
751
752 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
753 if (status)
754 goto exit;
755
756 *data = (u64) lo | ((u64) hi << 32);
757
758exit:
759 return status;
760}
761
762/* Take the MAC Core out of reset.
763 * Enable statistics counting.
764 * Take the transmitter/receiver out of reset.
765 * This functionality may be done in the MPI firmware at a
766 * later date.
767 */
768static int ql_port_initialize(struct ql_adapter *qdev)
769{
770 int status = 0;
771 u32 data;
772
773 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
774 /* Another function has the semaphore, so
775 * wait for the port init bit to come ready.
776 */
777 QPRINTK(qdev, LINK, INFO,
778 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
779 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
780 if (status) {
781 QPRINTK(qdev, LINK, CRIT,
782 "Port initialize timed out.\n");
783 }
784 return status;
785 }
786
787 QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
788 /* Set the core reset. */
789 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
790 if (status)
791 goto end;
792 data |= GLOBAL_CFG_RESET;
793 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
794 if (status)
795 goto end;
796
797 /* Clear the core reset and turn on jumbo for receiver. */
798 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
799 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
800 data |= GLOBAL_CFG_TX_STAT_EN;
801 data |= GLOBAL_CFG_RX_STAT_EN;
802 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
803 if (status)
804 goto end;
805
806	/* Enable the transmitter and clear its reset. */
807 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
808 if (status)
809 goto end;
810 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
811 data |= TX_CFG_EN; /* Enable the transmitter. */
812 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
813 if (status)
814 goto end;
815
816	/* Enable the receiver and clear its reset. */
817 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
818 if (status)
819 goto end;
820 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
821 data |= RX_CFG_EN; /* Enable the receiver. */
822 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
823 if (status)
824 goto end;
825
826 /* Turn on jumbo. */
827 status =
828 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
829 if (status)
830 goto end;
831 status =
832 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
833 if (status)
834 goto end;
835
836 /* Signal to the world that the port is enabled. */
837 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
838end:
839 ql_sem_unlock(qdev, qdev->xg_sem_mask);
840 return status;
841}
842
843/* Get the next large buffer. */
844static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
845{
846 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
847 rx_ring->lbq_curr_idx++;
848 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
849 rx_ring->lbq_curr_idx = 0;
850 rx_ring->lbq_free_cnt++;
851 return lbq_desc;
852}
853
854/* Get the next small buffer. */
855static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
856{
857 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
858 rx_ring->sbq_curr_idx++;
859 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
860 rx_ring->sbq_curr_idx = 0;
861 rx_ring->sbq_free_cnt++;
862 return sbq_desc;
863}
864
865/* Update an rx ring index. */
866static void ql_update_cq(struct rx_ring *rx_ring)
867{
868 rx_ring->cnsmr_idx++;
869 rx_ring->curr_entry++;
870 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
871 rx_ring->cnsmr_idx = 0;
872 rx_ring->curr_entry = rx_ring->cq_base;
873 }
874}
875
876static void ql_write_cq_idx(struct rx_ring *rx_ring)
877{
878 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
879}
880
881/* Process (refill) a large buffer queue. */
882static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
883{
884 int clean_idx = rx_ring->lbq_clean_idx;
885 struct bq_desc *lbq_desc;
886 u64 map;
887 int i;
888
889 while (rx_ring->lbq_free_cnt > 16) {
890 for (i = 0; i < 16; i++) {
891 QPRINTK(qdev, RX_STATUS, DEBUG,
892 "lbq: try cleaning clean_idx = %d.\n",
893 clean_idx);
894 lbq_desc = &rx_ring->lbq[clean_idx];
895 if (lbq_desc->p.lbq_page == NULL) {
896 QPRINTK(qdev, RX_STATUS, DEBUG,
897 "lbq: getting new page for index %d.\n",
898 lbq_desc->index);
899 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
900 if (lbq_desc->p.lbq_page == NULL) {
901				rx_ring->lbq_clean_idx = clean_idx;
902 QPRINTK(qdev, RX_STATUS, ERR,
903 "Couldn't get a page.\n");
904 return;
905 }
906 map = pci_map_page(qdev->pdev,
907 lbq_desc->p.lbq_page,
908 0, PAGE_SIZE,
909 PCI_DMA_FROMDEVICE);
910 if (pci_dma_mapping_error(qdev->pdev, map)) {
911					rx_ring->lbq_clean_idx = clean_idx;
912 put_page(lbq_desc->p.lbq_page);
913 lbq_desc->p.lbq_page = NULL;
914 QPRINTK(qdev, RX_STATUS, ERR,
915 "PCI mapping failed.\n");
916 return;
917 }
918 pci_unmap_addr_set(lbq_desc, mapaddr, map);
919 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
920				*lbq_desc->addr = cpu_to_le64(map);
921 }
922 clean_idx++;
923 if (clean_idx == rx_ring->lbq_len)
924 clean_idx = 0;
925 }
926
927 rx_ring->lbq_clean_idx = clean_idx;
928 rx_ring->lbq_prod_idx += 16;
929 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
930 rx_ring->lbq_prod_idx = 0;
931 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "lbq: updating prod idx = %d.\n",
933 rx_ring->lbq_prod_idx);
934 ql_write_db_reg(rx_ring->lbq_prod_idx,
935 rx_ring->lbq_prod_idx_db_reg);
936 rx_ring->lbq_free_cnt -= 16;
937 }
938}
939
940/* Process (refill) a small buffer queue. */
941static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
942{
943 int clean_idx = rx_ring->sbq_clean_idx;
944 struct bq_desc *sbq_desc;
945 u64 map;
946 int i;
947
948 while (rx_ring->sbq_free_cnt > 16) {
949 for (i = 0; i < 16; i++) {
950 sbq_desc = &rx_ring->sbq[clean_idx];
951 QPRINTK(qdev, RX_STATUS, DEBUG,
952 "sbq: try cleaning clean_idx = %d.\n",
953 clean_idx);
954 if (sbq_desc->p.skb == NULL) {
955 QPRINTK(qdev, RX_STATUS, DEBUG,
956 "sbq: getting new skb for index %d.\n",
957 sbq_desc->index);
958 sbq_desc->p.skb =
959 netdev_alloc_skb(qdev->ndev,
960 rx_ring->sbq_buf_size);
961 if (sbq_desc->p.skb == NULL) {
962 QPRINTK(qdev, PROBE, ERR,
963 "Couldn't get an skb.\n");
964 rx_ring->sbq_clean_idx = clean_idx;
965 return;
966 }
967 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
968 map = pci_map_single(qdev->pdev,
969 sbq_desc->p.skb->data,
970 rx_ring->sbq_buf_size /
971 2, PCI_DMA_FROMDEVICE);
972 if (pci_dma_mapping_error(qdev->pdev, map)) {
973 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
974 rx_ring->sbq_clean_idx = clean_idx;
975 dev_kfree_skb_any(sbq_desc->p.skb);
976 sbq_desc->p.skb = NULL;
977 return;
978 }
979 pci_unmap_addr_set(sbq_desc, mapaddr, map);
980 pci_unmap_len_set(sbq_desc, maplen,
981 rx_ring->sbq_buf_size / 2);
982				*sbq_desc->addr = cpu_to_le64(map);
983 }
984
985 clean_idx++;
986 if (clean_idx == rx_ring->sbq_len)
987 clean_idx = 0;
988 }
989 rx_ring->sbq_clean_idx = clean_idx;
990 rx_ring->sbq_prod_idx += 16;
991 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
992 rx_ring->sbq_prod_idx = 0;
993 QPRINTK(qdev, RX_STATUS, DEBUG,
994 "sbq: updating prod idx = %d.\n",
995 rx_ring->sbq_prod_idx);
996 ql_write_db_reg(rx_ring->sbq_prod_idx,
997 rx_ring->sbq_prod_idx_db_reg);
998
999 rx_ring->sbq_free_cnt -= 16;
1000 }
1001}
1002
1003static void ql_update_buffer_queues(struct ql_adapter *qdev,
1004 struct rx_ring *rx_ring)
1005{
1006 ql_update_sbq(qdev, rx_ring);
1007 ql_update_lbq(qdev, rx_ring);
1008}
1009
1010/* Unmaps tx buffers. Can be called from send() if a pci mapping
1011 * fails at some stage, or from the interrupt when a tx completes.
1012 */
1013static void ql_unmap_send(struct ql_adapter *qdev,
1014 struct tx_ring_desc *tx_ring_desc, int mapped)
1015{
1016 int i;
1017 for (i = 0; i < mapped; i++) {
1018 if (i == 0 || (i == 7 && mapped > 7)) {
1019 /*
1020 * Unmap the skb->data area, or the
1021 * external sglist (AKA the Outbound
1022 * Address List (OAL)).
1023			 * If it's the zeroth element, then it's
1024			 * the skb->data area. If it's the 7th
1025			 * element and there are more than 6 frags,
1026			 * then it's an OAL.
1027 */
1028 if (i == 7) {
1029 QPRINTK(qdev, TX_DONE, DEBUG,
1030 "unmapping OAL area.\n");
1031 }
1032 pci_unmap_single(qdev->pdev,
1033 pci_unmap_addr(&tx_ring_desc->map[i],
1034 mapaddr),
1035 pci_unmap_len(&tx_ring_desc->map[i],
1036 maplen),
1037 PCI_DMA_TODEVICE);
1038 } else {
1039 QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1040 i);
1041 pci_unmap_page(qdev->pdev,
1042 pci_unmap_addr(&tx_ring_desc->map[i],
1043 mapaddr),
1044 pci_unmap_len(&tx_ring_desc->map[i],
1045 maplen), PCI_DMA_TODEVICE);
1046 }
1047 }
1048
1049}
1050
1051/* Map the buffers for this transmit. This will return
1052 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1053 */
1054static int ql_map_send(struct ql_adapter *qdev,
1055 struct ob_mac_iocb_req *mac_iocb_ptr,
1056 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1057{
1058 int len = skb_headlen(skb);
1059 dma_addr_t map;
1060 int frag_idx, err, map_idx = 0;
1061 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1062 int frag_cnt = skb_shinfo(skb)->nr_frags;
1063
1064 if (frag_cnt) {
1065 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1066 }
1067 /*
1068 * Map the skb buffer first.
1069 */
1070 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1071
1072 err = pci_dma_mapping_error(qdev->pdev, map);
1073 if (err) {
1074 QPRINTK(qdev, TX_QUEUED, ERR,
1075 "PCI mapping failed with error: %d\n", err);
1076
1077 return NETDEV_TX_BUSY;
1078 }
1079
1080 tbd->len = cpu_to_le32(len);
1081 tbd->addr = cpu_to_le64(map);
1082 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1083 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1084 map_idx++;
1085
1086 /*
1087 * This loop fills the remainder of the 8 address descriptors
1088 * in the IOCB. If there are more than 7 fragments, then the
1089 * eighth address desc will point to an external list (OAL).
1090 * When this happens, the remainder of the frags will be stored
1091 * in this list.
1092 */
1093 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1094 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1095 tbd++;
1096 if (frag_idx == 6 && frag_cnt > 7) {
1097 /* Let's tack on an sglist.
1098 * Our control block will now
1099 * look like this:
1100 * iocb->seg[0] = skb->data
1101 * iocb->seg[1] = frag[0]
1102 * iocb->seg[2] = frag[1]
1103 * iocb->seg[3] = frag[2]
1104 * iocb->seg[4] = frag[3]
1105 * iocb->seg[5] = frag[4]
1106 * iocb->seg[6] = frag[5]
1107 * iocb->seg[7] = ptr to OAL (external sglist)
1108 * oal->seg[0] = frag[6]
1109 * oal->seg[1] = frag[7]
1110 * oal->seg[2] = frag[8]
1111 * oal->seg[3] = frag[9]
1112 * oal->seg[4] = frag[10]
1113 * etc...
1114 */
1115 /* Tack on the OAL in the eighth segment of IOCB. */
1116 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1117 sizeof(struct oal),
1118 PCI_DMA_TODEVICE);
1119 err = pci_dma_mapping_error(qdev->pdev, map);
1120 if (err) {
1121 QPRINTK(qdev, TX_QUEUED, ERR,
1122 "PCI mapping outbound address list with error: %d\n",
1123 err);
1124 goto map_error;
1125 }
1126
1127 tbd->addr = cpu_to_le64(map);
1128 /*
1129 * The length is the number of fragments
1130 * that remain to be mapped times the length
1131 * of our sglist (OAL).
1132 */
1133 tbd->len =
1134 cpu_to_le32((sizeof(struct tx_buf_desc) *
1135 (frag_cnt - frag_idx)) | TX_DESC_C);
1136 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1137 map);
1138 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1139 sizeof(struct oal));
1140 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1141 map_idx++;
1142 }
1143
1144 map =
1145 pci_map_page(qdev->pdev, frag->page,
1146 frag->page_offset, frag->size,
1147 PCI_DMA_TODEVICE);
1148
1149 err = pci_dma_mapping_error(qdev->pdev, map);
1150 if (err) {
1151 QPRINTK(qdev, TX_QUEUED, ERR,
1152 "PCI mapping frags failed with error: %d.\n",
1153 err);
1154 goto map_error;
1155 }
1156
1157 tbd->addr = cpu_to_le64(map);
1158 tbd->len = cpu_to_le32(frag->size);
1159 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1160 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1161 frag->size);
1162
1163 }
1164 /* Save the number of segments we've mapped. */
1165 tx_ring_desc->map_cnt = map_idx;
1166 /* Terminate the last segment. */
1167 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1168 return NETDEV_TX_OK;
1169
1170map_error:
1171 /*
1172 * If the first frag mapping failed, then i will be zero.
1173 * This causes the unmap of the skb->data area. Otherwise
1174 * we pass in the number of frags that mapped successfully
1175	 * so they can be unmapped.
1176 */
1177 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1178 return NETDEV_TX_BUSY;
1179}
1180
1181static void ql_realign_skb(struct sk_buff *skb, int len)
1182{
1183 void *temp_addr = skb->data;
1184
1185 /* Undo the skb_reserve(skb,32) we did before
1186 * giving to hardware, and realign data on
1187 * a 2-byte boundary.
1188 */
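	/* e.g. assuming QLGE_SB_PAD is the 32 bytes reserved above and
	 * NET_IP_ALIGN its usual value of 2, data/tail move back 30 bytes
	 * so the payload copied below ends up IP-aligned again.
	 */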
1189 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1190 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1191 skb_copy_to_linear_data(skb, temp_addr,
1192 (unsigned int)len);
1193}
1194
1195/*
1196 * This function builds an skb for the given inbound
1197 * completion. It will be rewritten for readability in the near
1198 * future, but for now it works well.
1199 */
1200static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1201 struct rx_ring *rx_ring,
1202 struct ib_mac_iocb_rsp *ib_mac_rsp)
1203{
1204 struct bq_desc *lbq_desc;
1205 struct bq_desc *sbq_desc;
1206 struct sk_buff *skb = NULL;
1207 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1208 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1209
1210 /*
1211 * Handle the header buffer if present.
1212 */
1213 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1214 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1215 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1216 /*
1217 * Headers fit nicely into a small buffer.
1218 */
1219 sbq_desc = ql_get_curr_sbuf(rx_ring);
1220 pci_unmap_single(qdev->pdev,
1221 pci_unmap_addr(sbq_desc, mapaddr),
1222 pci_unmap_len(sbq_desc, maplen),
1223 PCI_DMA_FROMDEVICE);
1224 skb = sbq_desc->p.skb;
1225 ql_realign_skb(skb, hdr_len);
1226 skb_put(skb, hdr_len);
1227 sbq_desc->p.skb = NULL;
1228 }
1229
1230 /*
1231 * Handle the data buffer(s).
1232 */
1233 if (unlikely(!length)) { /* Is there data too? */
1234 QPRINTK(qdev, RX_STATUS, DEBUG,
1235 "No Data buffer in this packet.\n");
1236 return skb;
1237 }
1238
1239 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1240 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1241 QPRINTK(qdev, RX_STATUS, DEBUG,
1242 "Headers in small, data of %d bytes in small, combine them.\n", length);
1243 /*
1244 * Data is less than small buffer size so it's
1245 * stuffed in a small buffer.
1246 * For this case we append the data
1247 * from the "data" small buffer to the "header" small
1248 * buffer.
1249 */
1250 sbq_desc = ql_get_curr_sbuf(rx_ring);
1251 pci_dma_sync_single_for_cpu(qdev->pdev,
1252 pci_unmap_addr
1253 (sbq_desc, mapaddr),
1254 pci_unmap_len
1255 (sbq_desc, maplen),
1256 PCI_DMA_FROMDEVICE);
1257 memcpy(skb_put(skb, length),
1258 sbq_desc->p.skb->data, length);
1259 pci_dma_sync_single_for_device(qdev->pdev,
1260 pci_unmap_addr
1261 (sbq_desc,
1262 mapaddr),
1263 pci_unmap_len
1264 (sbq_desc,
1265 maplen),
1266 PCI_DMA_FROMDEVICE);
1267 } else {
1268 QPRINTK(qdev, RX_STATUS, DEBUG,
1269 "%d bytes in a single small buffer.\n", length);
1270 sbq_desc = ql_get_curr_sbuf(rx_ring);
1271 skb = sbq_desc->p.skb;
1272 ql_realign_skb(skb, length);
1273 skb_put(skb, length);
1274 pci_unmap_single(qdev->pdev,
1275 pci_unmap_addr(sbq_desc,
1276 mapaddr),
1277 pci_unmap_len(sbq_desc,
1278 maplen),
1279 PCI_DMA_FROMDEVICE);
1280 sbq_desc->p.skb = NULL;
1281 }
1282 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1283 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1284 QPRINTK(qdev, RX_STATUS, DEBUG,
1285 "Header in small, %d bytes in large. Chain large to small!\n", length);
1286 /*
1287 * The data is in a single large buffer. We
1288 * chain it to the header buffer's skb and let
1289 * it rip.
1290 */
1291 lbq_desc = ql_get_curr_lbuf(rx_ring);
1292 pci_unmap_page(qdev->pdev,
1293 pci_unmap_addr(lbq_desc,
1294 mapaddr),
1295 pci_unmap_len(lbq_desc, maplen),
1296 PCI_DMA_FROMDEVICE);
1297 QPRINTK(qdev, RX_STATUS, DEBUG,
1298 "Chaining page to skb.\n");
1299 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1300 0, length);
1301 skb->len += length;
1302 skb->data_len += length;
1303 skb->truesize += length;
1304 lbq_desc->p.lbq_page = NULL;
1305 } else {
1306 /*
1307 * The headers and data are in a single large buffer. We
1308			 * chain it to a newly allocated skb and let it go. This can happen with
1309			 * jumbo mtu on a non-TCP/UDP frame.
1310 */
1311 lbq_desc = ql_get_curr_lbuf(rx_ring);
1312 skb = netdev_alloc_skb(qdev->ndev, length);
1313 if (skb == NULL) {
1314 QPRINTK(qdev, PROBE, DEBUG,
1315 "No skb available, drop the packet.\n");
1316 return NULL;
1317 }
1318 pci_unmap_page(qdev->pdev,
1319 pci_unmap_addr(lbq_desc,
1320 mapaddr),
1321 pci_unmap_len(lbq_desc, maplen),
1322 PCI_DMA_FROMDEVICE);
1323 skb_reserve(skb, NET_IP_ALIGN);
1324 QPRINTK(qdev, RX_STATUS, DEBUG,
1325 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1326 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1327 0, length);
1328 skb->len += length;
1329 skb->data_len += length;
1330 skb->truesize += length;
1331 length -= length;
1332 lbq_desc->p.lbq_page = NULL;
1333 __pskb_pull_tail(skb,
1334 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1335 VLAN_ETH_HLEN : ETH_HLEN);
1336 }
1337 } else {
1338 /*
1339 * The data is in a chain of large buffers
1340 * pointed to by a small buffer. We loop
1341		 * through and chain them to our small header
1342 * buffer's skb.
1343 * frags: There are 18 max frags and our small
1344 * buffer will hold 32 of them. The thing is,
1345 * we'll use 3 max for our 9000 byte jumbo
1346 * frames. If the MTU goes up we could
1347 * eventually be in trouble.
1348 */
1349 int size, offset, i = 0;
1350		__le64 *bq, bq_array[8];
1351 sbq_desc = ql_get_curr_sbuf(rx_ring);
1352 pci_unmap_single(qdev->pdev,
1353 pci_unmap_addr(sbq_desc, mapaddr),
1354 pci_unmap_len(sbq_desc, maplen),
1355 PCI_DMA_FROMDEVICE);
1356 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1357 /*
1358			 * This is a non-TCP/UDP IP frame, so
1359 * the headers aren't split into a small
1360 * buffer. We have to use the small buffer
1361 * that contains our sg list as our skb to
1362 * send upstairs. Copy the sg list here to
1363 * a local buffer and use it to find the
1364 * pages to chain.
1365 */
1366 QPRINTK(qdev, RX_STATUS, DEBUG,
1367 "%d bytes of headers & data in chain of large.\n", length);
1368 skb = sbq_desc->p.skb;
1369 bq = &bq_array[0];
1370 memcpy(bq, skb->data, sizeof(bq_array));
1371 sbq_desc->p.skb = NULL;
1372 skb_reserve(skb, NET_IP_ALIGN);
1373 } else {
1374 QPRINTK(qdev, RX_STATUS, DEBUG,
1375 "Headers in small, %d bytes of data in chain of large.\n", length);
1376			bq = (__le64 *)sbq_desc->p.skb->data;
1377 }
1378 while (length > 0) {
1379 lbq_desc = ql_get_curr_lbuf(rx_ring);
1380 pci_unmap_page(qdev->pdev,
1381 pci_unmap_addr(lbq_desc,
1382 mapaddr),
1383 pci_unmap_len(lbq_desc,
1384 maplen),
1385 PCI_DMA_FROMDEVICE);
1386 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1387 offset = 0;
1388
1389 QPRINTK(qdev, RX_STATUS, DEBUG,
1390 "Adding page %d to skb for %d bytes.\n",
1391 i, size);
1392 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1393 offset, size);
1394 skb->len += size;
1395 skb->data_len += size;
1396 skb->truesize += size;
1397 length -= size;
1398 lbq_desc->p.lbq_page = NULL;
1399 bq++;
1400 i++;
1401 }
1402 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1403 VLAN_ETH_HLEN : ETH_HLEN);
1404 }
1405 return skb;
1406}
1407
1408/* Process an inbound completion from an rx ring. */
1409static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1410 struct rx_ring *rx_ring,
1411 struct ib_mac_iocb_rsp *ib_mac_rsp)
1412{
1413 struct net_device *ndev = qdev->ndev;
1414 struct sk_buff *skb = NULL;
1415
1416 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1417
1418 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1419 if (unlikely(!skb)) {
1420 QPRINTK(qdev, RX_STATUS, DEBUG,
1421 "No skb available, drop packet.\n");
1422 return;
1423 }
1424
1425 prefetch(skb->data);
1426 skb->dev = ndev;
1427 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1428 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1429 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1430 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1431 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1432 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1433 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1434 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1435 }
1436 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1437 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1438 }
1439 if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
1440 QPRINTK(qdev, RX_STATUS, ERR,
1441 "Bad checksum for this %s packet.\n",
1442 ((ib_mac_rsp->
1443 flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
1444 skb->ip_summed = CHECKSUM_NONE;
1445 } else if (qdev->rx_csum &&
1446 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
1447 ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1448 !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
1449 QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
1450 skb->ip_summed = CHECKSUM_UNNECESSARY;
1451 }
1452 qdev->stats.rx_packets++;
1453 qdev->stats.rx_bytes += skb->len;
1454 skb->protocol = eth_type_trans(skb, ndev);
1455	skb_record_rx_queue(skb, rx_ring - &qdev->rx_ring[0]);
1456 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
1457 QPRINTK(qdev, RX_STATUS, DEBUG,
1458 "Passing a VLAN packet upstream.\n");
1459		vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
1460 le16_to_cpu(ib_mac_rsp->vlan_id));
1461 } else {
1462 QPRINTK(qdev, RX_STATUS, DEBUG,
1463 "Passing a normal packet upstream.\n");
1464		netif_receive_skb(skb);
1465	}
1466}
1467
1468/* Process an outbound completion from an rx ring. */
1469static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1470 struct ob_mac_iocb_rsp *mac_rsp)
1471{
1472 struct tx_ring *tx_ring;
1473 struct tx_ring_desc *tx_ring_desc;
1474
1475 QL_DUMP_OB_MAC_RSP(mac_rsp);
1476 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1477 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1478 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1479	qdev->stats.tx_bytes += tx_ring_desc->skb->len;
1480 qdev->stats.tx_packets++;
1481 dev_kfree_skb(tx_ring_desc->skb);
1482 tx_ring_desc->skb = NULL;
1483
1484 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1485 OB_MAC_IOCB_RSP_S |
1486 OB_MAC_IOCB_RSP_L |
1487 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1488 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1489 QPRINTK(qdev, TX_DONE, WARNING,
1490 "Total descriptor length did not match transfer length.\n");
1491 }
1492 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1493 QPRINTK(qdev, TX_DONE, WARNING,
1494 "Frame too short to be legal, not sent.\n");
1495 }
1496 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1497 QPRINTK(qdev, TX_DONE, WARNING,
1498 "Frame too long, but sent anyway.\n");
1499 }
1500 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1501 QPRINTK(qdev, TX_DONE, WARNING,
1502 "PCI backplane error. Frame not sent.\n");
1503 }
1504 }
1505 atomic_inc(&tx_ring->tx_count);
1506}
1507
1508/* Fire up a handler to reset the MPI processor. */
1509void ql_queue_fw_error(struct ql_adapter *qdev)
1510{
1511 netif_stop_queue(qdev->ndev);
1512 netif_carrier_off(qdev->ndev);
1513 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1514}
1515
1516void ql_queue_asic_error(struct ql_adapter *qdev)
1517{
1518 netif_stop_queue(qdev->ndev);
1519 netif_carrier_off(qdev->ndev);
1520 ql_disable_interrupts(qdev);
1521 /* Clear adapter up bit to signal the recovery
1522 * process that it shouldn't kill the reset worker
1523 * thread
1524 */
1525 clear_bit(QL_ADAPTER_UP, &qdev->flags);
1526 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1527}
1528
1529static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1530 struct ib_ae_iocb_rsp *ib_ae_rsp)
1531{
1532 switch (ib_ae_rsp->event) {
1533 case MGMT_ERR_EVENT:
1534 QPRINTK(qdev, RX_ERR, ERR,
1535 "Management Processor Fatal Error.\n");
1536 ql_queue_fw_error(qdev);
1537 return;
1538
1539 case CAM_LOOKUP_ERR_EVENT:
1540 QPRINTK(qdev, LINK, ERR,
1541 "Multiple CAM hits lookup occurred.\n");
1542 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1543 ql_queue_asic_error(qdev);
1544 return;
1545
1546 case SOFT_ECC_ERROR_EVENT:
1547 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1548 ql_queue_asic_error(qdev);
1549 break;
1550
1551 case PCI_ERR_ANON_BUF_RD:
1552 QPRINTK(qdev, RX_ERR, ERR,
1553 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1554 ib_ae_rsp->q_id);
1555 ql_queue_asic_error(qdev);
1556 break;
1557
1558 default:
1559 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1560 ib_ae_rsp->event);
1561 ql_queue_asic_error(qdev);
1562 break;
1563 }
1564}
1565
1566static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1567{
1568 struct ql_adapter *qdev = rx_ring->qdev;
1569	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1570 struct ob_mac_iocb_rsp *net_rsp = NULL;
1571 int count = 0;
1572
1573 /* While there are entries in the completion queue. */
1574 while (prod != rx_ring->cnsmr_idx) {
1575
1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1577 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1578 prod, rx_ring->cnsmr_idx);
1579
1580 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
1581 rmb();
1582 switch (net_rsp->opcode) {
1583
1584 case OPCODE_OB_MAC_TSO_IOCB:
1585 case OPCODE_OB_MAC_IOCB:
1586 ql_process_mac_tx_intr(qdev, net_rsp);
1587 break;
1588 default:
1589 QPRINTK(qdev, RX_STATUS, DEBUG,
1590 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1591 net_rsp->opcode);
1592 }
1593 count++;
1594 ql_update_cq(rx_ring);
1595		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1596 }
1597 ql_write_cq_idx(rx_ring);
1598 if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
1599 struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1600 if (atomic_read(&tx_ring->queue_stopped) &&
1601 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1602 /*
1603 * The queue got stopped because the tx_ring was full.
1604 * Wake it up, because it's now at least 25% empty.
1605 */
1606 netif_wake_queue(qdev->ndev);
1607 }
1608
1609 return count;
1610}
1611
1612static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1613{
1614 struct ql_adapter *qdev = rx_ring->qdev;
1615	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1616 struct ql_net_rsp_iocb *net_rsp;
1617 int count = 0;
1618
1619 /* While there are entries in the completion queue. */
1620 while (prod != rx_ring->cnsmr_idx) {
1621
1622 QPRINTK(qdev, RX_STATUS, DEBUG,
1623 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
1624 prod, rx_ring->cnsmr_idx);
1625
1626 net_rsp = rx_ring->curr_entry;
1627 rmb();
1628 switch (net_rsp->opcode) {
1629 case OPCODE_IB_MAC_IOCB:
1630 ql_process_mac_rx_intr(qdev, rx_ring,
1631 (struct ib_mac_iocb_rsp *)
1632 net_rsp);
1633 break;
1634
1635 case OPCODE_IB_AE_IOCB:
1636 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1637 net_rsp);
1638 break;
1639 default:
1640 {
1641 QPRINTK(qdev, RX_STATUS, DEBUG,
1642 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1643 net_rsp->opcode);
1644 }
1645 }
1646 count++;
1647 ql_update_cq(rx_ring);
1648		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1649 if (count == budget)
1650 break;
1651 }
1652 ql_update_buffer_queues(qdev, rx_ring);
1653 ql_write_cq_idx(rx_ring);
1654 return count;
1655}
1656
1657static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1658{
1659 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1660 struct ql_adapter *qdev = rx_ring->qdev;
1661 int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1662
1663 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1664 rx_ring->cq_id);
1665
1666 if (work_done < budget) {
1667		__napi_complete(napi);
1668 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1669 }
1670 return work_done;
1671}
1672
1673static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1674{
1675 struct ql_adapter *qdev = netdev_priv(ndev);
1676
1677 qdev->vlgrp = grp;
1678 if (grp) {
1679 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1680 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1681 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1682 } else {
1683 QPRINTK(qdev, IFUP, DEBUG,
1684 "Turning off VLAN in NIC_RCV_CFG.\n");
1685 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1686 }
1687}
1688
1689static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1690{
1691 struct ql_adapter *qdev = netdev_priv(ndev);
1692 u32 enable_bit = MAC_ADDR_E;
1693
1694 spin_lock(&qdev->hw_lock);
1695 if (ql_set_mac_addr_reg
1696 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1697 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1698 }
1699 spin_unlock(&qdev->hw_lock);
1700}
1701
1702static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1703{
1704 struct ql_adapter *qdev = netdev_priv(ndev);
1705 u32 enable_bit = 0;
1706
1707 spin_lock(&qdev->hw_lock);
1708 if (ql_set_mac_addr_reg
1709 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1710 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1711 }
1712 spin_unlock(&qdev->hw_lock);
1713
1714}
1715
1716/* Worker thread to process a given rx_ring that is dedicated
1717 * to outbound completions.
1718 */
1719static void ql_tx_clean(struct work_struct *work)
1720{
1721 struct rx_ring *rx_ring =
1722 container_of(work, struct rx_ring, rx_work.work);
1723 ql_clean_outbound_rx_ring(rx_ring);
1724 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1725
1726}
1727
1728/* Worker thread to process a given rx_ring that is dedicated
1729 * to inbound completions.
1730 */
1731static void ql_rx_clean(struct work_struct *work)
1732{
1733 struct rx_ring *rx_ring =
1734 container_of(work, struct rx_ring, rx_work.work);
1735 ql_clean_inbound_rx_ring(rx_ring, 64);
1736 ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
1737}
1738
1739/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
1740static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1741{
1742 struct rx_ring *rx_ring = dev_id;
1743 queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
1744 &rx_ring->rx_work, 0);
1745 return IRQ_HANDLED;
1746}
1747
1748/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1749static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1750{
1751 struct rx_ring *rx_ring = dev_id;
1752	napi_schedule(&rx_ring->napi);
1753 return IRQ_HANDLED;
1754}
1755
1756/* This handles a fatal error, MPI activity, and the default
1757 * rx_ring in an MSI-X multiple vector environment.
1758 * In an MSI/Legacy environment it also processes the rest of
1759 * the rx_rings.
1760 */
1761static irqreturn_t qlge_isr(int irq, void *dev_id)
1762{
1763 struct rx_ring *rx_ring = dev_id;
1764 struct ql_adapter *qdev = rx_ring->qdev;
1765 struct intr_context *intr_context = &qdev->intr_context[0];
1766 u32 var;
1767 int i;
1768 int work_done = 0;
1769
1770 spin_lock(&qdev->hw_lock);
1771 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
1772 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
1773 spin_unlock(&qdev->hw_lock);
1774 return IRQ_NONE;
1775	}
1776	spin_unlock(&qdev->hw_lock);
1777
1778	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
1779
1780 /*
1781 * Check for fatal error.
1782 */
1783 if (var & STS_FE) {
1784 ql_queue_asic_error(qdev);
1785 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
1786 var = ql_read32(qdev, ERR_STS);
1787 QPRINTK(qdev, INTR, ERR,
1788 "Resetting chip. Error Status Register = 0x%x\n", var);
1789 return IRQ_HANDLED;
1790 }
1791
1792 /*
1793 * Check MPI processor activity.
1794 */
1795 if (var & STS_PI) {
1796 /*
1797 * We've got an async event or mailbox completion.
1798 * Handle it and clear the source of the interrupt.
1799 */
1800 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
1801 ql_disable_completion_interrupt(qdev, intr_context->intr);
1802 queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
1803 &qdev->mpi_work, 0);
1804 work_done++;
1805 }
1806
1807 /*
1808 * Check the default queue and wake handler if active.
1809 */
1810 rx_ring = &qdev->rx_ring[0];
1811	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
1812 QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
1813 ql_disable_completion_interrupt(qdev, intr_context->intr);
1814 queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
1815 &rx_ring->rx_work, 0);
1816 work_done++;
1817 }
1818
1819 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
1820 /*
1821 * Start the DPC for each active queue.
1822 */
1823 for (i = 1; i < qdev->rx_ring_count; i++) {
1824 rx_ring = &qdev->rx_ring[i];
1825			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1826 rx_ring->cnsmr_idx) {
1827 QPRINTK(qdev, INTR, INFO,
1828 "Waking handler for rx_ring[%d].\n", i);
1829 ql_disable_completion_interrupt(qdev,
1830 intr_context->
1831 intr);
1832 if (i < qdev->rss_ring_first_cq_id)
1833 queue_delayed_work_on(rx_ring->cpu,
1834 qdev->q_workqueue,
1835 &rx_ring->rx_work,
1836 0);
1837 else
288379f0 1838 napi_schedule(&rx_ring->napi);
c4e84bde
RM
1839 work_done++;
1840 }
1841 }
1842 }
bb0d215c 1843 ql_enable_completion_interrupt(qdev, intr_context->intr);
c4e84bde
RM
1844 return work_done ? IRQ_HANDLED : IRQ_NONE;
1845}
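
/*
 * A minimal user-space sketch of the check qlge_isr() uses above to
 * decide whether a completion queue has work pending.  The chip DMAs
 * its producer index into a shadow location in host memory; the ring
 * needs service whenever that value differs from the driver's
 * consumer index.  The names shadow_ring and ring_has_work are
 * illustrative only and are not part of the driver.
 */
#include <stdbool.h>
#include <stdint.h>

struct shadow_ring {
	volatile uint32_t *prod_idx_sh_reg;	/* updated by the device */
	uint32_t cnsmr_idx;			/* advanced by the driver */
};

static bool ring_has_work(const struct shadow_ring *ring)
{
	/* Any difference means new completion entries are waiting. */
	return *ring->prod_idx_sh_reg != ring->cnsmr_idx;
}
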
1846
1847static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1848{
1849
1850 if (skb_is_gso(skb)) {
1851 int err;
1852 if (skb_header_cloned(skb)) {
1853 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1854 if (err)
1855 return err;
1856 }
1857
1858 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1859 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
1860 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1861 mac_iocb_ptr->total_hdrs_len =
1862 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
1863 mac_iocb_ptr->net_trans_offset =
1864 cpu_to_le16(skb_network_offset(skb) |
1865 skb_transport_offset(skb)
1866 << OB_MAC_TRANSPORT_HDR_SHIFT);
1867 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1868 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
1869 if (likely(skb->protocol == htons(ETH_P_IP))) {
1870 struct iphdr *iph = ip_hdr(skb);
1871 iph->check = 0;
1872 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1873 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1874 iph->daddr, 0,
1875 IPPROTO_TCP,
1876 0);
1877 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1878 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
1879 tcp_hdr(skb)->check =
1880 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1881 &ipv6_hdr(skb)->daddr,
1882 0, IPPROTO_TCP, 0);
1883 }
1884 return 1;
1885 }
1886 return 0;
1887}
1888
1889static void ql_hw_csum_setup(struct sk_buff *skb,
1890 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
1891{
1892 int len;
1893 struct iphdr *iph = ip_hdr(skb);
fd2df4f7 1894 __sum16 *check;
c4e84bde
RM
1895 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
1896 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
1897 mac_iocb_ptr->net_trans_offset =
1898 cpu_to_le16(skb_network_offset(skb) |
1899 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
1900
1901 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
1902 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
1903 if (likely(iph->protocol == IPPROTO_TCP)) {
1904 check = &(tcp_hdr(skb)->check);
1905 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
1906 mac_iocb_ptr->total_hdrs_len =
1907 cpu_to_le16(skb_transport_offset(skb) +
1908 (tcp_hdr(skb)->doff << 2));
1909 } else {
1910 check = &(udp_hdr(skb)->check);
1911 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
1912 mac_iocb_ptr->total_hdrs_len =
1913 cpu_to_le16(skb_transport_offset(skb) +
1914 sizeof(struct udphdr));
1915 }
1916 *check = ~csum_tcpudp_magic(iph->saddr,
1917 iph->daddr, len, iph->protocol, 0);
1918}
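
/*
 * A minimal user-space sketch of the IPv4 pseudo-header sum that
 * ql_tso() and ql_hw_csum_setup() seed into the TCP/UDP checksum
 * field so the hardware only has to add in the payload.  This is a
 * simplified model, not the kernel's csum_tcpudp_magic(): addresses
 * and length are taken in host byte order and folding is done with a
 * plain loop.  csum_fold and pseudo_hdr_sum are illustrative names.
 */
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	/* Fold carries until the sum fits in 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum of {saddr, daddr, protocol, transport length}. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	return csum_fold(sum);
}
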
1919
1920static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1921{
1922 struct tx_ring_desc *tx_ring_desc;
1923 struct ob_mac_iocb_req *mac_iocb_ptr;
1924 struct ql_adapter *qdev = netdev_priv(ndev);
1925 int tso;
1926 struct tx_ring *tx_ring;
1927 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1928
1929 tx_ring = &qdev->tx_ring[tx_ring_idx];
1930
1931 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1932 QPRINTK(qdev, TX_QUEUED, INFO,
1933 "%s: shutting down tx queue %d du to lack of resources.\n",
1934 __func__, tx_ring_idx);
1935 netif_stop_queue(ndev);
1936 atomic_inc(&tx_ring->queue_stopped);
1937 return NETDEV_TX_BUSY;
1938 }
1939 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1940 mac_iocb_ptr = tx_ring_desc->queue_entry;
 1941 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
c4e84bde
RM
1942
1943 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1944 mac_iocb_ptr->tid = tx_ring_desc->index;
1945 /* We use the upper 32-bits to store the tx queue for this IO.
1946 * When we get the completion we can use it to establish the context.
1947 */
1948 mac_iocb_ptr->txq_idx = tx_ring_idx;
1949 tx_ring_desc->skb = skb;
1950
1951 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1952
1953 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1954 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1955 vlan_tx_tag_get(skb));
1956 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1957 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1958 }
1959 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1960 if (tso < 0) {
1961 dev_kfree_skb_any(skb);
1962 return NETDEV_TX_OK;
1963 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1964 ql_hw_csum_setup(skb,
1965 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1966 }
0d979f74
RM
1967 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
1968 NETDEV_TX_OK) {
1969 QPRINTK(qdev, TX_QUEUED, ERR,
1970 "Could not map the segments.\n");
1971 return NETDEV_TX_BUSY;
1972 }
c4e84bde
RM
1973 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1974 tx_ring->prod_idx++;
1975 if (tx_ring->prod_idx == tx_ring->wq_len)
1976 tx_ring->prod_idx = 0;
1977 wmb();
1978
1979 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1980 ndev->trans_start = jiffies;
1981 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1982 tx_ring->prod_idx, skb->len);
1983
1984 atomic_dec(&tx_ring->tx_count);
1985 return NETDEV_TX_OK;
1986}
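
/*
 * A standalone sketch of the producer-index handling at the end of
 * qlge_send(): bump the index, wrap at the ring length, then publish
 * it to the device only after a write barrier so the IOCB contents
 * are visible before the doorbell.  tx_ring_model and
 * publish_prod_idx are illustrative names, not driver API;
 * __sync_synchronize() stands in for the kernel's wmb().
 */
#include <stdint.h>

struct tx_ring_model {
	uint32_t prod_idx;		/* next slot to hand to the device */
	uint32_t wq_len;		/* number of slots in the ring */
	volatile uint32_t *db_reg;	/* producer-index doorbell */
};

static void publish_prod_idx(struct tx_ring_model *ring)
{
	ring->prod_idx++;
	if (ring->prod_idx == ring->wq_len)
		ring->prod_idx = 0;

	__sync_synchronize();		/* order IOCB writes before doorbell */
	*ring->db_reg = ring->prod_idx;
}
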
1987
1988static void ql_free_shadow_space(struct ql_adapter *qdev)
1989{
1990 if (qdev->rx_ring_shadow_reg_area) {
1991 pci_free_consistent(qdev->pdev,
1992 PAGE_SIZE,
1993 qdev->rx_ring_shadow_reg_area,
1994 qdev->rx_ring_shadow_reg_dma);
1995 qdev->rx_ring_shadow_reg_area = NULL;
1996 }
1997 if (qdev->tx_ring_shadow_reg_area) {
1998 pci_free_consistent(qdev->pdev,
1999 PAGE_SIZE,
2000 qdev->tx_ring_shadow_reg_area,
2001 qdev->tx_ring_shadow_reg_dma);
2002 qdev->tx_ring_shadow_reg_area = NULL;
2003 }
2004}
2005
2006static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2007{
2008 qdev->rx_ring_shadow_reg_area =
2009 pci_alloc_consistent(qdev->pdev,
2010 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2011 if (qdev->rx_ring_shadow_reg_area == NULL) {
2012 QPRINTK(qdev, IFUP, ERR,
2013 "Allocation of RX shadow space failed.\n");
2014 return -ENOMEM;
2015 }
2016 qdev->tx_ring_shadow_reg_area =
2017 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2018 &qdev->tx_ring_shadow_reg_dma);
2019 if (qdev->tx_ring_shadow_reg_area == NULL) {
2020 QPRINTK(qdev, IFUP, ERR,
2021 "Allocation of TX shadow space failed.\n");
2022 goto err_wqp_sh_area;
2023 }
2024 return 0;
2025
2026err_wqp_sh_area:
2027 pci_free_consistent(qdev->pdev,
2028 PAGE_SIZE,
2029 qdev->rx_ring_shadow_reg_area,
2030 qdev->rx_ring_shadow_reg_dma);
2031 return -ENOMEM;
2032}
2033
2034static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2035{
2036 struct tx_ring_desc *tx_ring_desc;
2037 int i;
2038 struct ob_mac_iocb_req *mac_iocb_ptr;
2039
2040 mac_iocb_ptr = tx_ring->wq_base;
2041 tx_ring_desc = tx_ring->q;
2042 for (i = 0; i < tx_ring->wq_len; i++) {
2043 tx_ring_desc->index = i;
2044 tx_ring_desc->skb = NULL;
2045 tx_ring_desc->queue_entry = mac_iocb_ptr;
2046 mac_iocb_ptr++;
2047 tx_ring_desc++;
2048 }
2049 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2050 atomic_set(&tx_ring->queue_stopped, 0);
2051}
2052
2053static void ql_free_tx_resources(struct ql_adapter *qdev,
2054 struct tx_ring *tx_ring)
2055{
2056 if (tx_ring->wq_base) {
2057 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2058 tx_ring->wq_base, tx_ring->wq_base_dma);
2059 tx_ring->wq_base = NULL;
2060 }
2061 kfree(tx_ring->q);
2062 tx_ring->q = NULL;
2063}
2064
2065static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2066 struct tx_ring *tx_ring)
2067{
2068 tx_ring->wq_base =
2069 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2070 &tx_ring->wq_base_dma);
2071
2072 if ((tx_ring->wq_base == NULL)
2073 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2074 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2075 return -ENOMEM;
2076 }
2077 tx_ring->q =
2078 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2079 if (tx_ring->q == NULL)
2080 goto err;
2081
2082 return 0;
2083err:
2084 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2085 tx_ring->wq_base, tx_ring->wq_base_dma);
2086 return -ENOMEM;
2087}
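
/*
 * The (wq_base_dma & (wq_size - 1)) test in ql_alloc_tx_resources()
 * above only works because the queue size is a power of two.  A tiny
 * standalone illustration of that alignment check; is_aligned_pow2 is
 * an illustrative helper, not something the driver defines.
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_aligned_pow2(uint64_t addr, uint64_t size)
{
	/* Valid only when size is a nonzero power of two. */
	return (addr & (size - 1)) == 0;
}

/* e.g. is_aligned_pow2(0x3000, 0x1000) is true; (0x3400, 0x1000) is false. */
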
2088
8668ae92 2089static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2090{
2091 int i;
2092 struct bq_desc *lbq_desc;
2093
2094 for (i = 0; i < rx_ring->lbq_len; i++) {
2095 lbq_desc = &rx_ring->lbq[i];
2096 if (lbq_desc->p.lbq_page) {
2097 pci_unmap_page(qdev->pdev,
2098 pci_unmap_addr(lbq_desc, mapaddr),
2099 pci_unmap_len(lbq_desc, maplen),
2100 PCI_DMA_FROMDEVICE);
2101
2102 put_page(lbq_desc->p.lbq_page);
2103 lbq_desc->p.lbq_page = NULL;
2104 }
c4e84bde
RM
2105 }
2106}
2107
2108/*
2109 * Allocate and map a page for each element of the lbq.
2110 */
2111static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2112 struct rx_ring *rx_ring)
2113{
2114 int i;
2115 struct bq_desc *lbq_desc;
2116 u64 map;
2c9a0d41 2117 __le64 *bq = rx_ring->lbq_base;
c4e84bde
RM
2118
2119 for (i = 0; i < rx_ring->lbq_len; i++) {
2120 lbq_desc = &rx_ring->lbq[i];
 2121 memset(lbq_desc, 0, sizeof(*lbq_desc));
2c9a0d41 2122 lbq_desc->addr = bq;
c4e84bde
RM
2123 lbq_desc->index = i;
2124 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2125 if (unlikely(!lbq_desc->p.lbq_page)) {
2126 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2127 goto mem_error;
2128 } else {
2129 map = pci_map_page(qdev->pdev,
2130 lbq_desc->p.lbq_page,
2131 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2132 if (pci_dma_mapping_error(qdev->pdev, map)) {
2133 QPRINTK(qdev, IFUP, ERR,
2134 "PCI mapping failed.\n");
2135 goto mem_error;
2136 }
2137 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2138 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2c9a0d41 2139 *lbq_desc->addr = cpu_to_le64(map);
c4e84bde
RM
2140 }
2141 bq++;
2142 }
2143 return 0;
2144mem_error:
2145 ql_free_lbq_buffers(qdev, rx_ring);
2146 return -ENOMEM;
2147}
2148
8668ae92 2149static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
c4e84bde
RM
2150{
2151 int i;
2152 struct bq_desc *sbq_desc;
2153
2154 for (i = 0; i < rx_ring->sbq_len; i++) {
2155 sbq_desc = &rx_ring->sbq[i];
2156 if (sbq_desc == NULL) {
2157 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2158 return;
2159 }
2160 if (sbq_desc->p.skb) {
2161 pci_unmap_single(qdev->pdev,
2162 pci_unmap_addr(sbq_desc, mapaddr),
2163 pci_unmap_len(sbq_desc, maplen),
2164 PCI_DMA_FROMDEVICE);
2165 dev_kfree_skb(sbq_desc->p.skb);
2166 sbq_desc->p.skb = NULL;
2167 }
c4e84bde
RM
2168 }
2169}
2170
2171/* Allocate and map an skb for each element of the sbq. */
2172static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2173 struct rx_ring *rx_ring)
2174{
2175 int i;
2176 struct bq_desc *sbq_desc;
2177 struct sk_buff *skb;
2178 u64 map;
2c9a0d41 2179 __le64 *bq = rx_ring->sbq_base;
c4e84bde
RM
2180
2181 for (i = 0; i < rx_ring->sbq_len; i++) {
2182 sbq_desc = &rx_ring->sbq[i];
 2183 memset(sbq_desc, 0, sizeof(*sbq_desc));
2184 sbq_desc->index = i;
2c9a0d41 2185 sbq_desc->addr = bq;
c4e84bde
RM
2186 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2187 if (unlikely(!skb)) {
2188 /* Better luck next round */
2189 QPRINTK(qdev, IFUP, ERR,
2190 "small buff alloc failed for %d bytes at index %d.\n",
2191 rx_ring->sbq_buf_size, i);
2192 goto mem_err;
2193 }
2194 skb_reserve(skb, QLGE_SB_PAD);
2195 sbq_desc->p.skb = skb;
2196 /*
 2197 * Map only half the buffer, because the
 2198 * other half may get some data copied to it
 2199 * when the completion arrives.
2200 */
2201 map = pci_map_single(qdev->pdev,
2202 skb->data,
2203 rx_ring->sbq_buf_size / 2,
2204 PCI_DMA_FROMDEVICE);
2205 if (pci_dma_mapping_error(qdev->pdev, map)) {
2206 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2207 goto mem_err;
2208 }
2209 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2210 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2c9a0d41 2211 *sbq_desc->addr = cpu_to_le64(map);
c4e84bde
RM
2212 bq++;
2213 }
2214 return 0;
2215mem_err:
2216 ql_free_sbq_buffers(qdev, rx_ring);
2217 return -ENOMEM;
2218}
2219
2220static void ql_free_rx_resources(struct ql_adapter *qdev,
2221 struct rx_ring *rx_ring)
2222{
2223 if (rx_ring->sbq_len)
2224 ql_free_sbq_buffers(qdev, rx_ring);
2225 if (rx_ring->lbq_len)
2226 ql_free_lbq_buffers(qdev, rx_ring);
2227
2228 /* Free the small buffer queue. */
2229 if (rx_ring->sbq_base) {
2230 pci_free_consistent(qdev->pdev,
2231 rx_ring->sbq_size,
2232 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2233 rx_ring->sbq_base = NULL;
2234 }
2235
2236 /* Free the small buffer queue control blocks. */
2237 kfree(rx_ring->sbq);
2238 rx_ring->sbq = NULL;
2239
2240 /* Free the large buffer queue. */
2241 if (rx_ring->lbq_base) {
2242 pci_free_consistent(qdev->pdev,
2243 rx_ring->lbq_size,
2244 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2245 rx_ring->lbq_base = NULL;
2246 }
2247
2248 /* Free the large buffer queue control blocks. */
2249 kfree(rx_ring->lbq);
2250 rx_ring->lbq = NULL;
2251
2252 /* Free the rx queue. */
2253 if (rx_ring->cq_base) {
2254 pci_free_consistent(qdev->pdev,
2255 rx_ring->cq_size,
2256 rx_ring->cq_base, rx_ring->cq_base_dma);
2257 rx_ring->cq_base = NULL;
2258 }
2259}
2260
 2261 /* Allocate queues and buffers for this completion queue based
2262 * on the values in the parameter structure. */
2263static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2264 struct rx_ring *rx_ring)
2265{
2266
2267 /*
2268 * Allocate the completion queue for this rx_ring.
2269 */
2270 rx_ring->cq_base =
2271 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2272 &rx_ring->cq_base_dma);
2273
2274 if (rx_ring->cq_base == NULL) {
2275 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2276 return -ENOMEM;
2277 }
2278
2279 if (rx_ring->sbq_len) {
2280 /*
2281 * Allocate small buffer queue.
2282 */
2283 rx_ring->sbq_base =
2284 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2285 &rx_ring->sbq_base_dma);
2286
2287 if (rx_ring->sbq_base == NULL) {
2288 QPRINTK(qdev, IFUP, ERR,
2289 "Small buffer queue allocation failed.\n");
2290 goto err_mem;
2291 }
2292
2293 /*
2294 * Allocate small buffer queue control blocks.
2295 */
2296 rx_ring->sbq =
2297 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2298 GFP_KERNEL);
2299 if (rx_ring->sbq == NULL) {
2300 QPRINTK(qdev, IFUP, ERR,
2301 "Small buffer queue control block allocation failed.\n");
2302 goto err_mem;
2303 }
2304
2305 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2306 QPRINTK(qdev, IFUP, ERR,
2307 "Small buffer allocation failed.\n");
2308 goto err_mem;
2309 }
2310 }
2311
2312 if (rx_ring->lbq_len) {
2313 /*
2314 * Allocate large buffer queue.
2315 */
2316 rx_ring->lbq_base =
2317 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2318 &rx_ring->lbq_base_dma);
2319
2320 if (rx_ring->lbq_base == NULL) {
2321 QPRINTK(qdev, IFUP, ERR,
2322 "Large buffer queue allocation failed.\n");
2323 goto err_mem;
2324 }
2325 /*
2326 * Allocate large buffer queue control blocks.
2327 */
2328 rx_ring->lbq =
2329 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2330 GFP_KERNEL);
2331 if (rx_ring->lbq == NULL) {
2332 QPRINTK(qdev, IFUP, ERR,
2333 "Large buffer queue control block allocation failed.\n");
2334 goto err_mem;
2335 }
2336
2337 /*
2338 * Allocate the buffers.
2339 */
2340 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2341 QPRINTK(qdev, IFUP, ERR,
2342 "Large buffer allocation failed.\n");
2343 goto err_mem;
2344 }
2345 }
2346
2347 return 0;
2348
2349err_mem:
2350 ql_free_rx_resources(qdev, rx_ring);
2351 return -ENOMEM;
2352}
2353
2354static void ql_tx_ring_clean(struct ql_adapter *qdev)
2355{
2356 struct tx_ring *tx_ring;
2357 struct tx_ring_desc *tx_ring_desc;
2358 int i, j;
2359
2360 /*
2361 * Loop through all queues and free
2362 * any resources.
2363 */
2364 for (j = 0; j < qdev->tx_ring_count; j++) {
2365 tx_ring = &qdev->tx_ring[j];
2366 for (i = 0; i < tx_ring->wq_len; i++) {
2367 tx_ring_desc = &tx_ring->q[i];
2368 if (tx_ring_desc && tx_ring_desc->skb) {
2369 QPRINTK(qdev, IFDOWN, ERR,
2370 "Freeing lost SKB %p, from queue %d, index %d.\n",
2371 tx_ring_desc->skb, j,
2372 tx_ring_desc->index);
2373 ql_unmap_send(qdev, tx_ring_desc,
2374 tx_ring_desc->map_cnt);
2375 dev_kfree_skb(tx_ring_desc->skb);
2376 tx_ring_desc->skb = NULL;
2377 }
2378 }
2379 }
2380}
2381
c4e84bde
RM
2382static void ql_free_mem_resources(struct ql_adapter *qdev)
2383{
2384 int i;
2385
2386 for (i = 0; i < qdev->tx_ring_count; i++)
2387 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2388 for (i = 0; i < qdev->rx_ring_count; i++)
2389 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2390 ql_free_shadow_space(qdev);
2391}
2392
2393static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2394{
2395 int i;
2396
2397 /* Allocate space for our shadow registers and such. */
2398 if (ql_alloc_shadow_space(qdev))
2399 return -ENOMEM;
2400
2401 for (i = 0; i < qdev->rx_ring_count; i++) {
2402 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2403 QPRINTK(qdev, IFUP, ERR,
2404 "RX resource allocation failed.\n");
2405 goto err_mem;
2406 }
2407 }
2408 /* Allocate tx queue resources */
2409 for (i = 0; i < qdev->tx_ring_count; i++) {
2410 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2411 QPRINTK(qdev, IFUP, ERR,
2412 "TX resource allocation failed.\n");
2413 goto err_mem;
2414 }
2415 }
2416 return 0;
2417
2418err_mem:
2419 ql_free_mem_resources(qdev);
2420 return -ENOMEM;
2421}
2422
2423/* Set up the rx ring control block and pass it to the chip.
2424 * The control block is defined as
2425 * "Completion Queue Initialization Control Block", or cqicb.
2426 */
2427static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2428{
2429 struct cqicb *cqicb = &rx_ring->cqicb;
2430 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2431 (rx_ring->cq_id * sizeof(u64) * 4);
2432 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2433 (rx_ring->cq_id * sizeof(u64) * 4);
2434 void __iomem *doorbell_area =
2435 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2436 int err = 0;
2437 u16 bq_len;
2438
2439 /* Set up the shadow registers for this ring. */
2440 rx_ring->prod_idx_sh_reg = shadow_reg;
2441 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2442 shadow_reg += sizeof(u64);
2443 shadow_reg_dma += sizeof(u64);
2444 rx_ring->lbq_base_indirect = shadow_reg;
2445 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2446 shadow_reg += sizeof(u64);
2447 shadow_reg_dma += sizeof(u64);
2448 rx_ring->sbq_base_indirect = shadow_reg;
2449 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2450
2451 /* PCI doorbell mem area + 0x00 for consumer index register */
8668ae92 2452 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2453 rx_ring->cnsmr_idx = 0;
2454 rx_ring->curr_entry = rx_ring->cq_base;
2455
2456 /* PCI doorbell mem area + 0x04 for valid register */
2457 rx_ring->valid_db_reg = doorbell_area + 0x04;
2458
2459 /* PCI doorbell mem area + 0x18 for large buffer consumer */
8668ae92 2460 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
c4e84bde
RM
2461
2462 /* PCI doorbell mem area + 0x1c */
8668ae92 2463 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
c4e84bde
RM
2464
2465 memset((void *)cqicb, 0, sizeof(struct cqicb));
2466 cqicb->msix_vect = rx_ring->irq;
2467
459caf5a
RM
2468 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2469 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
c4e84bde 2470
97345524 2471 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
c4e84bde 2472
97345524 2473 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
c4e84bde
RM
2474
2475 /*
2476 * Set up the control block load flags.
2477 */
2478 cqicb->flags = FLAGS_LC | /* Load queue base address */
2479 FLAGS_LV | /* Load MSI-X vector */
2480 FLAGS_LI; /* Load irq delay values */
2481 if (rx_ring->lbq_len) {
2482 cqicb->flags |= FLAGS_LL; /* Load lbq values */
 2483 *((__le64 *) rx_ring->lbq_base_indirect) = cpu_to_le64(rx_ring->lbq_base_dma);
97345524
RM
2484 cqicb->lbq_addr =
2485 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
459caf5a
RM
2486 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2487 (u16) rx_ring->lbq_buf_size;
2488 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2489 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2490 (u16) rx_ring->lbq_len;
c4e84bde
RM
2491 cqicb->lbq_len = cpu_to_le16(bq_len);
2492 rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
2493 rx_ring->lbq_curr_idx = 0;
2494 rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
2495 rx_ring->lbq_free_cnt = 16;
2496 }
2497 if (rx_ring->sbq_len) {
2498 cqicb->flags |= FLAGS_LS; /* Load sbq values */
 2499 *((__le64 *) rx_ring->sbq_base_indirect) = cpu_to_le64(rx_ring->sbq_base_dma);
97345524
RM
2500 cqicb->sbq_addr =
2501 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
c4e84bde
RM
2502 cqicb->sbq_buf_size =
2503 cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
459caf5a
RM
2504 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2505 (u16) rx_ring->sbq_len;
c4e84bde
RM
2506 cqicb->sbq_len = cpu_to_le16(bq_len);
2507 rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
2508 rx_ring->sbq_curr_idx = 0;
2509 rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
2510 rx_ring->sbq_free_cnt = 16;
2511 }
2512 switch (rx_ring->type) {
2513 case TX_Q:
2514 /* If there's only one interrupt, then we use
2515 * worker threads to process the outbound
2516 * completion handling rx_rings. We do this so
 2517 * they can be run on multiple CPUs. There is
 2518 * room to refine this: only defer to a worker
 2519 * when the number of outbound completions on
 2520 * the queue exceeds some threshold and more
 2521 * than one queue is active, i.e. when the
 2522 * benefit would outweigh the cost of a
 2523 * context switch.
2524 * If there's more than one interrupt, then the
2525 * outbound completions are processed in the ISR.
2526 */
2527 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2528 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2529 else {
2530 /* With all debug warnings on we see a WARN_ON message
2531 * when we free the skb in the interrupt context.
2532 */
2533 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
2534 }
2535 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2536 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2537 break;
2538 case DEFAULT_Q:
2539 INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
2540 cqicb->irq_delay = 0;
2541 cqicb->pkt_delay = 0;
2542 break;
2543 case RX_Q:
2544 /* Inbound completion handling rx_rings run in
2545 * separate NAPI contexts.
2546 */
2547 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2548 64);
2549 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2550 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2551 break;
2552 default:
2553 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2554 rx_ring->type);
2555 }
2556 QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
2557 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2558 CFG_LCQ, rx_ring->cq_id);
2559 if (err) {
2560 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2561 return err;
2562 }
2563 QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
2564 /*
2565 * Advance the producer index for the buffer queues.
2566 */
2567 wmb();
2568 if (rx_ring->lbq_len)
2569 ql_write_db_reg(rx_ring->lbq_prod_idx,
2570 rx_ring->lbq_prod_idx_db_reg);
2571 if (rx_ring->sbq_len)
2572 ql_write_db_reg(rx_ring->sbq_prod_idx,
2573 rx_ring->sbq_prod_idx_db_reg);
2574 return err;
2575}
2576
2577static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2578{
2579 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2580 void __iomem *doorbell_area =
2581 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2582 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2583 (tx_ring->wq_id * sizeof(u64));
2584 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2585 (tx_ring->wq_id * sizeof(u64));
2586 int err = 0;
2587
2588 /*
2589 * Assign doorbell registers for this tx_ring.
2590 */
2591 /* TX PCI doorbell mem area for tx producer index */
8668ae92 2592 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
c4e84bde
RM
2593 tx_ring->prod_idx = 0;
2594 /* TX PCI doorbell mem area + 0x04 */
2595 tx_ring->valid_db_reg = doorbell_area + 0x04;
2596
2597 /*
2598 * Assign shadow registers for this tx_ring.
2599 */
2600 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2601 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2602
2603 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2604 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2605 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2606 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2607 wqicb->rid = 0;
97345524 2608 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
c4e84bde 2609
97345524 2610 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
c4e84bde
RM
2611
2612 ql_init_tx_ring(qdev, tx_ring);
2613
 2614 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2615 (u16) tx_ring->wq_id);
2616 if (err) {
2617 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2618 return err;
2619 }
2620 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2621 return err;
2622}
2623
2624static void ql_disable_msix(struct ql_adapter *qdev)
2625{
2626 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2627 pci_disable_msix(qdev->pdev);
2628 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2629 kfree(qdev->msi_x_entry);
2630 qdev->msi_x_entry = NULL;
2631 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2632 pci_disable_msi(qdev->pdev);
2633 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2634 }
2635}
2636
2637static void ql_enable_msix(struct ql_adapter *qdev)
2638{
2639 int i;
2640
2641 qdev->intr_count = 1;
2642 /* Get the MSIX vectors. */
2643 if (irq_type == MSIX_IRQ) {
2644 /* Try to alloc space for the msix struct,
2645 * if it fails then go to MSI/legacy.
2646 */
2647 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2648 sizeof(struct msix_entry),
2649 GFP_KERNEL);
2650 if (!qdev->msi_x_entry) {
2651 irq_type = MSI_IRQ;
2652 goto msi;
2653 }
2654
2655 for (i = 0; i < qdev->rx_ring_count; i++)
2656 qdev->msi_x_entry[i].entry = i;
2657
2658 if (!pci_enable_msix
2659 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2660 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2661 qdev->intr_count = qdev->rx_ring_count;
2662 QPRINTK(qdev, IFUP, INFO,
2663 "MSI-X Enabled, got %d vectors.\n",
2664 qdev->intr_count);
2665 return;
2666 } else {
2667 kfree(qdev->msi_x_entry);
2668 qdev->msi_x_entry = NULL;
2669 QPRINTK(qdev, IFUP, WARNING,
2670 "MSI-X Enable failed, trying MSI.\n");
2671 irq_type = MSI_IRQ;
2672 }
2673 }
2674msi:
2675 if (irq_type == MSI_IRQ) {
2676 if (!pci_enable_msi(qdev->pdev)) {
2677 set_bit(QL_MSI_ENABLED, &qdev->flags);
2678 QPRINTK(qdev, IFUP, INFO,
2679 "Running with MSI interrupts.\n");
2680 return;
2681 }
2682 }
2683 irq_type = LEG_IRQ;
c4e84bde
RM
2684 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2685}
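
/*
 * A compact sketch of the interrupt-mode fallback ladder implemented
 * by ql_enable_msix(): try MSI-X first, fall back to MSI, and finally
 * to a legacy INTx line.  The try_msix/try_msi callbacks stand in for
 * the real pci_enable_msix()/pci_enable_msi() calls and are
 * assumptions of this example, not driver functions.
 */
enum irq_mode { IRQ_MODE_MSIX, IRQ_MODE_MSI, IRQ_MODE_LEGACY };

static enum irq_mode pick_irq_mode(int (*try_msix)(void),
				   int (*try_msi)(void))
{
	if (try_msix && try_msix() == 0)
		return IRQ_MODE_MSIX;		/* one vector per ring */
	if (try_msi && try_msi() == 0)
		return IRQ_MODE_MSI;		/* single message interrupt */
	return IRQ_MODE_LEGACY;			/* shared INTx */
}
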
2686
2687/*
2688 * Here we build the intr_context structures based on
2689 * our rx_ring count and intr vector count.
2690 * The intr_context structure is used to hook each vector
2691 * to possibly different handlers.
2692 */
2693static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2694{
2695 int i = 0;
2696 struct intr_context *intr_context = &qdev->intr_context[0];
2697
2698 ql_enable_msix(qdev);
2699
2700 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
 2701 /* Each rx_ring has its
2702 * own intr_context since we have separate
2703 * vectors for each queue.
 2704 * This is only true when MSI-X is enabled.
2705 */
2706 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2707 qdev->rx_ring[i].irq = i;
2708 intr_context->intr = i;
2709 intr_context->qdev = qdev;
2710 /*
 2711 * We set up each vector's enable/disable/read bits so
2712 * there's no bit/mask calculations in the critical path.
2713 */
2714 intr_context->intr_en_mask =
2715 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2716 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2717 | i;
2718 intr_context->intr_dis_mask =
2719 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2720 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2721 INTR_EN_IHD | i;
2722 intr_context->intr_read_mask =
2723 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2724 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2725 i;
2726
2727 if (i == 0) {
2728 /*
2729 * Default queue handles bcast/mcast plus
2730 * async events. Needs buffers.
2731 */
2732 intr_context->handler = qlge_isr;
2733 sprintf(intr_context->name, "%s-default-queue",
2734 qdev->ndev->name);
2735 } else if (i < qdev->rss_ring_first_cq_id) {
2736 /*
2737 * Outbound queue is for outbound completions only.
2738 */
2739 intr_context->handler = qlge_msix_tx_isr;
c224969e 2740 sprintf(intr_context->name, "%s-tx-%d",
c4e84bde
RM
2741 qdev->ndev->name, i);
2742 } else {
2743 /*
2744 * Inbound queues handle unicast frames only.
2745 */
2746 intr_context->handler = qlge_msix_rx_isr;
c224969e 2747 sprintf(intr_context->name, "%s-rx-%d",
c4e84bde
RM
2748 qdev->ndev->name, i);
2749 }
2750 }
2751 } else {
2752 /*
2753 * All rx_rings use the same intr_context since
2754 * there is only one vector.
2755 */
2756 intr_context->intr = 0;
2757 intr_context->qdev = qdev;
2758 /*
 2759 * We set up each vector's enable/disable/read bits so
2760 * there's no bit/mask calculations in the critical path.
2761 */
2762 intr_context->intr_en_mask =
2763 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2764 intr_context->intr_dis_mask =
2765 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2766 INTR_EN_TYPE_DISABLE;
2767 intr_context->intr_read_mask =
2768 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2769 /*
2770 * Single interrupt means one handler for all rings.
2771 */
2772 intr_context->handler = qlge_isr;
2773 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
2774 for (i = 0; i < qdev->rx_ring_count; i++)
2775 qdev->rx_ring[i].irq = 0;
2776 }
2777}
2778
2779static void ql_free_irq(struct ql_adapter *qdev)
2780{
2781 int i;
2782 struct intr_context *intr_context = &qdev->intr_context[0];
2783
2784 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2785 if (intr_context->hooked) {
2786 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2787 free_irq(qdev->msi_x_entry[i].vector,
2788 &qdev->rx_ring[i]);
2789 QPRINTK(qdev, IFDOWN, ERR,
2790 "freeing msix interrupt %d.\n", i);
2791 } else {
2792 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2793 QPRINTK(qdev, IFDOWN, ERR,
2794 "freeing msi interrupt %d.\n", i);
2795 }
2796 }
2797 }
2798 ql_disable_msix(qdev);
2799}
2800
2801static int ql_request_irq(struct ql_adapter *qdev)
2802{
2803 int i;
2804 int status = 0;
2805 struct pci_dev *pdev = qdev->pdev;
2806 struct intr_context *intr_context = &qdev->intr_context[0];
2807
2808 ql_resolve_queues_to_irqs(qdev);
2809
2810 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2811 atomic_set(&intr_context->irq_cnt, 0);
2812 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2813 status = request_irq(qdev->msi_x_entry[i].vector,
2814 intr_context->handler,
2815 0,
2816 intr_context->name,
2817 &qdev->rx_ring[i]);
2818 if (status) {
2819 QPRINTK(qdev, IFUP, ERR,
2820 "Failed request for MSIX interrupt %d.\n",
2821 i);
2822 goto err_irq;
2823 } else {
2824 QPRINTK(qdev, IFUP, INFO,
2825 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2826 i,
2827 qdev->rx_ring[i].type ==
2828 DEFAULT_Q ? "DEFAULT_Q" : "",
2829 qdev->rx_ring[i].type ==
2830 TX_Q ? "TX_Q" : "",
2831 qdev->rx_ring[i].type ==
2832 RX_Q ? "RX_Q" : "", intr_context->name);
2833 }
2834 } else {
2835 QPRINTK(qdev, IFUP, DEBUG,
2836 "trying msi or legacy interrupts.\n");
2837 QPRINTK(qdev, IFUP, DEBUG,
2838 "%s: irq = %d.\n", __func__, pdev->irq);
2839 QPRINTK(qdev, IFUP, DEBUG,
2840 "%s: context->name = %s.\n", __func__,
2841 intr_context->name);
2842 QPRINTK(qdev, IFUP, DEBUG,
2843 "%s: dev_id = 0x%p.\n", __func__,
2844 &qdev->rx_ring[0]);
2845 status =
2846 request_irq(pdev->irq, qlge_isr,
2847 test_bit(QL_MSI_ENABLED,
2848 &qdev->
2849 flags) ? 0 : IRQF_SHARED,
2850 intr_context->name, &qdev->rx_ring[0]);
2851 if (status)
2852 goto err_irq;
2853
2854 QPRINTK(qdev, IFUP, ERR,
2855 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2856 i,
2857 qdev->rx_ring[0].type ==
2858 DEFAULT_Q ? "DEFAULT_Q" : "",
2859 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2860 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2861 intr_context->name);
2862 }
2863 intr_context->hooked = 1;
2864 }
2865 return status;
2866err_irq:
2867 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
2868 ql_free_irq(qdev);
2869 return status;
2870}
2871
2872static int ql_start_rss(struct ql_adapter *qdev)
2873{
2874 struct ricb *ricb = &qdev->ricb;
2875 int status = 0;
2876 int i;
2877 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2878
 2879 memset((void *)ricb, 0, sizeof(*ricb));
2880
2881 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2882 ricb->flags =
2883 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2884 RSS_RT6);
2885 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2886
2887 /*
2888 * Fill out the Indirection Table.
2889 */
def48b6e
RM
2890 for (i = 0; i < 256; i++)
2891 hash_id[i] = i & (qdev->rss_ring_count - 1);
c4e84bde
RM
2892
2893 /*
2894 * Random values for the IPv6 and IPv4 Hash Keys.
2895 */
2896 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2897 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2898
2899 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2900
 2901 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
2902 if (status) {
2903 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2904 return status;
2905 }
2906 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2907 return status;
2908}
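
/*
 * A standalone model of the indirection-table fill in ql_start_rss():
 * each of the 256 hash buckets is mapped to an inbound ring by masking
 * with (ring_count - 1), which spreads buckets evenly when the ring
 * count is a power of two.  fill_indirection_table is an illustrative
 * name, not a driver function.
 */
#include <stdint.h>

#define RSS_TABLE_ENTRIES 256

static void fill_indirection_table(uint8_t table[RSS_TABLE_ENTRIES],
				   unsigned int ring_count)
{
	unsigned int i;

	for (i = 0; i < RSS_TABLE_ENTRIES; i++)
		table[i] = i & (ring_count - 1);
}

/* With ring_count = 4 the buckets map to rings 0,1,2,3,0,1,2,3,... */
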
2909
2910/* Initialize the frame-to-queue routing. */
2911static int ql_route_initialize(struct ql_adapter *qdev)
2912{
2913 int status = 0;
2914 int i;
2915
2916 /* Clear all the entries in the routing table. */
2917 for (i = 0; i < 16; i++) {
2918 status = ql_set_routing_reg(qdev, i, 0, 0);
2919 if (status) {
2920 QPRINTK(qdev, IFUP, ERR,
2921 "Failed to init routing register for CAM packets.\n");
2922 return status;
2923 }
2924 }
2925
2926 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2927 if (status) {
2928 QPRINTK(qdev, IFUP, ERR,
2929 "Failed to init routing register for error packets.\n");
2930 return status;
2931 }
2932 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2933 if (status) {
2934 QPRINTK(qdev, IFUP, ERR,
2935 "Failed to init routing register for broadcast packets.\n");
2936 return status;
2937 }
2938 /* If we have more than one inbound queue, then turn on RSS in the
2939 * routing block.
2940 */
2941 if (qdev->rss_ring_count > 1) {
2942 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2943 RT_IDX_RSS_MATCH, 1);
2944 if (status) {
2945 QPRINTK(qdev, IFUP, ERR,
2946 "Failed to init routing register for MATCH RSS packets.\n");
2947 return status;
2948 }
2949 }
2950
2951 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2952 RT_IDX_CAM_HIT, 1);
2953 if (status) {
2954 QPRINTK(qdev, IFUP, ERR,
2955 "Failed to init routing register for CAM packets.\n");
2956 return status;
2957 }
2958 return status;
2959}
2960
bb58b5b6
RM
2961static int ql_cam_route_initialize(struct ql_adapter *qdev)
2962{
2963 int status;
2964
2965 status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
2966 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
2967 if (status) {
2968 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
2969 return status;
2970 }
2971
2972 status = ql_route_initialize(qdev);
2973 if (status)
2974 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
2975
2976 return status;
2977}
2978
c4e84bde
RM
2979static int ql_adapter_initialize(struct ql_adapter *qdev)
2980{
2981 u32 value, mask;
2982 int i;
2983 int status = 0;
2984
2985 /*
2986 * Set up the System register to halt on errors.
2987 */
2988 value = SYS_EFE | SYS_FAE;
2989 mask = value << 16;
2990 ql_write32(qdev, SYS, mask | value);
2991
2992 /* Set the default queue. */
2993 value = NIC_RCV_CFG_DFQ;
2994 mask = NIC_RCV_CFG_DFQ_MASK;
2995 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
2996
2997 /* Set the MPI interrupt to enabled. */
2998 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
2999
3000 /* Enable the function, set pagesize, enable error checking. */
3001 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3002 FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3003
3004 /* Set/clear header splitting. */
3005 mask = FSC_VM_PAGESIZE_MASK |
3006 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3007 ql_write32(qdev, FSC, mask | value);
3008
3009 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3010 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3011
3012 /* Start up the rx queues. */
3013 for (i = 0; i < qdev->rx_ring_count; i++) {
3014 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3015 if (status) {
3016 QPRINTK(qdev, IFUP, ERR,
3017 "Failed to start rx ring[%d].\n", i);
3018 return status;
3019 }
3020 }
3021
3022 /* If there is more than one inbound completion queue
3023 * then download a RICB to configure RSS.
3024 */
3025 if (qdev->rss_ring_count > 1) {
3026 status = ql_start_rss(qdev);
3027 if (status) {
3028 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3029 return status;
3030 }
3031 }
3032
3033 /* Start up the tx queues. */
3034 for (i = 0; i < qdev->tx_ring_count; i++) {
3035 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3036 if (status) {
3037 QPRINTK(qdev, IFUP, ERR,
3038 "Failed to start tx ring[%d].\n", i);
3039 return status;
3040 }
3041 }
3042
3043 status = ql_port_initialize(qdev);
3044 if (status) {
3045 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3046 return status;
3047 }
3048
bb58b5b6
RM
3049 /* Set up the MAC address and frame routing filter. */
3050 status = ql_cam_route_initialize(qdev);
c4e84bde 3051 if (status) {
bb58b5b6
RM
3052 QPRINTK(qdev, IFUP, ERR,
3053 "Failed to init CAM/Routing tables.\n");
c4e84bde
RM
3054 return status;
3055 }
3056
3057 /* Start NAPI for the RSS queues. */
3058 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
3059 QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
3060 i);
3061 napi_enable(&qdev->rx_ring[i].napi);
3062 }
3063
3064 return status;
3065}
3066
3067/* Issue soft reset to chip. */
3068static int ql_adapter_reset(struct ql_adapter *qdev)
3069{
3070 u32 value;
3071 int max_wait_time;
3072 int status = 0;
3073 int resetCnt = 0;
3074
3075#define MAX_RESET_CNT 1
3076issueReset:
3077 resetCnt++;
3078 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3079 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3080 /* Wait for reset to complete. */
3081 max_wait_time = 3;
3082 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3083 max_wait_time);
3084 do {
3085 value = ql_read32(qdev, RST_FO);
3086 if ((value & RST_FO_FR) == 0)
3087 break;
3088
3089 ssleep(1);
3090 } while ((--max_wait_time));
3091 if (value & RST_FO_FR) {
3092 QPRINTK(qdev, IFDOWN, ERR,
3093 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3094 if (resetCnt < MAX_RESET_CNT)
3095 goto issueReset;
3096 }
3097 if (max_wait_time == 0) {
3098 status = -ETIMEDOUT;
3099 QPRINTK(qdev, IFDOWN, ERR,
3100 "ETIMEOUT!!! errored out of resetting the chip!\n");
3101 }
3102
3103 return status;
3104}
3105
3106static void ql_display_dev_info(struct net_device *ndev)
3107{
3108 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3109
3110 QPRINTK(qdev, PROBE, INFO,
3111 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3112 "XG Roll = %d, XG Rev = %d.\n",
3113 qdev->func,
3114 qdev->chip_rev_id & 0x0000000f,
3115 qdev->chip_rev_id >> 4 & 0x0000000f,
3116 qdev->chip_rev_id >> 8 & 0x0000000f,
3117 qdev->chip_rev_id >> 12 & 0x0000000f);
7c510e4b 3118 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
c4e84bde
RM
3119}
3120
3121static int ql_adapter_down(struct ql_adapter *qdev)
3122{
3123 struct net_device *ndev = qdev->ndev;
3124 int i, status = 0;
3125 struct rx_ring *rx_ring;
3126
3127 netif_stop_queue(ndev);
3128 netif_carrier_off(ndev);
3129
6497b607
RM
3130 /* Don't kill the reset worker thread if we
3131 * are in the process of recovery.
3132 */
3133 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3134 cancel_delayed_work_sync(&qdev->asic_reset_work);
c4e84bde
RM
3135 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3136 cancel_delayed_work_sync(&qdev->mpi_work);
3137
3138 /* The default queue at index 0 is always processed in
3139 * a workqueue.
3140 */
3141 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3142
3143 /* The rest of the rx_rings are processed in
3144 * a workqueue only if it's a single interrupt
3145 * environment (MSI/Legacy).
3146 */
c062076c 3147 for (i = 1; i < qdev->rx_ring_count; i++) {
c4e84bde
RM
3148 rx_ring = &qdev->rx_ring[i];
3149 /* Only the RSS rings use NAPI on multi irq
3150 * environment. Outbound completion processing
3151 * is done in interrupt context.
3152 */
3153 if (i >= qdev->rss_ring_first_cq_id) {
3154 napi_disable(&rx_ring->napi);
3155 } else {
3156 cancel_delayed_work_sync(&rx_ring->rx_work);
3157 }
3158 }
3159
3160 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3161
3162 ql_disable_interrupts(qdev);
3163
3164 ql_tx_ring_clean(qdev);
3165
3166 spin_lock(&qdev->hw_lock);
3167 status = ql_adapter_reset(qdev);
3168 if (status)
3169 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3170 qdev->func);
3171 spin_unlock(&qdev->hw_lock);
3172 return status;
3173}
3174
3175static int ql_adapter_up(struct ql_adapter *qdev)
3176{
3177 int err = 0;
3178
3179 spin_lock(&qdev->hw_lock);
3180 err = ql_adapter_initialize(qdev);
3181 if (err) {
3182 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3183 spin_unlock(&qdev->hw_lock);
3184 goto err_init;
3185 }
3186 spin_unlock(&qdev->hw_lock);
3187 set_bit(QL_ADAPTER_UP, &qdev->flags);
3188 ql_enable_interrupts(qdev);
3189 ql_enable_all_completion_interrupts(qdev);
3190 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3191 netif_carrier_on(qdev->ndev);
3192 netif_start_queue(qdev->ndev);
3193 }
3194
3195 return 0;
3196err_init:
3197 ql_adapter_reset(qdev);
3198 return err;
3199}
3200
3201static int ql_cycle_adapter(struct ql_adapter *qdev)
3202{
3203 int status;
3204
3205 status = ql_adapter_down(qdev);
3206 if (status)
3207 goto error;
3208
3209 status = ql_adapter_up(qdev);
3210 if (status)
3211 goto error;
3212
3213 return status;
3214error:
3215 QPRINTK(qdev, IFUP, ALERT,
3216 "Driver up/down cycle failed, closing device\n");
3217 rtnl_lock();
3218 dev_close(qdev->ndev);
3219 rtnl_unlock();
3220 return status;
3221}
3222
3223static void ql_release_adapter_resources(struct ql_adapter *qdev)
3224{
3225 ql_free_mem_resources(qdev);
3226 ql_free_irq(qdev);
3227}
3228
3229static int ql_get_adapter_resources(struct ql_adapter *qdev)
3230{
3231 int status = 0;
3232
3233 if (ql_alloc_mem_resources(qdev)) {
3234 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3235 return -ENOMEM;
3236 }
3237 status = ql_request_irq(qdev);
3238 if (status)
3239 goto err_irq;
3240 return status;
3241err_irq:
3242 ql_free_mem_resources(qdev);
3243 return status;
3244}
3245
3246static int qlge_close(struct net_device *ndev)
3247{
3248 struct ql_adapter *qdev = netdev_priv(ndev);
3249
3250 /*
3251 * Wait for device to recover from a reset.
3252 * (Rarely happens, but possible.)
3253 */
3254 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3255 msleep(1);
3256 ql_adapter_down(qdev);
3257 ql_release_adapter_resources(qdev);
c4e84bde
RM
3258 return 0;
3259}
3260
3261static int ql_configure_rings(struct ql_adapter *qdev)
3262{
3263 int i;
3264 struct rx_ring *rx_ring;
3265 struct tx_ring *tx_ring;
3266 int cpu_cnt = num_online_cpus();
3267
3268 /*
3269 * For each processor present we allocate one
3270 * rx_ring for outbound completions, and one
3271 * rx_ring for inbound completions. Plus there is
3272 * always the one default queue. For the CPU
3273 * counts we end up with the following rx_rings:
3274 * rx_ring count =
3275 * one default queue +
3276 * (CPU count * outbound completion rx_ring) +
3277 * (CPU count * inbound (RSS) completion rx_ring)
3278 * To keep it simple we limit the total number of
 3279 * queues to < 32, so we cap the CPU count at 8.
3280 * This limitation can be removed when requested.
3281 */
3282
683d46a9
RM
3283 if (cpu_cnt > MAX_CPUS)
3284 cpu_cnt = MAX_CPUS;
c4e84bde
RM
3285
3286 /*
3287 * rx_ring[0] is always the default queue.
3288 */
3289 /* Allocate outbound completion ring for each CPU. */
3290 qdev->tx_ring_count = cpu_cnt;
3291 /* Allocate inbound completion (RSS) ring for each CPU. */
3292 qdev->rss_ring_count = cpu_cnt;
3293 /* cq_id for the first inbound ring handler. */
3294 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3295 /*
3296 * qdev->rx_ring_count:
3297 * Total number of rx_rings. This includes the one
3298 * default queue, a number of outbound completion
3299 * handler rx_rings, and the number of inbound
3300 * completion handler rx_rings.
3301 */
3302 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3303
c4e84bde
RM
3304 for (i = 0; i < qdev->tx_ring_count; i++) {
3305 tx_ring = &qdev->tx_ring[i];
 3306 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3307 tx_ring->qdev = qdev;
3308 tx_ring->wq_id = i;
3309 tx_ring->wq_len = qdev->tx_ring_size;
3310 tx_ring->wq_size =
3311 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3312
3313 /*
3314 * The completion queue ID for the tx rings start
3315 * immediately after the default Q ID, which is zero.
3316 */
3317 tx_ring->cq_id = i + 1;
3318 }
3319
3320 for (i = 0; i < qdev->rx_ring_count; i++) {
3321 rx_ring = &qdev->rx_ring[i];
 3322 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3323 rx_ring->qdev = qdev;
3324 rx_ring->cq_id = i;
3325 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3326 if (i == 0) { /* Default queue at index 0. */
3327 /*
3328 * Default queue handles bcast/mcast plus
3329 * async events. Needs buffers.
3330 */
3331 rx_ring->cq_len = qdev->rx_ring_size;
3332 rx_ring->cq_size =
3333 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3334 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3335 rx_ring->lbq_size =
2c9a0d41 3336 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3337 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3338 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3339 rx_ring->sbq_size =
2c9a0d41 3340 rx_ring->sbq_len * sizeof(__le64);
c4e84bde
RM
3341 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3342 rx_ring->type = DEFAULT_Q;
3343 } else if (i < qdev->rss_ring_first_cq_id) {
3344 /*
3345 * Outbound queue handles outbound completions only.
3346 */
3347 /* outbound cq is same size as tx_ring it services. */
3348 rx_ring->cq_len = qdev->tx_ring_size;
3349 rx_ring->cq_size =
3350 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3351 rx_ring->lbq_len = 0;
3352 rx_ring->lbq_size = 0;
3353 rx_ring->lbq_buf_size = 0;
3354 rx_ring->sbq_len = 0;
3355 rx_ring->sbq_size = 0;
3356 rx_ring->sbq_buf_size = 0;
3357 rx_ring->type = TX_Q;
3358 } else { /* Inbound completions (RSS) queues */
3359 /*
3360 * Inbound queues handle unicast frames only.
3361 */
3362 rx_ring->cq_len = qdev->rx_ring_size;
3363 rx_ring->cq_size =
3364 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3365 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3366 rx_ring->lbq_size =
2c9a0d41 3367 rx_ring->lbq_len * sizeof(__le64);
c4e84bde
RM
3368 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3369 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3370 rx_ring->sbq_size =
2c9a0d41 3371 rx_ring->sbq_len * sizeof(__le64);
c4e84bde
RM
3372 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3373 rx_ring->type = RX_Q;
3374 }
3375 }
3376 return 0;
3377}
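
/*
 * A standalone sketch of the ring layout computed in
 * ql_configure_rings(): cq_id 0 is the default queue, cq_ids
 * 1..cpu_cnt are outbound (TX completion) rings, and cq_ids
 * cpu_cnt+1..2*cpu_cnt are inbound (RSS) rings.  ring_layout and
 * compute_layout are illustrative names; MAX_CPUS_SKETCH stands in
 * for the driver's MAX_CPUS cap.
 */
#define MAX_CPUS_SKETCH 8

struct ring_layout {
	int tx_ring_count;		/* outbound completion rings */
	int rss_ring_count;		/* inbound (RSS) rings */
	int rss_ring_first_cq_id;	/* cq_id of the first RSS ring */
	int rx_ring_count;		/* default + outbound + inbound */
};

static struct ring_layout compute_layout(int cpu_cnt)
{
	struct ring_layout l;

	if (cpu_cnt > MAX_CPUS_SKETCH)
		cpu_cnt = MAX_CPUS_SKETCH;

	l.tx_ring_count = cpu_cnt;
	l.rss_ring_count = cpu_cnt;
	l.rss_ring_first_cq_id = cpu_cnt + 1;
	l.rx_ring_count = l.tx_ring_count + l.rss_ring_count + 1;
	return l;
}

/* With four online CPUs this yields nine rx_rings: cq_id 0 for the
 * default queue, 1-4 for TX completions and 5-8 for RSS.
 */
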
3378
3379static int qlge_open(struct net_device *ndev)
3380{
3381 int err = 0;
3382 struct ql_adapter *qdev = netdev_priv(ndev);
3383
3384 err = ql_configure_rings(qdev);
3385 if (err)
3386 return err;
3387
3388 err = ql_get_adapter_resources(qdev);
3389 if (err)
3390 goto error_up;
3391
3392 err = ql_adapter_up(qdev);
3393 if (err)
3394 goto error_up;
3395
3396 return err;
3397
3398error_up:
3399 ql_release_adapter_resources(qdev);
c4e84bde
RM
3400 return err;
3401}
3402
3403static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3404{
3405 struct ql_adapter *qdev = netdev_priv(ndev);
3406
3407 if (ndev->mtu == 1500 && new_mtu == 9000) {
3408 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3409 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3410 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3411 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3412 (ndev->mtu == 9000 && new_mtu == 9000)) {
3413 return 0;
3414 } else
3415 return -EINVAL;
3416 ndev->mtu = new_mtu;
3417 return 0;
3418}
3419
3420static struct net_device_stats *qlge_get_stats(struct net_device
3421 *ndev)
3422{
3423 struct ql_adapter *qdev = netdev_priv(ndev);
3424 return &qdev->stats;
3425}
3426
3427static void qlge_set_multicast_list(struct net_device *ndev)
3428{
3429 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3430 struct dev_mc_list *mc_ptr;
3431 int i;
3432
3433 spin_lock(&qdev->hw_lock);
3434 /*
3435 * Set or clear promiscuous mode if a
3436 * transition is taking place.
3437 */
3438 if (ndev->flags & IFF_PROMISC) {
3439 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3440 if (ql_set_routing_reg
3441 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3442 QPRINTK(qdev, HW, ERR,
3443 "Failed to set promiscous mode.\n");
3444 } else {
3445 set_bit(QL_PROMISCUOUS, &qdev->flags);
3446 }
3447 }
3448 } else {
3449 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3450 if (ql_set_routing_reg
3451 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3452 QPRINTK(qdev, HW, ERR,
3453 "Failed to clear promiscous mode.\n");
3454 } else {
3455 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3456 }
3457 }
3458 }
3459
3460 /*
3461 * Set or clear all multicast mode if a
3462 * transition is taking place.
3463 */
3464 if ((ndev->flags & IFF_ALLMULTI) ||
3465 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3466 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3467 if (ql_set_routing_reg
3468 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3469 QPRINTK(qdev, HW, ERR,
3470 "Failed to set all-multi mode.\n");
3471 } else {
3472 set_bit(QL_ALLMULTI, &qdev->flags);
3473 }
3474 }
3475 } else {
3476 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3477 if (ql_set_routing_reg
3478 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3479 QPRINTK(qdev, HW, ERR,
3480 "Failed to clear all-multi mode.\n");
3481 } else {
3482 clear_bit(QL_ALLMULTI, &qdev->flags);
3483 }
3484 }
3485 }
3486
3487 if (ndev->mc_count) {
3488 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3489 i++, mc_ptr = mc_ptr->next)
3490 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3491 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3492 QPRINTK(qdev, HW, ERR,
3493 "Failed to loadmulticast address.\n");
3494 goto exit;
3495 }
3496 if (ql_set_routing_reg
3497 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3498 QPRINTK(qdev, HW, ERR,
3499 "Failed to set multicast match mode.\n");
3500 } else {
3501 set_bit(QL_ALLMULTI, &qdev->flags);
3502 }
3503 }
3504exit:
3505 spin_unlock(&qdev->hw_lock);
3506}
3507
3508static int qlge_set_mac_address(struct net_device *ndev, void *p)
3509{
3510 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3511 struct sockaddr *addr = p;
8668ae92 3512 int ret = 0;
c4e84bde
RM
3513
3514 if (netif_running(ndev))
3515 return -EBUSY;
3516
3517 if (!is_valid_ether_addr(addr->sa_data))
3518 return -EADDRNOTAVAIL;
3519 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3520
3521 spin_lock(&qdev->hw_lock);
3522 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3523 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3524 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
8668ae92 3525 ret = -1;
c4e84bde
RM
3526 }
3527 spin_unlock(&qdev->hw_lock);
3528
8668ae92 3529 return ret;
c4e84bde
RM
3530}
3531
3532static void qlge_tx_timeout(struct net_device *ndev)
3533{
3534 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
6497b607 3535 ql_queue_asic_error(qdev);
c4e84bde
RM
3536}
3537
3538static void ql_asic_reset_work(struct work_struct *work)
3539{
3540 struct ql_adapter *qdev =
3541 container_of(work, struct ql_adapter, asic_reset_work.work);
3542 ql_cycle_adapter(qdev);
3543}
3544
3545static void ql_get_board_info(struct ql_adapter *qdev)
3546{
3547 qdev->func =
3548 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3549 if (qdev->func) {
3550 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3551 qdev->port_link_up = STS_PL1;
3552 qdev->port_init = STS_PI1;
3553 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3554 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3555 } else {
3556 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3557 qdev->port_link_up = STS_PL0;
3558 qdev->port_init = STS_PI0;
3559 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3560 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3561 }
3562 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3563}
3564
3565static void ql_release_all(struct pci_dev *pdev)
3566{
3567 struct net_device *ndev = pci_get_drvdata(pdev);
3568 struct ql_adapter *qdev = netdev_priv(ndev);
3569
3570 if (qdev->workqueue) {
3571 destroy_workqueue(qdev->workqueue);
3572 qdev->workqueue = NULL;
3573 }
3574 if (qdev->q_workqueue) {
3575 destroy_workqueue(qdev->q_workqueue);
3576 qdev->q_workqueue = NULL;
3577 }
3578 if (qdev->reg_base)
8668ae92 3579 iounmap(qdev->reg_base);
c4e84bde
RM
3580 if (qdev->doorbell_area)
3581 iounmap(qdev->doorbell_area);
3582 pci_release_regions(pdev);
3583 pci_set_drvdata(pdev, NULL);
3584}
3585
3586static int __devinit ql_init_device(struct pci_dev *pdev,
3587 struct net_device *ndev, int cards_found)
3588{
3589 struct ql_adapter *qdev = netdev_priv(ndev);
3590 int pos, err = 0;
3591 u16 val16;
3592
 3593 memset((void *)qdev, 0, sizeof(*qdev));
3594 err = pci_enable_device(pdev);
3595 if (err) {
3596 dev_err(&pdev->dev, "PCI device enable failed.\n");
3597 return err;
3598 }
3599
3600 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3601 if (pos <= 0) {
3602 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3603 "aborting.\n");
3604 goto err_out;
3605 } else {
3606 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3607 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3608 val16 |= (PCI_EXP_DEVCTL_CERE |
3609 PCI_EXP_DEVCTL_NFERE |
3610 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3611 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3612 }
3613
3614 err = pci_request_regions(pdev, DRV_NAME);
3615 if (err) {
3616 dev_err(&pdev->dev, "PCI region request failed.\n");
3617 goto err_out;
3618 }
3619
3620 pci_set_master(pdev);
3621 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3622 set_bit(QL_DMA64, &qdev->flags);
3623 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3624 } else {
3625 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3626 if (!err)
3627 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3628 }
3629
3630 if (err) {
3631 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3632 goto err_out;
3633 }
3634
3635 pci_set_drvdata(pdev, ndev);
3636 qdev->reg_base =
3637 ioremap_nocache(pci_resource_start(pdev, 1),
3638 pci_resource_len(pdev, 1));
3639 if (!qdev->reg_base) {
3640 dev_err(&pdev->dev, "Register mapping failed.\n");
3641 err = -ENOMEM;
3642 goto err_out;
3643 }
3644
3645 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3646 qdev->doorbell_area =
3647 ioremap_nocache(pci_resource_start(pdev, 3),
3648 pci_resource_len(pdev, 3));
3649 if (!qdev->doorbell_area) {
3650 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3651 err = -ENOMEM;
3652 goto err_out;
3653 }
3654
3655 ql_get_board_info(qdev);
3656 qdev->ndev = ndev;
3657 qdev->pdev = pdev;
3658 qdev->msg_enable = netif_msg_init(debug, default_msg);
3659 spin_lock_init(&qdev->hw_lock);
3660 spin_lock_init(&qdev->stats_lock);
3661
3662 /* make sure the EEPROM is good */
3663 err = ql_get_flash_params(qdev);
3664 if (err) {
3665 dev_err(&pdev->dev, "Invalid FLASH.\n");
3666 goto err_out;
3667 }
3668
3669 if (!is_valid_ether_addr(qdev->flash.mac_addr))
3670 goto err_out;
3671
3672 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3673 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3674
3675 /* Set up the default ring sizes. */
3676 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3677 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3678
3679 /* Set up the coalescing parameters. */
3680 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3681 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3682 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3683 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3684
3685 /*
3686 * Set up the operating parameters.
3687 */
3688 qdev->rx_csum = 1;
3689
3690 qdev->q_workqueue = create_workqueue(ndev->name);
3691 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3692 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3693 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3694 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3695
3696 if (!cards_found) {
3697 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3698 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3699 DRV_NAME, DRV_VERSION);
3700 }
3701 return 0;
3702err_out:
3703 ql_release_all(pdev);
3704 pci_disable_device(pdev);
3705 return err;
3706}
3707
25ed7849
SH
3708
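/* net_device callbacks exported to the network stack. */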
3709static const struct net_device_ops qlge_netdev_ops = {
3710 .ndo_open = qlge_open,
3711 .ndo_stop = qlge_close,
3712 .ndo_start_xmit = qlge_send,
3713 .ndo_change_mtu = qlge_change_mtu,
3714 .ndo_get_stats = qlge_get_stats,
3715 .ndo_set_multicast_list = qlge_set_multicast_list,
3716 .ndo_set_mac_address = qlge_set_mac_address,
3717 .ndo_validate_addr = eth_validate_addr,
3718 .ndo_tx_timeout = qlge_tx_timeout,
3719 .ndo_vlan_rx_register = ql_vlan_rx_register,
3720 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3721 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3722};
3723
c4e84bde
RM
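/* PCI probe entry point: allocate the net_device, run the common
 * device initialization, advertise checksum/TSO/VLAN offloads and
 * register the interface with the network stack.
 */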
3724static int __devinit qlge_probe(struct pci_dev *pdev,
3725 const struct pci_device_id *pci_entry)
3726{
3727 struct net_device *ndev = NULL;
3728 struct ql_adapter *qdev = NULL;
3729 static int cards_found = 0;
3730 int err = 0;
3731
3732 ndev = alloc_etherdev(sizeof(struct ql_adapter));
3733 if (!ndev)
3734 return -ENOMEM;
3735
3736 err = ql_init_device(pdev, ndev, cards_found);
3737 if (err < 0) {
3738 free_netdev(ndev);
3739 return err;
3740 }
3741
3742 qdev = netdev_priv(ndev);
3743 SET_NETDEV_DEV(ndev, &pdev->dev);
3744 ndev->features = (0
3745 | NETIF_F_IP_CSUM
3746 | NETIF_F_SG
3747 | NETIF_F_TSO
3748 | NETIF_F_TSO6
3749 | NETIF_F_TSO_ECN
3750 | NETIF_F_HW_VLAN_TX
3751 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3752
3753 if (test_bit(QL_DMA64, &qdev->flags))
3754 ndev->features |= NETIF_F_HIGHDMA;
3755
3756 /*
3757 * Set up net_device structure.
3758 */
3759 ndev->tx_queue_len = qdev->tx_ring_size;
3760 ndev->irq = pdev->irq;
25ed7849
SH
3761
3762 ndev->netdev_ops = &qlge_netdev_ops;
c4e84bde 3763 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
c4e84bde 3764 ndev->watchdog_timeo = 10 * HZ;
25ed7849 3765
c4e84bde
RM
3766 err = register_netdev(ndev);
3767 if (err) {
3768 dev_err(&pdev->dev, "net device registration failed.\n");
3769 ql_release_all(pdev);
3770 pci_disable_device(pdev);
3771 return err;
3772 }
3773 netif_carrier_off(ndev);
3774 netif_stop_queue(ndev);
3775 ql_display_dev_info(ndev);
3776 cards_found++;
3777 return 0;
3778}
3779
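/* PCI remove entry point: unregister the interface, release all PCI
 * resources and free the net_device allocated at probe time.
 */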
3780static void __devexit qlge_remove(struct pci_dev *pdev)
3781{
3782 struct net_device *ndev = pci_get_drvdata(pdev);
3783 unregister_netdev(ndev);
3784 ql_release_all(pdev);
3785 pci_disable_device(pdev);
3786 free_netdev(ndev);
3787}
3788
3789/*
3790 * This callback is called by the PCI subsystem whenever
3791 * a PCI bus error is detected.
3792 */
3793static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
3794 enum pci_channel_state state)
3795{
3796 struct net_device *ndev = pci_get_drvdata(pdev);
3797 struct ql_adapter *qdev = netdev_priv(ndev);
3798
3799 if (netif_running(ndev))
3800 ql_adapter_down(qdev);
3801
3802 pci_disable_device(pdev);
3803
3804 /* Request a slot reset. */
3805 return PCI_ERS_RESULT_NEED_RESET;
3806}
3807
3808/*
3809 * This callback is called after the PCI bus has been reset.
3810 * Basically, this tries to restart the card from scratch.
3811 * This is a shortened version of the device probe/discovery code;
3812 * it resembles the first half of the () routine.
3813 */
3814static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
3815{
3816 struct net_device *ndev = pci_get_drvdata(pdev);
3817 struct ql_adapter *qdev = netdev_priv(ndev);
3818
3819 if (pci_enable_device(pdev)) {
3820 QPRINTK(qdev, IFUP, ERR,
3821 "Cannot re-enable PCI device after reset.\n");
3822 return PCI_ERS_RESULT_DISCONNECT;
3823 }
3824
3825 pci_set_master(pdev);
3826
3827 netif_carrier_off(ndev);
3828 netif_stop_queue(ndev);
3829 ql_adapter_reset(qdev);
3830
3831 /* Sanity check: make sure the MAC address survived the reset intact. */
3832 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3833
3834 if (!is_valid_ether_addr(ndev->perm_addr)) {
3835 QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
3836 return PCI_ERS_RESULT_DISCONNECT;
3837 }
3838
3839 return PCI_ERS_RESULT_RECOVERED;
3840}
3841
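/* Last step of PCI error recovery: traffic may flow again, so restart
 * the adapter if the interface was running and re-attach the device.
 */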
3842static void qlge_io_resume(struct pci_dev *pdev)
3843{
3844 struct net_device *ndev = pci_get_drvdata(pdev);
3845 struct ql_adapter *qdev = netdev_priv(ndev);
3846
3847 pci_set_master(pdev);
3848
3849 if (netif_running(ndev)) {
3850 if (ql_adapter_up(qdev)) {
3851 QPRINTK(qdev, IFUP, ERR,
3852 "Device initialization failed after reset.\n");
3853 return;
3854 }
3855 }
3856
3857 netif_device_attach(ndev);
3858}
3859
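/* Callbacks used by the PCI error recovery (AER) infrastructure. */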
3860static struct pci_error_handlers qlge_err_handler = {
3861 .error_detected = qlge_io_error_detected,
3862 .slot_reset = qlge_io_slot_reset,
3863 .resume = qlge_io_resume,
3864};
3865
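/* Suspend: detach the interface, bring the adapter down if it was
 * running, delete the NAPI contexts and put the device to sleep.
 */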
3866static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3867{
3868 struct net_device *ndev = pci_get_drvdata(pdev);
3869 struct ql_adapter *qdev = netdev_priv(ndev);
0047e5d2 3870 int err, i;
c4e84bde
RM
3871
3872 netif_device_detach(ndev);
3873
3874 if (netif_running(ndev)) {
3875 err = ql_adapter_down(qdev);
3876 if (err)
3877 return err;
3878 }
3879
0047e5d2
RM
3880 for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++)
3881 netif_napi_del(&qdev->rx_ring[i].napi);
3882
c4e84bde
RM
3883 err = pci_save_state(pdev);
3884 if (err)
3885 return err;
3886
3887 pci_disable_device(pdev);
3888
3889 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3890
3891 return 0;
3892}
3893
04da2cf9 3894#ifdef CONFIG_PM
c4e84bde
RM
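/* Resume: restore PCI state, clear the wake-up settings and bring the
 * adapter back up if it was running when the system suspended.
 */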
3895static int qlge_resume(struct pci_dev *pdev)
3896{
3897 struct net_device *ndev = pci_get_drvdata(pdev);
3898 struct ql_adapter *qdev = netdev_priv(ndev);
3899 int err;
3900
3901 pci_set_power_state(pdev, PCI_D0);
3902 pci_restore_state(pdev);
3903 err = pci_enable_device(pdev);
3904 if (err) {
3905 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3906 return err;
3907 }
3908 pci_set_master(pdev);
3909
3910 pci_enable_wake(pdev, PCI_D3hot, 0);
3911 pci_enable_wake(pdev, PCI_D3cold, 0);
3912
3913 if (netif_running(ndev)) {
3914 err = ql_adapter_up(qdev);
3915 if (err)
3916 return err;
3917 }
3918
3919 netif_device_attach(ndev);
3920
3921 return 0;
3922}
04da2cf9 3923#endif /* CONFIG_PM */
c4e84bde
RM
3924
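/* Shutdown hook: quiesce the device by reusing the suspend path. */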
3925static void qlge_shutdown(struct pci_dev *pdev)
3926{
3927 qlge_suspend(pdev, PMSG_SUSPEND);
3928}
3929
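/* PCI driver definition tying probe/remove, power management and
 * error handling to the qlge device table.
 */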
3930static struct pci_driver qlge_driver = {
3931 .name = DRV_NAME,
3932 .id_table = qlge_pci_tbl,
3933 .probe = qlge_probe,
3934 .remove = __devexit_p(qlge_remove),
3935#ifdef CONFIG_PM
3936 .suspend = qlge_suspend,
3937 .resume = qlge_resume,
3938#endif
3939 .shutdown = qlge_shutdown,
3940 .err_handler = &qlge_err_handler
3941};
3942
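/* Module load/unload simply registers and unregisters the PCI driver. */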
3943static int __init qlge_init_module(void)
3944{
3945 return pci_register_driver(&qlge_driver);
3946}
3947
3948static void __exit qlge_exit(void)
3949{
3950 pci_unregister_driver(&qlge_driver);
3951}
3952
3953module_init(qlge_init_module);
3954module_exit(qlge_exit);