/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author:     Linux qlge network device driver by
 *                      Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
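
/*
 * Illustrative sketch of how the two parameters above are used at load
 * time (module name "qlge" assumed from DRV_NAME):
 *
 *	modprobe qlge debug=0x7 qlge_irq_type=2
 *
 * which lowers the NETIF_MSG_* verbosity and forces legacy interrupts.
 */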
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
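
/*
 * Illustrative usage sketch for the semaphore helpers above: callers
 * bracket their register access with a spinlock/unlock pair, e.g.
 * (SEM_MAC_ADDR_MASK is one of the masks decoded by ql_sem_trylock()):
 *
 *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;
 *	... program MAC_ADDR_IDX / MAC_ADDR_DATA ...
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */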
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
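
/*
 * Caller sketch for ql_write_cfg(): a completion queue init control
 * block (type name "struct cqicb" assumed from elsewhere in this
 * driver) would be downloaded to the chip with something like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * The CFG_LCQ/CFG_LRQ/CFG_LR bit selects the load operation and, via
 * the direction test above, whether the block is DMA'd to the device.
 */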
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status = ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status = ql_wait_reg_rdy(qdev,
						MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
312 /* Set up a MAC, multicast or VLAN address for the
313 * inbound frame matching.
315 static int ql_set_mac_addr_reg(struct ql_adapter
*qdev
, u8
*addr
, u32 type
,
322 case MAC_ADDR_TYPE_MULTI_MAC
:
324 u32 upper
= (addr
[0] << 8) | addr
[1];
325 u32 lower
= (addr
[2] << 24) | (addr
[3] << 16) |
326 (addr
[4] << 8) | (addr
[5]);
329 ql_wait_reg_rdy(qdev
,
330 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
333 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
334 (index
<< MAC_ADDR_IDX_SHIFT
) |
336 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
338 ql_wait_reg_rdy(qdev
,
339 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
342 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) |
343 (index
<< MAC_ADDR_IDX_SHIFT
) |
346 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
348 ql_wait_reg_rdy(qdev
,
349 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
354 case MAC_ADDR_TYPE_CAM_MAC
:
357 u32 upper
= (addr
[0] << 8) | addr
[1];
359 (addr
[2] << 24) | (addr
[3] << 16) | (addr
[4] << 8) |
362 QPRINTK(qdev
, IFUP
, DEBUG
,
363 "Adding %s address %pM"
364 " at index %d in the CAM.\n",
366 MAC_ADDR_TYPE_MULTI_MAC
) ? "MULTICAST" :
367 "UNICAST"), addr
, index
);
370 ql_wait_reg_rdy(qdev
,
371 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
374 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
375 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
377 ql_write32(qdev
, MAC_ADDR_DATA
, lower
);
379 ql_wait_reg_rdy(qdev
,
380 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
383 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
++) | /* offset */
384 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
386 ql_write32(qdev
, MAC_ADDR_DATA
, upper
);
388 ql_wait_reg_rdy(qdev
,
389 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
392 ql_write32(qdev
, MAC_ADDR_IDX
, (offset
) | /* offset */
393 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
395 /* This field should also include the queue id
396 and possibly the function id. Right now we hardcode
397 the route field to NIC core.
399 cam_output
= (CAM_OUT_ROUTE_NIC
|
401 func
<< CAM_OUT_FUNC_SHIFT
) |
402 (0 << CAM_OUT_CQ_ID_SHIFT
));
404 cam_output
|= CAM_OUT_RV
;
405 /* route to NIC core */
406 ql_write32(qdev
, MAC_ADDR_DATA
, cam_output
);
409 case MAC_ADDR_TYPE_VLAN
:
411 u32 enable_bit
= *((u32
*) &addr
[0]);
412 /* For VLAN, the addr actually holds a bit that
413 * either enables or disables the vlan id we are
414 * addressing. It's either MAC_ADDR_E on or off.
415 * That's bit-27 we're talking about.
417 QPRINTK(qdev
, IFUP
, INFO
, "%s VLAN ID %d %s the CAM.\n",
418 (enable_bit
? "Adding" : "Removing"),
419 index
, (enable_bit
? "to" : "from"));
422 ql_wait_reg_rdy(qdev
,
423 MAC_ADDR_IDX
, MAC_ADDR_MW
, 0);
426 ql_write32(qdev
, MAC_ADDR_IDX
, offset
| /* offset */
427 (index
<< MAC_ADDR_IDX_SHIFT
) | /* index */
429 enable_bit
); /* enable/disable */
432 case MAC_ADDR_TYPE_MULTI_FLTR
:
434 QPRINTK(qdev
, IFUP
, CRIT
,
435 "Address type %d not yet supported.\n", type
);
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
		qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
		qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
514 /* The NIC function for this chip has 16 routing indexes. Each one can be used
515 * to route different frame types to various inbound queues. We send broadcast/
516 * multicast/error frames to the default queue for slow handling,
517 * and CAM hit/RSS frames to the fast handling queues.
519 static int ql_set_routing_reg(struct ql_adapter
*qdev
, u32 index
, u32 mask
,
522 int status
= -EINVAL
; /* Return error if no mask match. */
525 QPRINTK(qdev
, IFUP
, DEBUG
,
526 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527 (enable
? "Adding" : "Removing"),
528 ((index
== RT_IDX_ALL_ERR_SLOT
) ? "MAC ERROR/ALL ERROR" : ""),
529 ((index
== RT_IDX_IP_CSUM_ERR_SLOT
) ? "IP CSUM ERROR" : ""),
531 RT_IDX_TCP_UDP_CSUM_ERR_SLOT
) ? "TCP/UDP CSUM ERROR" : ""),
532 ((index
== RT_IDX_BCAST_SLOT
) ? "BROADCAST" : ""),
533 ((index
== RT_IDX_MCAST_MATCH_SLOT
) ? "MULTICAST MATCH" : ""),
534 ((index
== RT_IDX_ALLMULTI_SLOT
) ? "ALL MULTICAST MATCH" : ""),
535 ((index
== RT_IDX_UNUSED6_SLOT
) ? "UNUSED6" : ""),
536 ((index
== RT_IDX_UNUSED7_SLOT
) ? "UNUSED7" : ""),
537 ((index
== RT_IDX_RSS_MATCH_SLOT
) ? "RSS ALL/IPV4 MATCH" : ""),
538 ((index
== RT_IDX_RSS_IPV6_SLOT
) ? "RSS IPV6" : ""),
539 ((index
== RT_IDX_RSS_TCP4_SLOT
) ? "RSS TCP4" : ""),
540 ((index
== RT_IDX_RSS_TCP6_SLOT
) ? "RSS TCP6" : ""),
541 ((index
== RT_IDX_CAM_HIT_SLOT
) ? "CAM HIT" : ""),
542 ((index
== RT_IDX_UNUSED013
) ? "UNUSED13" : ""),
543 ((index
== RT_IDX_UNUSED014
) ? "UNUSED14" : ""),
544 ((index
== RT_IDX_PROMISCUOUS_SLOT
) ? "PROMISCUOUS" : ""),
545 (enable
? "to" : "from"));
550 value
= RT_IDX_DST_CAM_Q
| /* dest */
551 RT_IDX_TYPE_NICQ
| /* type */
552 (RT_IDX_CAM_HIT_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
555 case RT_IDX_VALID
: /* Promiscuous Mode frames. */
557 value
= RT_IDX_DST_DFLT_Q
| /* dest */
558 RT_IDX_TYPE_NICQ
| /* type */
559 (RT_IDX_PROMISCUOUS_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
562 case RT_IDX_ERR
: /* Pass up MAC,IP,TCP/UDP error frames. */
564 value
= RT_IDX_DST_DFLT_Q
| /* dest */
565 RT_IDX_TYPE_NICQ
| /* type */
566 (RT_IDX_ALL_ERR_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
569 case RT_IDX_BCAST
: /* Pass up Broadcast frames to default Q. */
571 value
= RT_IDX_DST_DFLT_Q
| /* dest */
572 RT_IDX_TYPE_NICQ
| /* type */
573 (RT_IDX_BCAST_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
576 case RT_IDX_MCAST
: /* Pass up All Multicast frames. */
578 value
= RT_IDX_DST_DFLT_Q
| /* dest */
579 RT_IDX_TYPE_NICQ
| /* type */
580 (RT_IDX_ALLMULTI_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
583 case RT_IDX_MCAST_MATCH
: /* Pass up matched Multicast frames. */
585 value
= RT_IDX_DST_DFLT_Q
| /* dest */
586 RT_IDX_TYPE_NICQ
| /* type */
587 (RT_IDX_MCAST_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
590 case RT_IDX_RSS_MATCH
: /* Pass up matched RSS frames. */
592 value
= RT_IDX_DST_RSS
| /* dest */
593 RT_IDX_TYPE_NICQ
| /* type */
594 (RT_IDX_RSS_MATCH_SLOT
<< RT_IDX_IDX_SHIFT
);/* index */
597 case 0: /* Clear the E-bit on an entry. */
599 value
= RT_IDX_DST_DFLT_Q
| /* dest */
600 RT_IDX_TYPE_NICQ
| /* type */
601 (index
<< RT_IDX_IDX_SHIFT
);/* index */
605 QPRINTK(qdev
, IFUP
, ERR
, "Mask type %d not yet supported.\n",
612 status
= ql_wait_reg_rdy(qdev
, RT_IDX
, RT_IDX_MW
, 0);
615 value
|= (enable
? RT_IDX_E
: 0);
616 ql_write32(qdev
, RT_IDX
, value
);
617 ql_write32(qdev
, RT_DATA
, enable
? mask
: 0);
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
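
/*
 * The enable routine above pairs with ql_disable_completion_interrupt()
 * below.  A sketch of the intended non-MSI-X flow: the ISR disables the
 * completion interrupt (bumping irq_cnt), schedules NAPI, and the poll
 * routine re-enables it (dropping irq_cnt back to zero) when it has
 * consumed less than its budget:
 *
 *	ql_disable_completion_interrupt(qdev, intr_context->intr);
 *	napi_schedule(&rx_ring->napi);
 *	...
 *	napi_complete(napi);
 *	ql_enable_completion_interrupt(qdev, rx_ring->irq);
 */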
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
750 static int ql_get_8000_flash_params(struct ql_adapter
*qdev
)
754 __le32
*p
= (__le32
*)&qdev
->flash
;
758 /* Get flash offset for function and adjust
762 offset
= FUNC0_FLASH_OFFSET
/ sizeof(u32
);
764 offset
= FUNC1_FLASH_OFFSET
/ sizeof(u32
);
766 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
769 size
= sizeof(struct flash_params_8000
) / sizeof(u32
);
770 for (i
= 0; i
< size
; i
++, p
++) {
771 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
773 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
778 status
= ql_validate_flash(qdev
,
779 sizeof(struct flash_params_8000
) / sizeof(u16
),
782 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
787 /* Extract either manufacturer or BOFM modified
790 if (qdev
->flash
.flash_params_8000
.data_type1
== 2)
792 qdev
->flash
.flash_params_8000
.mac_addr1
,
793 qdev
->ndev
->addr_len
);
796 qdev
->flash
.flash_params_8000
.mac_addr
,
797 qdev
->ndev
->addr_len
);
799 if (!is_valid_ether_addr(mac_addr
)) {
800 QPRINTK(qdev
, IFUP
, ERR
, "Invalid MAC address.\n");
805 memcpy(qdev
->ndev
->dev_addr
,
807 qdev
->ndev
->addr_len
);
810 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
814 static int ql_get_8012_flash_params(struct ql_adapter
*qdev
)
818 __le32
*p
= (__le32
*)&qdev
->flash
;
820 u32 size
= sizeof(struct flash_params_8012
) / sizeof(u32
);
822 /* Second function's parameters follow the first
828 if (ql_sem_spinlock(qdev
, SEM_FLASH_MASK
))
831 for (i
= 0; i
< size
; i
++, p
++) {
832 status
= ql_read_flash_word(qdev
, i
+offset
, p
);
834 QPRINTK(qdev
, IFUP
, ERR
, "Error reading flash.\n");
840 status
= ql_validate_flash(qdev
,
841 sizeof(struct flash_params_8012
) / sizeof(u16
),
844 QPRINTK(qdev
, IFUP
, ERR
, "Invalid flash.\n");
849 if (!is_valid_ether_addr(qdev
->flash
.flash_params_8012
.mac_addr
)) {
854 memcpy(qdev
->ndev
->dev_addr
,
855 qdev
->flash
.flash_params_8012
.mac_addr
,
856 qdev
->ndev
->addr_len
);
859 ql_sem_unlock(qdev
, SEM_FLASH_MASK
);
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
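
/*
 * Usage sketch for ql_read_xgmac_reg64(): each 64-bit XGMAC statistic
 * occupies two consecutive 32-bit registers, so a (hypothetical)
 * counter at register offset XGMAC_TX_PKTS would be read with
 *
 *	u64 tx_pkts;
 *	if (ql_read_xgmac_reg64(qdev, XGMAC_TX_PKTS, &tx_pkts))
 *		... bail out ...
 *
 * while holding the XGMAC semaphore (qdev->xg_sem_mask), since the
 * address/data register pair is shared with the other NIC function.
 */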
928 static int ql_8000_port_initialize(struct ql_adapter
*qdev
)
932 * Get MPI firmware version for driver banner
935 status
= ql_mb_about_fw(qdev
);
938 status
= ql_mb_get_fw_state(qdev
);
941 /* Wake up a worker to get/set the TX/RX frame sizes. */
942 queue_delayed_work(qdev
->workqueue
, &qdev
->mpi_port_cfg_work
, 0);
947 /* Take the MAC Core out of reset.
948 * Enable statistics counting.
949 * Take the transmitter/receiver out of reset.
950 * This functionality may be done in the MPI firmware at a
953 static int ql_8012_port_initialize(struct ql_adapter
*qdev
)
958 if (ql_sem_trylock(qdev
, qdev
->xg_sem_mask
)) {
959 /* Another function has the semaphore, so
960 * wait for the port init bit to come ready.
962 QPRINTK(qdev
, LINK
, INFO
,
963 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964 status
= ql_wait_reg_rdy(qdev
, STS
, qdev
->port_init
, 0);
966 QPRINTK(qdev
, LINK
, CRIT
,
967 "Port initialize timed out.\n");
972 QPRINTK(qdev
, LINK
, INFO
, "Got xgmac semaphore!.\n");
973 /* Set the core reset. */
974 status
= ql_read_xgmac_reg(qdev
, GLOBAL_CFG
, &data
);
977 data
|= GLOBAL_CFG_RESET
;
978 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
982 /* Clear the core reset and turn on jumbo for receiver. */
983 data
&= ~GLOBAL_CFG_RESET
; /* Clear core reset. */
984 data
|= GLOBAL_CFG_JUMBO
; /* Turn on jumbo. */
985 data
|= GLOBAL_CFG_TX_STAT_EN
;
986 data
|= GLOBAL_CFG_RX_STAT_EN
;
987 status
= ql_write_xgmac_reg(qdev
, GLOBAL_CFG
, data
);
/* Enable transmitter, and clear its reset. */
992 status
= ql_read_xgmac_reg(qdev
, TX_CFG
, &data
);
995 data
&= ~TX_CFG_RESET
; /* Clear the TX MAC reset. */
996 data
|= TX_CFG_EN
; /* Enable the transmitter. */
997 status
= ql_write_xgmac_reg(qdev
, TX_CFG
, data
);
/* Enable receiver and clear its reset. */
1002 status
= ql_read_xgmac_reg(qdev
, RX_CFG
, &data
);
1005 data
&= ~RX_CFG_RESET
; /* Clear the RX MAC reset. */
1006 data
|= RX_CFG_EN
; /* Enable the receiver. */
1007 status
= ql_write_xgmac_reg(qdev
, RX_CFG
, data
);
1011 /* Turn on jumbo. */
1013 ql_write_xgmac_reg(qdev
, MAC_TX_PARAMS
, MAC_TX_PARAMS_JUMBO
| (0x2580 << 16));
1017 ql_write_xgmac_reg(qdev
, MAC_RX_PARAMS
, 0x2580);
1021 /* Signal to the world that the port is enabled. */
1022 ql_write32(qdev
, STS
, ((qdev
->port_init
<< 16) | qdev
->port_init
));
1024 ql_sem_unlock(qdev
, qdev
->xg_sem_mask
);
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
1044 static struct bq_desc
*ql_get_curr_lchunk(struct ql_adapter
*qdev
,
1045 struct rx_ring
*rx_ring
)
1047 struct bq_desc
*lbq_desc
= ql_get_curr_lbuf(rx_ring
);
1049 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1050 pci_unmap_addr(lbq_desc
, mapaddr
),
1051 rx_ring
->lbq_buf_size
,
1052 PCI_DMA_FROMDEVICE
);
1054 /* If it's the last chunk of our master page then
1057 if ((lbq_desc
->p
.pg_chunk
.offset
+ rx_ring
->lbq_buf_size
)
1058 == ql_lbq_block_size(qdev
))
1059 pci_unmap_page(qdev
->pdev
,
1060 lbq_desc
->p
.pg_chunk
.map
,
1061 ql_lbq_block_size(qdev
),
1062 PCI_DMA_FROMDEVICE
);
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
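
/*
 * ql_update_cq() and ql_write_cq_idx() implement the consumer side of a
 * completion queue: the driver walks entries starting at cq_base,
 * advancing cnsmr_idx (wrapping at cq_len), and only tells the hardware
 * how far it has caught up by writing the consumer index doorbell.  A
 * cleanup loop therefore looks roughly like
 *
 *	while (prod != rx_ring->cnsmr_idx) {
 *		... handle rx_ring->curr_entry ...
 *		ql_update_cq(rx_ring);
 *		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
 *	}
 *	ql_write_cq_idx(rx_ring);
 */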
1093 static int ql_get_next_chunk(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
,
1094 struct bq_desc
*lbq_desc
)
1096 if (!rx_ring
->pg_chunk
.page
) {
1098 rx_ring
->pg_chunk
.page
= alloc_pages(__GFP_COLD
| __GFP_COMP
|
1100 qdev
->lbq_buf_order
);
1101 if (unlikely(!rx_ring
->pg_chunk
.page
)) {
1102 QPRINTK(qdev
, DRV
, ERR
,
1103 "page allocation failed.\n");
1106 rx_ring
->pg_chunk
.offset
= 0;
1107 map
= pci_map_page(qdev
->pdev
, rx_ring
->pg_chunk
.page
,
1108 0, ql_lbq_block_size(qdev
),
1109 PCI_DMA_FROMDEVICE
);
1110 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1111 __free_pages(rx_ring
->pg_chunk
.page
,
1112 qdev
->lbq_buf_order
);
1113 QPRINTK(qdev
, DRV
, ERR
,
1114 "PCI mapping failed.\n");
1117 rx_ring
->pg_chunk
.map
= map
;
1118 rx_ring
->pg_chunk
.va
= page_address(rx_ring
->pg_chunk
.page
);
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1124 lbq_desc
->p
.pg_chunk
= rx_ring
->pg_chunk
;
1126 /* Adjust the master page chunk for next
1129 rx_ring
->pg_chunk
.offset
+= rx_ring
->lbq_buf_size
;
1130 if (rx_ring
->pg_chunk
.offset
== ql_lbq_block_size(qdev
)) {
1131 rx_ring
->pg_chunk
.page
= NULL
;
1132 lbq_desc
->p
.pg_chunk
.last_flag
= 1;
1134 rx_ring
->pg_chunk
.va
+= rx_ring
->lbq_buf_size
;
1135 get_page(rx_ring
->pg_chunk
.page
);
1136 lbq_desc
->p
.pg_chunk
.last_flag
= 0;
1140 /* Process (refill) a large buffer queue. */
1141 static void ql_update_lbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1143 u32 clean_idx
= rx_ring
->lbq_clean_idx
;
1144 u32 start_idx
= clean_idx
;
1145 struct bq_desc
*lbq_desc
;
1149 while (rx_ring
->lbq_free_cnt
> 32) {
1150 for (i
= 0; i
< 16; i
++) {
1151 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1152 "lbq: try cleaning clean_idx = %d.\n",
1154 lbq_desc
= &rx_ring
->lbq
[clean_idx
];
1155 if (ql_get_next_chunk(qdev
, rx_ring
, lbq_desc
)) {
1156 QPRINTK(qdev
, IFUP
, ERR
,
1157 "Could not get a page chunk.\n");
1161 map
= lbq_desc
->p
.pg_chunk
.map
+
1162 lbq_desc
->p
.pg_chunk
.offset
;
1163 pci_unmap_addr_set(lbq_desc
, mapaddr
, map
);
1164 pci_unmap_len_set(lbq_desc
, maplen
,
1165 rx_ring
->lbq_buf_size
);
1166 *lbq_desc
->addr
= cpu_to_le64(map
);
1168 pci_dma_sync_single_for_device(qdev
->pdev
, map
,
1169 rx_ring
->lbq_buf_size
,
1170 PCI_DMA_FROMDEVICE
);
1172 if (clean_idx
== rx_ring
->lbq_len
)
1176 rx_ring
->lbq_clean_idx
= clean_idx
;
1177 rx_ring
->lbq_prod_idx
+= 16;
1178 if (rx_ring
->lbq_prod_idx
== rx_ring
->lbq_len
)
1179 rx_ring
->lbq_prod_idx
= 0;
1180 rx_ring
->lbq_free_cnt
-= 16;
1183 if (start_idx
!= clean_idx
) {
1184 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring
->lbq_prod_idx
);
1187 ql_write_db_reg(rx_ring
->lbq_prod_idx
,
1188 rx_ring
->lbq_prod_idx_db_reg
);
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter
*qdev
, struct rx_ring
*rx_ring
)
1195 u32 clean_idx
= rx_ring
->sbq_clean_idx
;
1196 u32 start_idx
= clean_idx
;
1197 struct bq_desc
*sbq_desc
;
1201 while (rx_ring
->sbq_free_cnt
> 16) {
1202 for (i
= 0; i
< 16; i
++) {
1203 sbq_desc
= &rx_ring
->sbq
[clean_idx
];
1204 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1205 "sbq: try cleaning clean_idx = %d.\n",
1207 if (sbq_desc
->p
.skb
== NULL
) {
1208 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1209 "sbq: getting new skb for index %d.\n",
1212 netdev_alloc_skb(qdev
->ndev
,
1214 if (sbq_desc
->p
.skb
== NULL
) {
1215 QPRINTK(qdev
, PROBE
, ERR
,
1216 "Couldn't get an skb.\n");
1217 rx_ring
->sbq_clean_idx
= clean_idx
;
1220 skb_reserve(sbq_desc
->p
.skb
, QLGE_SB_PAD
);
1221 map
= pci_map_single(qdev
->pdev
,
1222 sbq_desc
->p
.skb
->data
,
1223 rx_ring
->sbq_buf_size
,
1224 PCI_DMA_FROMDEVICE
);
1225 if (pci_dma_mapping_error(qdev
->pdev
, map
)) {
1226 QPRINTK(qdev
, IFUP
, ERR
, "PCI mapping failed.\n");
1227 rx_ring
->sbq_clean_idx
= clean_idx
;
1228 dev_kfree_skb_any(sbq_desc
->p
.skb
);
1229 sbq_desc
->p
.skb
= NULL
;
1232 pci_unmap_addr_set(sbq_desc
, mapaddr
, map
);
1233 pci_unmap_len_set(sbq_desc
, maplen
,
1234 rx_ring
->sbq_buf_size
);
1235 *sbq_desc
->addr
= cpu_to_le64(map
);
1239 if (clean_idx
== rx_ring
->sbq_len
)
1242 rx_ring
->sbq_clean_idx
= clean_idx
;
1243 rx_ring
->sbq_prod_idx
+= 16;
1244 if (rx_ring
->sbq_prod_idx
== rx_ring
->sbq_len
)
1245 rx_ring
->sbq_prod_idx
= 0;
1246 rx_ring
->sbq_free_cnt
-= 16;
1249 if (start_idx
!= clean_idx
) {
1250 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring
->sbq_prod_idx
);
1253 ql_write_db_reg(rx_ring
->sbq_prod_idx
,
1254 rx_ring
->sbq_prod_idx_db_reg
);
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter
*qdev
,
1269 struct tx_ring_desc
*tx_ring_desc
, int mapped
)
1272 for (i
= 0; i
< mapped
; i
++) {
1273 if (i
== 0 || (i
== 7 && mapped
> 7)) {
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If its the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there is more than 6 frags,
1284 QPRINTK(qdev
, TX_DONE
, DEBUG
,
1285 "unmapping OAL area.\n");
1287 pci_unmap_single(qdev
->pdev
,
1288 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1290 pci_unmap_len(&tx_ring_desc
->map
[i
],
1294 QPRINTK(qdev
, TX_DONE
, DEBUG
, "unmapping frag %d.\n",
1296 pci_unmap_page(qdev
->pdev
,
1297 pci_unmap_addr(&tx_ring_desc
->map
[i
],
1299 pci_unmap_len(&tx_ring_desc
->map
[i
],
1300 maplen
), PCI_DMA_TODEVICE
);
1306 /* Map the buffers for this transmit. This will return
1307 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 static int ql_map_send(struct ql_adapter
*qdev
,
1310 struct ob_mac_iocb_req
*mac_iocb_ptr
,
1311 struct sk_buff
*skb
, struct tx_ring_desc
*tx_ring_desc
)
1313 int len
= skb_headlen(skb
);
1315 int frag_idx
, err
, map_idx
= 0;
1316 struct tx_buf_desc
*tbd
= mac_iocb_ptr
->tbd
;
1317 int frag_cnt
= skb_shinfo(skb
)->nr_frags
;
1320 QPRINTK(qdev
, TX_QUEUED
, DEBUG
, "frag_cnt = %d.\n", frag_cnt
);
1323 * Map the skb buffer first.
1325 map
= pci_map_single(qdev
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
1327 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1329 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1330 "PCI mapping failed with error: %d\n", err
);
1332 return NETDEV_TX_BUSY
;
1335 tbd
->len
= cpu_to_le32(len
);
1336 tbd
->addr
= cpu_to_le64(map
);
1337 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1338 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
, len
);
1342 * This loop fills the remainder of the 8 address descriptors
1343 * in the IOCB. If there are more than 7 fragments, then the
1344 * eighth address desc will point to an external list (OAL).
1345 * When this happens, the remainder of the frags will be stored
1348 for (frag_idx
= 0; frag_idx
< frag_cnt
; frag_idx
++, map_idx
++) {
1349 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[frag_idx
];
1351 if (frag_idx
== 6 && frag_cnt
> 7) {
1352 /* Let's tack on an sglist.
1353 * Our control block will now
1355 * iocb->seg[0] = skb->data
1356 * iocb->seg[1] = frag[0]
1357 * iocb->seg[2] = frag[1]
1358 * iocb->seg[3] = frag[2]
1359 * iocb->seg[4] = frag[3]
1360 * iocb->seg[5] = frag[4]
1361 * iocb->seg[6] = frag[5]
1362 * iocb->seg[7] = ptr to OAL (external sglist)
1363 * oal->seg[0] = frag[6]
1364 * oal->seg[1] = frag[7]
1365 * oal->seg[2] = frag[8]
1366 * oal->seg[3] = frag[9]
1367 * oal->seg[4] = frag[10]
1370 /* Tack on the OAL in the eighth segment of IOCB. */
1371 map
= pci_map_single(qdev
->pdev
, &tx_ring_desc
->oal
,
1374 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1376 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1377 "PCI mapping outbound address list with error: %d\n",
1382 tbd
->addr
= cpu_to_le64(map
);
1384 * The length is the number of fragments
1385 * that remain to be mapped times the length
1386 * of our sglist (OAL).
1389 cpu_to_le32((sizeof(struct tx_buf_desc
) *
1390 (frag_cnt
- frag_idx
)) | TX_DESC_C
);
1391 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
,
1393 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1394 sizeof(struct oal
));
1395 tbd
= (struct tx_buf_desc
*)&tx_ring_desc
->oal
;
1400 pci_map_page(qdev
->pdev
, frag
->page
,
1401 frag
->page_offset
, frag
->size
,
1404 err
= pci_dma_mapping_error(qdev
->pdev
, map
);
1406 QPRINTK(qdev
, TX_QUEUED
, ERR
,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd
->addr
= cpu_to_le64(map
);
1413 tbd
->len
= cpu_to_le32(frag
->size
);
1414 pci_unmap_addr_set(&tx_ring_desc
->map
[map_idx
], mapaddr
, map
);
1415 pci_unmap_len_set(&tx_ring_desc
->map
[map_idx
], maplen
,
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc
->map_cnt
= map_idx
;
1421 /* Terminate the last segment. */
1422 tbd
->len
= cpu_to_le32(le32_to_cpu(tbd
->len
) | TX_DESC_E
);
1423 return NETDEV_TX_OK
;
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
1430 * so they can be umapped.
1432 ql_unmap_send(qdev
, tx_ring_desc
, map_idx
);
1433 return NETDEV_TX_BUSY
;
1436 static void ql_realign_skb(struct sk_buff
*skb
, int len
)
1438 void *temp_addr
= skb
->data
;
1440 /* Undo the skb_reserve(skb,32) we did before
1441 * giving to hardware, and realign data on
1442 * a 2-byte boundary.
1444 skb
->data
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1445 skb
->tail
-= QLGE_SB_PAD
- NET_IP_ALIGN
;
1446 skb_copy_to_linear_data(skb
, temp_addr
,
1451 * This function builds an skb for the given inbound
1452 * completion. It will be rewritten for readability in the near
 * future, but for now it works well.
1455 static struct sk_buff
*ql_build_rx_skb(struct ql_adapter
*qdev
,
1456 struct rx_ring
*rx_ring
,
1457 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1459 struct bq_desc
*lbq_desc
;
1460 struct bq_desc
*sbq_desc
;
1461 struct sk_buff
*skb
= NULL
;
1462 u32 length
= le32_to_cpu(ib_mac_rsp
->data_len
);
1463 u32 hdr_len
= le32_to_cpu(ib_mac_rsp
->hdr_len
);
1466 * Handle the header buffer if present.
1468 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HV
&&
1469 ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1470 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Header of %d bytes in small buffer.\n", hdr_len
);
1472 * Headers fit nicely into a small buffer.
1474 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1475 pci_unmap_single(qdev
->pdev
,
1476 pci_unmap_addr(sbq_desc
, mapaddr
),
1477 pci_unmap_len(sbq_desc
, maplen
),
1478 PCI_DMA_FROMDEVICE
);
1479 skb
= sbq_desc
->p
.skb
;
1480 ql_realign_skb(skb
, hdr_len
);
1481 skb_put(skb
, hdr_len
);
1482 sbq_desc
->p
.skb
= NULL
;
1486 * Handle the data buffer(s).
1488 if (unlikely(!length
)) { /* Is there data too? */
1489 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1490 "No Data buffer in this packet.\n");
1494 if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DS
) {
1495 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1496 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1497 "Headers in small, data of %d bytes in small, combine them.\n", length
);
1499 * Data is less than small buffer size so it's
1500 * stuffed in a small buffer.
1501 * For this case we append the data
1502 * from the "data" small buffer to the "header" small
1505 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1506 pci_dma_sync_single_for_cpu(qdev
->pdev
,
1508 (sbq_desc
, mapaddr
),
1511 PCI_DMA_FROMDEVICE
);
1512 memcpy(skb_put(skb
, length
),
1513 sbq_desc
->p
.skb
->data
, length
);
1514 pci_dma_sync_single_for_device(qdev
->pdev
,
1521 PCI_DMA_FROMDEVICE
);
1523 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1524 "%d bytes in a single small buffer.\n", length
);
1525 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1526 skb
= sbq_desc
->p
.skb
;
1527 ql_realign_skb(skb
, length
);
1528 skb_put(skb
, length
);
1529 pci_unmap_single(qdev
->pdev
,
1530 pci_unmap_addr(sbq_desc
,
1532 pci_unmap_len(sbq_desc
,
1534 PCI_DMA_FROMDEVICE
);
1535 sbq_desc
->p
.skb
= NULL
;
1537 } else if (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_DL
) {
1538 if (ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
) {
1539 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1540 "Header in small, %d bytes in large. Chain large to small!\n", length
);
1542 * The data is in a single large buffer. We
1543 * chain it to the header buffer's skb and let
1546 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1547 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1548 "Chaining page at offset = %d,"
1549 "for %d bytes to skb.\n",
1550 lbq_desc
->p
.pg_chunk
.offset
, length
);
1551 skb_fill_page_desc(skb
, 0, lbq_desc
->p
.pg_chunk
.page
,
1552 lbq_desc
->p
.pg_chunk
.offset
,
1555 skb
->data_len
+= length
;
1556 skb
->truesize
+= length
;
1559 * The headers and data are in a single large buffer. We
1560 * copy it to a new skb and let it go. This can happen with
1561 * jumbo mtu on a non-TCP/UDP frame.
1563 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1564 skb
= netdev_alloc_skb(qdev
->ndev
, length
);
1566 QPRINTK(qdev
, PROBE
, DEBUG
,
1567 "No skb available, drop the packet.\n");
1570 pci_unmap_page(qdev
->pdev
,
1571 pci_unmap_addr(lbq_desc
,
1573 pci_unmap_len(lbq_desc
, maplen
),
1574 PCI_DMA_FROMDEVICE
);
1575 skb_reserve(skb
, NET_IP_ALIGN
);
1576 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length
);
1578 skb_fill_page_desc(skb
, 0,
1579 lbq_desc
->p
.pg_chunk
.page
,
1580 lbq_desc
->p
.pg_chunk
.offset
,
1583 skb
->data_len
+= length
;
1584 skb
->truesize
+= length
;
1586 __pskb_pull_tail(skb
,
1587 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1588 VLAN_ETH_HLEN
: ETH_HLEN
);
1592 * The data is in a chain of large buffers
1593 * pointed to by a small buffer. We loop
 * through and chain them to our small header
1596 * frags: There are 18 max frags and our small
1597 * buffer will hold 32 of them. The thing is,
1598 * we'll use 3 max for our 9000 byte jumbo
1599 * frames. If the MTU goes up we could
1600 * eventually be in trouble.
1603 sbq_desc
= ql_get_curr_sbuf(rx_ring
);
1604 pci_unmap_single(qdev
->pdev
,
1605 pci_unmap_addr(sbq_desc
, mapaddr
),
1606 pci_unmap_len(sbq_desc
, maplen
),
1607 PCI_DMA_FROMDEVICE
);
1608 if (!(ib_mac_rsp
->flags4
& IB_MAC_IOCB_RSP_HS
)) {
1610 * This is an non TCP/UDP IP frame, so
1611 * the headers aren't split into a small
1612 * buffer. We have to use the small buffer
1613 * that contains our sg list as our skb to
1614 * send upstairs. Copy the sg list here to
1615 * a local buffer and use it to find the
1618 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1619 "%d bytes of headers & data in chain of large.\n", length
);
1620 skb
= sbq_desc
->p
.skb
;
1621 sbq_desc
->p
.skb
= NULL
;
1622 skb_reserve(skb
, NET_IP_ALIGN
);
1624 while (length
> 0) {
1625 lbq_desc
= ql_get_curr_lchunk(qdev
, rx_ring
);
1626 size
= (length
< rx_ring
->lbq_buf_size
) ? length
:
1627 rx_ring
->lbq_buf_size
;
1629 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1630 "Adding page %d to skb for %d bytes.\n",
1632 skb_fill_page_desc(skb
, i
,
1633 lbq_desc
->p
.pg_chunk
.page
,
1634 lbq_desc
->p
.pg_chunk
.offset
,
1637 skb
->data_len
+= size
;
1638 skb
->truesize
+= size
;
1642 __pskb_pull_tail(skb
, (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) ?
1643 VLAN_ETH_HLEN
: ETH_HLEN
);
1648 /* Process an inbound completion from an rx ring. */
1649 static void ql_process_mac_rx_intr(struct ql_adapter
*qdev
,
1650 struct rx_ring
*rx_ring
,
1651 struct ib_mac_iocb_rsp
*ib_mac_rsp
)
1653 struct net_device
*ndev
= qdev
->ndev
;
1654 struct sk_buff
*skb
= NULL
;
1655 u16 vlan_id
= (le16_to_cpu(ib_mac_rsp
->vlan_id
) &
1656 IB_MAC_IOCB_RSP_VLAN_MASK
)
1658 QL_DUMP_IB_MAC_RSP(ib_mac_rsp
);
1660 skb
= ql_build_rx_skb(qdev
, rx_ring
, ib_mac_rsp
);
1661 if (unlikely(!skb
)) {
1662 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1663 "No skb available, drop packet.\n");
1664 rx_ring
->rx_dropped
++;
1668 /* Frame error, so drop the packet. */
1669 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_ERR_MASK
) {
1670 QPRINTK(qdev
, DRV
, ERR
, "Receive error, flags2 = 0x%x\n",
1671 ib_mac_rsp
->flags2
);
1672 dev_kfree_skb_any(skb
);
1673 rx_ring
->rx_errors
++;
1677 /* The max framesize filter on this chip is set higher than
1678 * MTU since FCoE uses 2k frames.
1680 if (skb
->len
> ndev
->mtu
+ ETH_HLEN
) {
1681 dev_kfree_skb_any(skb
);
1682 rx_ring
->rx_dropped
++;
1686 /* loopback self test for ethtool */
1687 if (test_bit(QL_SELFTEST
, &qdev
->flags
)) {
1688 ql_check_lb_frame(qdev
, skb
);
1689 dev_kfree_skb_any(skb
);
1693 prefetch(skb
->data
);
1695 if (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) {
1696 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "%s%s%s Multicast.\n",
1697 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1698 IB_MAC_IOCB_RSP_M_HASH
? "Hash" : "",
1699 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1700 IB_MAC_IOCB_RSP_M_REG
? "Registered" : "",
1701 (ib_mac_rsp
->flags1
& IB_MAC_IOCB_RSP_M_MASK
) ==
1702 IB_MAC_IOCB_RSP_M_PROM
? "Promiscuous" : "");
1703 rx_ring
->rx_multicast
++;
1705 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_P
) {
1706 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Promiscuous Packet.\n");
1709 skb
->protocol
= eth_type_trans(skb
, ndev
);
1710 skb
->ip_summed
= CHECKSUM_NONE
;
1712 /* If rx checksum is on, and there are no
1713 * csum or frame errors.
1715 if (qdev
->rx_csum
&&
1716 !(ib_mac_rsp
->flags1
& IB_MAC_CSUM_ERR_MASK
)) {
1718 if (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_T
) {
1719 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1720 "TCP checksum done!\n");
1721 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1722 } else if ((ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_U
) &&
1723 (ib_mac_rsp
->flags3
& IB_MAC_IOCB_RSP_V4
)) {
1724 /* Unfragmented ipv4 UDP frame. */
1725 struct iphdr
*iph
= (struct iphdr
*) skb
->data
;
1726 if (!(iph
->frag_off
&
1727 cpu_to_be16(IP_MF
|IP_OFFSET
))) {
1728 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1729 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1730 "TCP checksum done!\n");
1735 rx_ring
->rx_packets
++;
1736 rx_ring
->rx_bytes
+= skb
->len
;
1737 skb_record_rx_queue(skb
, rx_ring
->cq_id
);
1738 if (skb
->ip_summed
== CHECKSUM_UNNECESSARY
) {
1740 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1742 vlan_gro_receive(&rx_ring
->napi
, qdev
->vlgrp
,
1745 napi_gro_receive(&rx_ring
->napi
, skb
);
1748 (ib_mac_rsp
->flags2
& IB_MAC_IOCB_RSP_V
) &&
1750 vlan_hwaccel_receive_skb(skb
, qdev
->vlgrp
, vlan_id
);
1752 netif_receive_skb(skb
);
1756 /* Process an outbound completion from an rx ring. */
1757 static void ql_process_mac_tx_intr(struct ql_adapter
*qdev
,
1758 struct ob_mac_iocb_rsp
*mac_rsp
)
1760 struct tx_ring
*tx_ring
;
1761 struct tx_ring_desc
*tx_ring_desc
;
1763 QL_DUMP_OB_MAC_RSP(mac_rsp
);
1764 tx_ring
= &qdev
->tx_ring
[mac_rsp
->txq_idx
];
1765 tx_ring_desc
= &tx_ring
->q
[mac_rsp
->tid
];
1766 ql_unmap_send(qdev
, tx_ring_desc
, tx_ring_desc
->map_cnt
);
1767 tx_ring
->tx_bytes
+= (tx_ring_desc
->skb
)->len
;
1768 tx_ring
->tx_packets
++;
1769 dev_kfree_skb(tx_ring_desc
->skb
);
1770 tx_ring_desc
->skb
= NULL
;
1772 if (unlikely(mac_rsp
->flags1
& (OB_MAC_IOCB_RSP_E
|
1775 OB_MAC_IOCB_RSP_P
| OB_MAC_IOCB_RSP_B
))) {
1776 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_E
) {
1777 QPRINTK(qdev
, TX_DONE
, WARNING
,
1778 "Total descriptor length did not match transfer length.\n");
1780 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_S
) {
1781 QPRINTK(qdev
, TX_DONE
, WARNING
,
1782 "Frame too short to be legal, not sent.\n");
1784 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_L
) {
1785 QPRINTK(qdev
, TX_DONE
, WARNING
,
1786 "Frame too long, but sent anyway.\n");
1788 if (mac_rsp
->flags1
& OB_MAC_IOCB_RSP_B
) {
1789 QPRINTK(qdev
, TX_DONE
, WARNING
,
1790 "PCI backplane error. Frame not sent.\n");
1793 atomic_inc(&tx_ring
->tx_count
);
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
1815 static void ql_process_chip_ae_intr(struct ql_adapter
*qdev
,
1816 struct ib_ae_iocb_rsp
*ib_ae_rsp
)
1818 switch (ib_ae_rsp
->event
) {
1819 case MGMT_ERR_EVENT
:
1820 QPRINTK(qdev
, RX_ERR
, ERR
,
1821 "Management Processor Fatal Error.\n");
1822 ql_queue_fw_error(qdev
);
1825 case CAM_LOOKUP_ERR_EVENT
:
1826 QPRINTK(qdev
, LINK
, ERR
,
1827 "Multiple CAM hits lookup occurred.\n");
1828 QPRINTK(qdev
, DRV
, ERR
, "This event shouldn't occur.\n");
1829 ql_queue_asic_error(qdev
);
1832 case SOFT_ECC_ERROR_EVENT
:
1833 QPRINTK(qdev
, RX_ERR
, ERR
, "Soft ECC error detected.\n");
1834 ql_queue_asic_error(qdev
);
1837 case PCI_ERR_ANON_BUF_RD
:
1838 QPRINTK(qdev
, RX_ERR
, ERR
,
1839 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1841 ql_queue_asic_error(qdev
);
1845 QPRINTK(qdev
, DRV
, ERR
, "Unexpected event %d.\n",
1847 ql_queue_asic_error(qdev
);
1852 static int ql_clean_outbound_rx_ring(struct rx_ring
*rx_ring
)
1854 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1855 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1856 struct ob_mac_iocb_rsp
*net_rsp
= NULL
;
1859 struct tx_ring
*tx_ring
;
1860 /* While there are entries in the completion queue. */
1861 while (prod
!= rx_ring
->cnsmr_idx
) {
1863 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1864 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1865 prod
, rx_ring
->cnsmr_idx
);
1867 net_rsp
= (struct ob_mac_iocb_rsp
*)rx_ring
->curr_entry
;
1869 switch (net_rsp
->opcode
) {
1871 case OPCODE_OB_MAC_TSO_IOCB
:
1872 case OPCODE_OB_MAC_IOCB
:
1873 ql_process_mac_tx_intr(qdev
, net_rsp
);
1876 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1877 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1881 ql_update_cq(rx_ring
);
1882 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1884 ql_write_cq_idx(rx_ring
);
1885 tx_ring
= &qdev
->tx_ring
[net_rsp
->txq_idx
];
1886 if (__netif_subqueue_stopped(qdev
->ndev
, tx_ring
->wq_id
) &&
1888 if (atomic_read(&tx_ring
->queue_stopped
) &&
1889 (atomic_read(&tx_ring
->tx_count
) > (tx_ring
->wq_len
/ 4)))
1891 * The queue got stopped because the tx_ring was full.
1892 * Wake it up, because it's now at least 25% empty.
1894 netif_wake_subqueue(qdev
->ndev
, tx_ring
->wq_id
);
1900 static int ql_clean_inbound_rx_ring(struct rx_ring
*rx_ring
, int budget
)
1902 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1903 u32 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1904 struct ql_net_rsp_iocb
*net_rsp
;
1907 /* While there are entries in the completion queue. */
1908 while (prod
!= rx_ring
->cnsmr_idx
) {
1910 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1911 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring
->cq_id
,
1912 prod
, rx_ring
->cnsmr_idx
);
1914 net_rsp
= rx_ring
->curr_entry
;
1916 switch (net_rsp
->opcode
) {
1917 case OPCODE_IB_MAC_IOCB
:
1918 ql_process_mac_rx_intr(qdev
, rx_ring
,
1919 (struct ib_mac_iocb_rsp
*)
1923 case OPCODE_IB_AE_IOCB
:
1924 ql_process_chip_ae_intr(qdev
, (struct ib_ae_iocb_rsp
*)
1929 QPRINTK(qdev
, RX_STATUS
, DEBUG
,
1930 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1935 ql_update_cq(rx_ring
);
1936 prod
= ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
);
1937 if (count
== budget
)
1940 ql_update_buffer_queues(qdev
, rx_ring
);
1941 ql_write_cq_idx(rx_ring
);
1945 static int ql_napi_poll_msix(struct napi_struct
*napi
, int budget
)
1947 struct rx_ring
*rx_ring
= container_of(napi
, struct rx_ring
, napi
);
1948 struct ql_adapter
*qdev
= rx_ring
->qdev
;
1949 struct rx_ring
*trx_ring
;
1950 int i
, work_done
= 0;
1951 struct intr_context
*ctx
= &qdev
->intr_context
[rx_ring
->cq_id
];
1953 QPRINTK(qdev
, RX_STATUS
, DEBUG
, "Enter, NAPI POLL cq_id = %d.\n",
1956 /* Service the TX rings first. They start
1957 * right after the RSS rings. */
1958 for (i
= qdev
->rss_ring_count
; i
< qdev
->rx_ring_count
; i
++) {
1959 trx_ring
= &qdev
->rx_ring
[i
];
1960 /* If this TX completion ring belongs to this vector and
1961 * it's not empty then service it.
1963 if ((ctx
->irq_mask
& (1 << trx_ring
->cq_id
)) &&
1964 (ql_read_sh_reg(trx_ring
->prod_idx_sh_reg
) !=
1965 trx_ring
->cnsmr_idx
)) {
1966 QPRINTK(qdev
, INTR
, DEBUG
,
1967 "%s: Servicing TX completion ring %d.\n",
1968 __func__
, trx_ring
->cq_id
);
1969 ql_clean_outbound_rx_ring(trx_ring
);
1974 * Now service the RSS ring if it's active.
1976 if (ql_read_sh_reg(rx_ring
->prod_idx_sh_reg
) !=
1977 rx_ring
->cnsmr_idx
) {
1978 QPRINTK(qdev
, INTR
, DEBUG
,
1979 "%s: Servicing RX completion ring %d.\n",
1980 __func__
, rx_ring
->cq_id
);
1981 work_done
= ql_clean_inbound_rx_ring(rx_ring
, budget
);
1984 if (work_done
< budget
) {
1985 napi_complete(napi
);
1986 ql_enable_completion_interrupt(qdev
, rx_ring
->irq
);
1991 static void qlge_vlan_rx_register(struct net_device
*ndev
, struct vlan_group
*grp
)
1993 struct ql_adapter
*qdev
= netdev_priv(ndev
);
1997 QPRINTK(qdev
, IFUP
, DEBUG
, "Turning on VLAN in NIC_RCV_CFG.\n");
1998 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
|
1999 NIC_RCV_CFG_VLAN_MATCH_AND_NON
);
2001 QPRINTK(qdev
, IFUP
, DEBUG
,
2002 "Turning off VLAN in NIC_RCV_CFG.\n");
2003 ql_write32(qdev
, NIC_RCV_CFG
, NIC_RCV_CFG_VLAN_MASK
);
2007 static void qlge_vlan_rx_add_vid(struct net_device
*ndev
, u16 vid
)
2009 struct ql_adapter
*qdev
= netdev_priv(ndev
);
2010 u32 enable_bit
= MAC_ADDR_E
;
2013 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
2016 if (ql_set_mac_addr_reg
2017 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
2018 QPRINTK(qdev
, IFUP
, ERR
, "Failed to init vlan address.\n");
2020 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
2023 static void qlge_vlan_rx_kill_vid(struct net_device
*ndev
, u16 vid
)
2025 struct ql_adapter
*qdev
= netdev_priv(ndev
);
2029 status
= ql_sem_spinlock(qdev
, SEM_MAC_ADDR_MASK
);
2033 if (ql_set_mac_addr_reg
2034 (qdev
, (u8
*) &enable_bit
, MAC_ADDR_TYPE_VLAN
, vid
)) {
2035 QPRINTK(qdev
, IFUP
, ERR
, "Failed to clear vlan address.\n");
2037 ql_sem_unlock(qdev
, SEM_MAC_ADDR_MASK
);
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}
2049 /* This handles a fatal error, MPI activity, and the default
2050 * rx_ring in an MSI-X multiple vector environment.
2051 * In MSI/Legacy environment it also process the rest of
2054 static irqreturn_t
qlge_isr(int irq
, void *dev_id
)
2056 struct rx_ring
*rx_ring
= dev_id
;
2057 struct ql_adapter
*qdev
= rx_ring
->qdev
;
2058 struct intr_context
*intr_context
= &qdev
->intr_context
[0];
2062 spin_lock(&qdev
->hw_lock
);
2063 if (atomic_read(&qdev
->intr_context
[0].irq_cnt
)) {
2064 QPRINTK(qdev
, INTR
, DEBUG
, "Shared Interrupt, Not ours!\n");
2065 spin_unlock(&qdev
->hw_lock
);
2068 spin_unlock(&qdev
->hw_lock
);
2070 var
= ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2073 * Check for fatal error.
2076 ql_queue_asic_error(qdev
);
2077 QPRINTK(qdev
, INTR
, ERR
, "Got fatal error, STS = %x.\n", var
);
2078 var
= ql_read32(qdev
, ERR_STS
);
2079 QPRINTK(qdev
, INTR
, ERR
,
2080 "Resetting chip. Error Status Register = 0x%x\n", var
);
2085 * Check MPI processor activity.
2087 if ((var
& STS_PI
) &&
2088 (ql_read32(qdev
, INTR_MASK
) & INTR_MASK_PI
)) {
2090 * We've got an async event or mailbox completion.
2091 * Handle it and clear the source of the interrupt.
2093 QPRINTK(qdev
, INTR
, ERR
, "Got MPI processor interrupt.\n");
2094 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2095 ql_write32(qdev
, INTR_MASK
, (INTR_MASK_PI
<< 16));
2096 queue_delayed_work_on(smp_processor_id(),
2097 qdev
->workqueue
, &qdev
->mpi_work
, 0);
2102 * Get the bit-mask that shows the active queues for this
2103 * pass. Compare it to the queues that this irq services
2104 * and call napi if there's a match.
2106 var
= ql_read32(qdev
, ISR1
);
2107 if (var
& intr_context
->irq_mask
) {
2108 QPRINTK(qdev
, INTR
, INFO
,
2109 "Waking handler for rx_ring[0].\n");
2110 ql_disable_completion_interrupt(qdev
, intr_context
->intr
);
2111 napi_schedule(&rx_ring
->napi
);
2114 ql_enable_completion_interrupt(qdev
, intr_context
->intr
);
2115 return work_done
? IRQ_HANDLED
: IRQ_NONE
;
2118 static int ql_tso(struct sk_buff
*skb
, struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2121 if (skb_is_gso(skb
)) {
2123 if (skb_header_cloned(skb
)) {
2124 err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
2129 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2130 mac_iocb_ptr
->flags3
|= OB_MAC_TSO_IOCB_IC
;
2131 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2132 mac_iocb_ptr
->total_hdrs_len
=
2133 cpu_to_le16(skb_transport_offset(skb
) + tcp_hdrlen(skb
));
2134 mac_iocb_ptr
->net_trans_offset
=
2135 cpu_to_le16(skb_network_offset(skb
) |
2136 skb_transport_offset(skb
)
2137 << OB_MAC_TRANSPORT_HDR_SHIFT
);
2138 mac_iocb_ptr
->mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2139 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_LSO
;
2140 if (likely(skb
->protocol
== htons(ETH_P_IP
))) {
2141 struct iphdr
*iph
= ip_hdr(skb
);
2143 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2144 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
2148 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
2149 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP6
;
2150 tcp_hdr(skb
)->check
=
2151 ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2152 &ipv6_hdr(skb
)->daddr
,
2160 static void ql_hw_csum_setup(struct sk_buff
*skb
,
2161 struct ob_mac_tso_iocb_req
*mac_iocb_ptr
)
2164 struct iphdr
*iph
= ip_hdr(skb
);
2166 mac_iocb_ptr
->opcode
= OPCODE_OB_MAC_TSO_IOCB
;
2167 mac_iocb_ptr
->frame_len
= cpu_to_le32((u32
) skb
->len
);
2168 mac_iocb_ptr
->net_trans_offset
=
2169 cpu_to_le16(skb_network_offset(skb
) |
2170 skb_transport_offset(skb
) << OB_MAC_TRANSPORT_HDR_SHIFT
);
2172 mac_iocb_ptr
->flags1
|= OB_MAC_TSO_IOCB_IP4
;
2173 len
= (ntohs(iph
->tot_len
) - (iph
->ihl
<< 2));
2174 if (likely(iph
->protocol
== IPPROTO_TCP
)) {
2175 check
= &(tcp_hdr(skb
)->check
);
2176 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_TC
;
2177 mac_iocb_ptr
->total_hdrs_len
=
2178 cpu_to_le16(skb_transport_offset(skb
) +
2179 (tcp_hdr(skb
)->doff
<< 2));
2181 check
= &(udp_hdr(skb
)->check
);
2182 mac_iocb_ptr
->flags2
|= OB_MAC_TSO_IOCB_UC
;
2183 mac_iocb_ptr
->total_hdrs_len
=
2184 cpu_to_le16(skb_transport_offset(skb
) +
2185 sizeof(struct udphdr
));
2187 *check
= ~csum_tcpudp_magic(iph
->saddr
,
2188 iph
->daddr
, len
, iph
->protocol
, 0);
static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32) skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		QPRINTK(qdev, TX_QUEUED, INFO,
			"%s: shutting down tx queue %d due to lack of resources.\n",
			__func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		atomic_inc(&tx_ring->queue_stopped);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);

	if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
			vlan_tx_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
			NETDEV_TX_OK) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;

	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
		tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);
	return NETDEV_TX_OK;
}
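/* Worked example (assumed values, for illustration only): with a work
 * queue of wq_len == 128 entries, prod_idx walks 0..127 and then wraps
 * back to 0, so the doorbell write in qlge_send() always publishes an
 * index strictly less than wq_len.  tx_count counts free slots; it is
 * decremented here and replenished when the completion ring is serviced.
 */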
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->rx_ring_shadow_reg_area,
				    qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->tx_ring_shadow_reg_area,
				    qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
	if (qdev->rx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}
	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
	qdev->tx_ring_shadow_reg_area =
	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
				 &qdev->tx_ring_shadow_reg_dma);
	if (qdev->tx_ring_shadow_reg_area == NULL) {
		QPRINTK(qdev, IFUP, ERR,
			"Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
	return 0;

err_wqp_sh_area:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->rx_ring_shadow_reg_area,
			    qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}
static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
	atomic_set(&tx_ring->queue_stopped, 0);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
				    tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
				 &tx_ring->wq_base_dma);

	if ((tx_ring->wq_base == NULL) ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
		QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
		return -ENOMEM;
	}
	tx_ring->q =
	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
	if (tx_ring->q == NULL)
		goto err;

	return 0;
err:
	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
			    tx_ring->wq_base, tx_ring->wq_base_dma);
	return -ENOMEM;
}
static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc;

	uint32_t curr_idx, clean_idx;

	curr_idx = rx_ring->lbq_curr_idx;
	clean_idx = rx_ring->lbq_clean_idx;
	while (curr_idx != clean_idx) {
		lbq_desc = &rx_ring->lbq[curr_idx];

		if (lbq_desc->p.pg_chunk.last_flag) {
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
			lbq_desc->p.pg_chunk.last_flag = 0;
		}

		put_page(lbq_desc->p.pg_chunk.page);
		lbq_desc->p.pg_chunk.page = NULL;

		if (++curr_idx == rx_ring->lbq_len)
			curr_idx = 0;
	}
}

static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;

	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		if (sbq_desc == NULL) {
			QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc, mapaddr),
					 pci_unmap_len(sbq_desc, maplen),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->lbq)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		if (rx_ring->type != TX_Q)
			ql_update_buffer_queues(qdev, rx_ring);
	}
}
static void ql_init_lbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *lbq_desc;
	__le64 *bq = rx_ring->lbq_base;

	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->lbq_len; i++) {
		lbq_desc = &rx_ring->lbq[i];
		memset(lbq_desc, 0, sizeof(*lbq_desc));
		lbq_desc->index = i;
		lbq_desc->addr = bq;
		bq++;
	}
}

static void ql_init_sbq_ring(struct ql_adapter *qdev,
			     struct rx_ring *rx_ring)
{
	int i;
	struct bq_desc *sbq_desc;
	__le64 *bq = rx_ring->sbq_base;

	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
	for (i = 0; i < rx_ring->sbq_len; i++) {
		sbq_desc = &rx_ring->sbq[i];
		memset(sbq_desc, 0, sizeof(*sbq_desc));
		sbq_desc->index = i;
		sbq_desc->addr = bq;
		bq++;
	}
}
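/* Illustration (an assumption consistent with the allocation code above):
 * lbq_base/sbq_base are flat arrays of __le64 buffer-address slots, so
 * descriptor i simply points at &base[i]; e.g. with sbq_len == 512,
 * sbq[37].addr is sbq_base + 37.
 */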
static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->sbq_size,
				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
		rx_ring->sbq_base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq);
	rx_ring->sbq = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->lbq_size,
				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
		rx_ring->lbq_base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq);
	rx_ring->lbq = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		pci_free_consistent(qdev->pdev,
				    rx_ring->cq_size,
				    rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}
/* Allocate queues and buffers for this completion queue based
 * on the values in the parameter structure. */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
				 &rx_ring->cq_base_dma);

	if (rx_ring->cq_base == NULL) {
		QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->sbq_len) {
		/*
		 * Allocate small buffer queue.
		 */
		rx_ring->sbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
					 &rx_ring->sbq_base_dma);

		if (rx_ring->sbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue allocation failed.\n");
			goto err_mem;
		}

		/*
		 * Allocate small buffer queue control blocks.
		 */
		rx_ring->sbq =
		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->sbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Small buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_sbq_ring(qdev, rx_ring);
	}

	if (rx_ring->lbq_len) {
		/*
		 * Allocate large buffer queue.
		 */
		rx_ring->lbq_base =
		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
					 &rx_ring->lbq_base_dma);

		if (rx_ring->lbq_base == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue allocation failed.\n");
			goto err_mem;
		}
		/*
		 * Allocate large buffer queue control blocks.
		 */
		rx_ring->lbq =
		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
			    GFP_KERNEL);
		if (rx_ring->lbq == NULL) {
			QPRINTK(qdev, IFUP, ERR,
				"Large buffer queue control block allocation failed.\n");
			goto err_mem;
		}

		ql_init_lbq_ring(qdev, rx_ring);
	}

	return 0;

err_mem:
	ql_free_rx_resources(qdev, rx_ring);
	return -ENOMEM;
}
static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any skbs that are still outstanding.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				QPRINTK(qdev, IFDOWN, ERR,
					"Freeing lost SKB %p, from queue %d, index %d.\n",
					tx_ring_desc->skb, j,
					tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}
static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers and such. */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"RX resource allocation failed.\n");
			goto err_mem;
		}
	}
	/* Allocate tx queue resources */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			QPRINTK(qdev, IFUP, ERR,
				"TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
		cqicb->lbq_addr =
		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
			(u16) rx_ring->lbq_buf_size;
		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
			(u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = 0;
		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq_base_dma;
		base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
		cqicb->sbq_addr =
		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_buf_size =
		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
			(u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = 0;
		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
	}
	switch (rx_ring->type) {
	case TX_Q:
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}
static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
	    (tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
	    (tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);

	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16) tx_ring->wq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
	return err;
}
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
/* We start by trying to get the number of vectors
 * stored in qdev->intr_count. If we don't get that
 * many then we reduce the count and try again.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		/* Loop to get our vectors. We start with
		 * what we want and settle for what we get.
		 */
		do {
			err = pci_enable_msix(qdev->pdev,
				qdev->msi_x_entry, qdev->intr_count);
			if (err > 0)
				qdev->intr_count = err;
		} while (err > 0);

		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			QPRINTK(qdev, IFUP, WARNING,
				"MSI-X Enable failed, trying MSI.\n");
			qdev->intr_count = 1;
			qlge_irq_type = MSI_IRQ;
		} else if (err == 0) {
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"MSI-X Enabled, got %d vectors.\n",
				qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (!pci_enable_msi(qdev->pdev)) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			QPRINTK(qdev, IFUP, INFO,
				"Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings. This function loops through
 * the TX completion rings and assigns the vector that
 * will service it. An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3. Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings.*/
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of zero.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
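/* Worked example for ql_set_tx_vect(), using the same numbers as the
 * comment above and assuming rx_ring[] holds the RSS rings first: with
 * 2 vectors and 8 TX completion rings, tx_rings_per_vector == 4, so
 * rx_ring[2..5] (the first four outbound completion rings) get irq
 * vector 0 and rx_ring[6..9] get vector 1.
 */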
/* Set the interrupt mask for this vector. Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings. This function sets up a bit mask per vector
 * that indicates which rings it services.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
			    (1 << qdev->rx_ring[qdev->rss_ring_count +
			     (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}
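/* Worked example for ql_set_irq_mask() (illustrative; assumes cq_id
 * equals the rx_ring[] index, as set up in ql_configure_rings()): with
 * 2 vectors and 8 TX completion rings, vector 0's irq_mask covers
 * cq_ids 0 and 2..5, while vector 1's mask covers cq_ids 1 and 6..9.
 */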
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
		 * own intr_context since we have separate
		 * vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vector's enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events. This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vector's enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/* Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}
static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msix interrupt %d.\n", i);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
				QPRINTK(qdev, IFDOWN, DEBUG,
					"freeing msi interrupt %d.\n", i);
			}
		}
	}
	ql_disable_msix(qdev);
}
static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		atomic_set(&intr_context->irq_cnt, 0);
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				QPRINTK(qdev, IFUP, ERR,
					"Failed request for MSIX interrupt %d.\n",
					i);
				goto err_irq;
			}
			QPRINTK(qdev, IFUP, DEBUG,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[i].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[i].type ==
					TX_Q ? "TX_Q" : "",
				qdev->rx_ring[i].type ==
					RX_Q ? "RX_Q" : "", intr_context->name);
		} else {
			QPRINTK(qdev, IFUP, DEBUG,
				"trying msi or legacy interrupts.\n");
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: irq = %d.\n", __func__, pdev->irq);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: context->name = %s.\n", __func__,
				intr_context->name);
			QPRINTK(qdev, IFUP, DEBUG,
				"%s: dev_id = 0x%p.\n", __func__,
				&qdev->rx_ring[0]);
			status =
			    request_irq(pdev->irq, qlge_isr,
					test_bit(QL_MSI_ENABLED,
						 &qdev->flags) ? 0 : IRQF_SHARED,
					intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			QPRINTK(qdev, IFUP, ERR,
				"Hooked intr %d, queue type %s%s%s, with name %s.\n",
				i,
				qdev->rx_ring[0].type ==
					DEFAULT_Q ? "DEFAULT_Q" : "",
				qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
				qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
				intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}
static int ql_start_rss(struct ql_adapter *qdev)
{
	u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
				0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
				0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
				0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
				0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
				0xbe, 0xac, 0x01, 0xfa};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *) ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
		return status;
	}
	QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
	return status;
}
static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;
	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for CAM "
				"packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/* Check if the link is up and use that to
	 * determine if we are setting or clearing
	 * the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");

	return status;
}
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * because of WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++) {
		QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
		return status;
	}

	end_jiffies = jiffies +
		max((unsigned long)1, usecs_to_jiffies(30));

	/* Stop management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

	/* Wait for the NIC and MGMNT FIFOs to empty. */
	ql_wait_fifo_empty(qdev);

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		QPRINTK(qdev, IFDOWN, ERR,
			"ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);

	QPRINTK(qdev, PROBE, INFO,
		"Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		"XG Roll = %d, XG Rev = %d.\n",
		qdev->func,
		qdev->port,
		qdev->chip_rev_id & 0x0000000f,
		qdev->chip_rev_id >> 4 & 0x0000000f,
		qdev->chip_rev_id >> 8 & 0x0000000f,
		qdev->chip_rev_id >> 12 & 0x0000000f);
	QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * commands to instruct the MPI what to do per the ethtool
	 * settings.
	 */

	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			WAKE_MCAST | WAKE_BCAST)) {
		QPRINTK(qdev, IFDOWN, ERR,
			"Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			QPRINTK(qdev, IFDOWN, ERR,
				"Failed to set magic packet on %s.\n",
				qdev->ndev->name);
			return status;
		}
		QPRINTK(qdev, DRV, INFO,
			"Enabled magic packet successfully on %s.\n",
			qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
			(status == 0) ? "Successfully set" : "Failed", wol,
			qdev->ndev->name);
	}

	return status;
}
static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point.
	 */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	ql_free_rx_buffers(qdev);

	status = ql_adapter_reset(qdev);
	if (status)
		QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
			qdev->func);
	return status;
}
static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);
	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return err;
err_init:
	ql_adapter_reset(qdev);
	return err;
}
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	int status = 0;

	if (ql_alloc_mem_resources(qdev)) {
		QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	status = ql_request_irq(qdev);
	return status;
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);
	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}
static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;

	qdev->lbq_buf_order = get_order(lbq_buf_len);

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector. To do that we ask for
	 * cpu_cnt vectors. ql_enable_msix() will adjust the
	 * vector count to what we actually get. We then
	 * allocate an RSS ring for each.
	 * Essentially, we are doing min(cpu_count, msix_vector_count).
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
			rx_ring->lbq_size =
			    rx_ring->lbq_len * sizeof(__le64);
			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
			QPRINTK(qdev, IFUP, DEBUG,
				"lbq_buf_size %d, order = %d\n",
				rx_ring->lbq_buf_size, qdev->lbq_buf_order);
			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
			rx_ring->sbq_size =
			    rx_ring->sbq_len * sizeof(__le64);
			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
			rx_ring->type = RX_Q;
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 */
			/* outbound cq is same size as tx_ring it services. */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq_len = 0;
			rx_ring->lbq_size = 0;
			rx_ring->lbq_buf_size = 0;
			rx_ring->sbq_len = 0;
			rx_ring->sbq_size = 0;
			rx_ring->sbq_buf_size = 0;
			rx_ring->type = TX_Q;
		}
	}
	return 0;
}
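/* Ring-count arithmetic example (hypothetical numbers): on a 4-CPU
 * system that is granted all 4 MSI-X vectors, rss_ring_count == 4,
 * tx_ring_count == 4 and rx_ring_count == 8; rx_ring[0..3] are the
 * inbound RSS queues and rx_ring[4..7] are the outbound completion
 * queues, whose cq_ids start right after the RSS rings.
 */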
static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	struct rx_ring *rx_ring;
	int i, status;
	unsigned int lbq_buf_len;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 3;
		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			QPRINTK(qdev, IFUP, ERR,
				"Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			QPRINTK(qdev, IFUP, ERR,
				"Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	/* Get the new rx buffer size. */
	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
	qdev->lbq_buf_order = get_order(lbq_buf_len);

	for (i = 0; i < qdev->rss_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		/* Set the new size. */
		rx_ring->lbq_buf_size = lbq_buf_len;
	}

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000) {
		QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
		QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
	} else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
		   (ndev->mtu == 9000 && new_mtu == 9000)) {
		return 0;
	} else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3*HZ);

	if (!netif_running(qdev->ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	ndev->mtu = new_mtu;
	status = ql_change_rx_buffers(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Changing MTU failed.\n");
	}

	return status;
}
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Gather the RX counters across the RSS rings. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Gather the TX counters across the TX rings. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}
static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct dev_mc_list *mc_ptr;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (ndev->mc_count) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
		     i++, mc_ptr = mc_ptr->next)
			if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				QPRINTK(qdev, HW, ERR,
					"Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			QPRINTK(qdev, HW, ERR,
				"Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}
static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	if (status)
		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	return;
error:
	QPRINTK(qdev, IFUP, ALERT,
		"Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
}
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip. Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work. Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
				 &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return 0;
}
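/* Port selection illustration (an example, not new logic): the two NIC
 * functions on a chip might be PCI functions 0 and 1; the lower-numbered
 * function becomes port 0 and uses the XGMAC0/PL0/PI0 resources, while
 * the other becomes port 1 and uses XGMAC1/PL1/PI1, with the matching
 * mailbox register offsets selected above.
 */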
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		return err;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;

err_out:
	ql_release_all(pdev);
	pci_disable_device(pdev);
	return err;
}
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (ql_adapter_reset(qdev))
		QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	} else {
		QPRINTK(qdev, IFUP, ERR,
			"Device was not running prior to EEH.\n");
	}
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);