/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;

	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
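/*
 * Illustrative note (not part of the original source): load_count is only
 * consulted on no-MCP boards in bnx2x_nic_load().  For example, assuming
 * path 0 and port 1, the first function to load bumps the counters to
 * {1, 0, 1} and therefore gets FW_MSG_CODE_DRV_LOAD_COMMON; a second
 * function on the same port then sees {2, 0, 2} and only gets
 * FW_MSG_CODE_DRV_LOAD_FUNCTION.
 */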
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      "pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
		sw_cons++;
	}

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
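/*
 * Illustrative note (not in the original file): the smp_mb() above pairs
 * with a barrier on the transmit side.  A minimal sketch of the interaction
 * it guards against, assuming a start_xmit path that stops the queue when it
 * runs low on BDs:
 *
 *	start_xmit (producer):			bnx2x_tx_int (consumer):
 *	  queue almost full			  txdata->tx_bd_cons = bd_cons;
 *	  netif_tx_stop_queue(txq);		  smp_mb();
 *	  smp_mb();				  if (netif_tx_queue_stopped(txq))
 *	  if (enough BDs freed meanwhile)		wake under __netif_tx_lock()
 *		netif_tx_wake_queue(txq);
 *
 * Without the barriers either side can miss the other's update and the
 * queue may stay stopped forever.
 */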
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
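/*
 * Illustrative note (not in the original file): SUB_S16() compares ring
 * indices modulo 2^16, so the "last max" tracking survives index wrap-around.
 * For example, with last_max = 65534 and a new idx = 2, SUB_S16(2, 65534)
 * evaluates to +4, so 2 is correctly treated as the more recent SGE index
 * and fp->last_max_sge is advanced.
 */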
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		return le32_to_cpu(cqe->rss_hash_result);
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
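/*
 * Illustrative breakdown (not in the original file): the 12 bytes are
 * 1 (NOP) + 1 (NOP) + 1 (kind = 8) + 1 (length = 10) + 4 (TS value) +
 * 4 (TS echo reply), i.e. the TCP timestamp option padded to a 4-byte
 * boundary.
 */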
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);

	/* Check if there was a TCP timestamp, if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
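/*
 * Worked example (illustrative, not in the original file): for an IPv4
 * aggregation with TCP timestamps, hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) +
 * 20 (iphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so a first packet with
 * len_on_bd = 1514 yields an approximate MSS of 1448.
 */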
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u32 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *data = rx_buf->data;
	u8 *new_data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}

drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;
		u8 *data;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while "
					  "disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);
				goto next_rx;
			} else {
				u16 queue =
					cqe->end_agg_cqe.queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_stop on queue %d\n",
				   queue);

				bnx2x_tpa_stop(bp, fp, queue,
					       &cqe->end_agg_cqe,
					       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
				if (bp->panic)
					return 0;
#endif

				bnx2x_update_sge_prod(fp, cqe_fp);
				goto next_cqe;
			}
		}

		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */

		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			fp->eth_q_stats.rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data);
				if (unlikely(!skb)) {
					kfree(data);
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM) {
			if (likely(BNX2X_RX_CSUM_OK(cqe)))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			else
				fp->eth_q_stats.hw_csum_err++;
		}

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;

	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
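/*
 * Worked example (illustrative, not in the original file): maxCfg is
 * expressed in units where 100 corresponds to the full link.  With a
 * 10000 Mbps link and maxCfg = 25, the percentage-based branch reports
 * 10000 * 25 / 100 = 2500 Mbps, while the absolute branch caps the
 * reported speed at vn_max_rate = 25 * 100 = 2500 Mbps.
 */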
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
							  GFP_ATOMIC);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons));
				sw_cons++;
			}
		}
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;

		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how less vectors we will have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	return rc;
}
static inline int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}
static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}
#endif
	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
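/*
 * Illustrative note (not in the original file): the FCoE test matches the
 * standard ethertypes 0x8906 (ETH_P_FCOE) and 0x8914 (ETH_P_FIP), looking
 * one VLAN tag deep, so FCoE/FIP frames are steered to the dedicated FCoE
 * Tx ring while everything else is hashed across the ETH L2 queues.
 */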
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NON_ETH_CONTEXT_USE;
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, tx, rx;

	tx = MAX_TXQS_PER_COS * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

/* account for fcoe queue */
#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		rx += FCOE_PRESENT;
		tx += FCOE_PRESENT;
	}
#endif

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
			  tx, rx);

	return rc;
}
static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;

		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
	}
}
static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/*
	 * Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		for (i = 0; i < sizeof(ind_table); i++)
			ind_table[i] =
				bp->fp->cl_id + (i % num_eth_queues);
	}

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_pf(bp, ind_table,
				   bp->port.pmf || !CHIP_IS_E1x(bp));
}
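/*
 * Illustrative example (not in the original file): with 4 ETH queues and
 * bp->fp->cl_id = 17, the table above is filled round-robin as
 * ind_table[] = { 17, 18, 19, 20, 17, 18, ... }, so the RSS hash spreads
 * flows evenly across the client IDs of the ETH rings.
 */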
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
	struct bnx2x_config_rss_params params = {0};
	int i;

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = &bp->rss_conf_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* RSS mode */
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
		break;
	case ETH_RSS_MODE_REGULAR:
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
		break;
	case ETH_RSS_MODE_VLAN_PRI:
		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_E1HOV_PRI:
		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
		break;
	case ETH_RSS_MODE_IP_DSCP:
		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
		break;
	default:
		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
		return -EINVAL;
	}

	/* If RSS is enabled */
	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);

		/* Hash bits */
		params.rss_result_mask = MULTI_MASK;

		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));

		if (config_hash) {
			/* RSS keys */
			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
				params.rss_key[i] = random32();

			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
		}
	}

	return bnx2x_config_rss(bp, &params);
}
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {0};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {0};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
			  "object: %d\n", rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)
#else
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Set the initial link reported state to link down */
	bnx2x_acquire_phy_lock(bp);
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);
	bnx2x_release_phy_lock(bp);

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa.
	 */
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it.
	 */
	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue discipline
	 * or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error1);
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			LOAD_ERROR_EXIT(bp, load_error1);
		}
	} else {
		int path = BP_PATH(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/*
		 * We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
	} else
		bp->port.pmf = 0;

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Init Function state controlling object */
	bnx2x__init_func_obj(bp);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	bnx2x_init_bp_objs(bp);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error4);
	}

	rc = bnx2x_init_rss_pf(bp);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	rc = bnx2x_set_eth_mac(bp, true);
	if (rc)
		LOAD_ERROR_EXIT(bp, load_error4);

	if (bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	/* Wait for all pending SP commands to complete */
	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
		return -EBUSY;
	}

	bnx2x_dcbx_init(bp);
	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error4:
#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Clean queueable objects */
	bnx2x_squeeze_objects(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;
	bool global = false;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;

		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_HW, "Releasing a leadership...\n");

		return -EINVAL;
	}

	/*
	 * It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* Stop Tx */
	bnx2x_tx_disable(bp);

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	/* Set ALWAYS_ALIVE bit in shmem */
	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;

	bnx2x_drv_pulse(bp);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine whose leader will perform the recovery
		 * last)
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bp->state = BNX2X_STATE_CLOSED;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
				bnx2x_tx_int(bp, &fp->txdata[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
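/*
 * The linearization check above is a sliding-window test: the firmware can
 * fetch at most MAX_FETCH_BD BDs per packet and three of them are reserved
 * for the headers BD, the parsing BD and the last BD, so every window of
 * wnd_size = MAX_FETCH_BD - 3 consecutive data BDs must carry at least one
 * MSS worth of payload.  If, say, a run of wnd_size small frags sums to less
 * than gso_size bytes, the skb is copied into a single linear buffer
 * instead.  Non-LSO packets with too many frags are always linearized.
 */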
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			  ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));
	} else
		pbd->tcp_pseudo_csum =
			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						&ipv6_hdr(skb)->daddr,
						0, IPPROTO_TCP, 0));

	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
					u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	} else
		/* We support checksum offload for TCP and UDP only.
		 * No need to pass the UDP header length - it's a constant.
		 */
		return skb_transport_header(skb) +
			sizeof(struct udphdr) - skb->data;
}
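/*
 * The E2-style parsing BD keeps its offsets in the units the firmware
 * expects: the transport header start is recorded in 16-bit words (hence
 * the ">> 1") and the TCP header length in 32-bit dwords (hence the "/ 4").
 * The byte length returned here is what bnx2x_start_xmit() later compares
 * against skb_headlen() to decide whether the first BD must be split.
 */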
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				      struct eth_tx_start_bd *tx_start_bd,
				      u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			  skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d fix %d csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
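/*
 * For the older E1x chips the header lengths in the parsing BD are likewise
 * kept in 16-bit words, which is why every byte count above is shifted
 * right by one before being accumulated into total_hlen_w.  When TCP
 * checksumming is offloaded the stack's pseudo checksum is taken straight
 * from tcp_hdr(skb)->check; the non-TCP (UDP) path goes through the
 * bnx2x_csum_fix() hardware-bug workaround instead.
 */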
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index, fp_index, txdata_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);

	/* decode the fastpath index and the cos index from the txq */
	fp_index = TXQ_TO_FP(txq_index);
	txdata_index = TXQ_TO_COS(txq_index);

#ifdef BCM_CNIC
	/*
	 * Override the above for the FCoE queue:
	 *   - FCoE fp entry is right after the ETH entries.
	 *   - FCoE L2 queue uses bp->txdata[0] only.
	 */
	if (unlikely(!NO_FCOE(bp) && (txq_index ==
				      bnx2x_fcoe_tx(bp, txq_index)))) {
		fp_index = FCOE_IDX;
		txdata_index = 0;
	}
#endif

	/* enable this debug print to view the transmission queue being used
	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* locate the fastpath and the txdata */
	fp = &bp->fp[fp_index];
	txdata = &fp->txdata[txdata_index];

	/* enable this debug print to view the transmission details
	DP(BNX2X_MSG_FP, "transmitting packet cid %d fp index %d txdata_index %d"
			 " tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
				"protocol(%x,%x) gso type %x xmit_type %x\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
		   "silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
		 mac_type);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else
		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		if (IS_MF_SI(bp)) {
			/*
			 * fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}
	} else {
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {

			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
						"dropping packet...\n");

			/* we need unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod));
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we much more care about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
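/*
 * BD accounting in bnx2x_start_xmit() in short: nbd starts at 2 for the
 * start BD plus the parsing BD, a TSO header/data split and every mapped
 * frag each add one more, and first_bd->nbd is written only after all DMA
 * mappings have succeeded so that bnx2x_free_tx_pkt() can unwind a partial
 * packet.  The doorbell write pushes txdata->tx_db (which now carries the
 * updated producer) to the chip; the barriers above it keep the BD writes
 * ordered before the producer update on weakly ordered architectures.
 */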
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
		   " requested: %d. max supported is %d\n",
		   num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
		   num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi-class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * MAX_TXQS_PER_COS;
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
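/*
 * Example of the resulting mapping (numbers are illustrative only): with
 * three supported CoSes and, say, four ETH queues, tc0 covers txq 0-3 at
 * offset 0, tc1 covers the next MAX_TXQS_PER_COS queues, and so on, while
 * skb priorities are first folded into a CoS through bp->prio_to_cos[].
 */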
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */
#ifdef BCM_CNIC
	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
#endif
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;
	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	/* if rx_ring_size specified - use it */
	if (!bp->rx_ring_size) {

		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else
		rx_ring_size = bp->rx_ring_size;

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);
#ifdef BCM_CNIC
	if (!IS_FCOE_IDX(index)) {
#endif
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
	}
#endif

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			DP(BNX2X_MSG_SP, "allocating tx memory of "
					 "fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
				&txdata->tx_desc_mapping,
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
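/*
 * Rx ring sizing above: unless the user pinned a size via bp->rx_ring_size,
 * MAX_RX_AVAIL is divided evenly across the Rx queues and then raised to the
 * firmware minimum (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA, depending on
 * whether TPA is enabled).  If the allocation still comes up short of that
 * minimum, the queue's memory is released and -ENOMEM is returned, since the
 * firmware would otherwise drop every packet on an undersized queue.
 */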
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/**
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
	 * 4. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;
#endif

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
#ifdef BCM_CNIC
		/**
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */

		/* move FCoE fp even NO_FCOE_FLAG is on */
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
#endif
		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
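/*
 * If some of the RSS queue allocations failed, the driver shrinks rather
 * than fails: the FCoE fastpath entry is slid down next to the last ETH
 * queue that did get memory (bnx2x_move_fp) and bp->num_queues is reduced
 * by the number of queues that could not be allocated.
 */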
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF).
	 */
	msix_table_size = bp->igu_sb_cnt + 1;

	/* fp array: RSS plus CNIC related L2 queues */
	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
		     sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		pr_err("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
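/*
 * The coalescing helper below programs the per-index timeout in units of
 * BNX2X_BTR microseconds and then recomputes the disable flag: coalescing
 * is turned off either when the caller asked for it explicitly or when the
 * requested interval is 0 usec.
 */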
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
);