/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */
	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
			   (bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
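
/* TX completion path: walk the packets reported complete in the status
 * block, free their buffers via bnx2x_free_tx_pkt(), update BQL
 * accounting and wake the netdev TX queue if it had been stopped.
 */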
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
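
/* Remember the highest SGE index returned by the FW; ring indices wrap,
 * so the comparison uses the signed distance computed by SUB_S16().
 */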
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
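
/* Advance the RX SGE producer: clear the mask bits for the SGEs listed
 * in the end-aggregation CQE and push rx_sge_prod forward over any
 * 64-bit mask elements that are now fully consumed.
 */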
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}
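
/* Open a new TPA aggregation "bin": hand the FW a fresh empty buffer at
 * the producer slot, keep the consumed buffer as the aggregation head
 * and record parsing flags, VLAN tag and RX hash from the start CQE.
 */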
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
	   fp->tpa_queue_used);
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12

/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
					       skb_shinfo(skb)->gso_size);
}
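
/* Allocate a page for SGE ring slot @index, map it for DMA and point
 * the corresponding SGE descriptor at it.
 */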
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
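
/* Attach the SGE pages of a completed aggregation to @skb as page frags,
 * replacing each consumed page with a freshly allocated one, and fill in
 * the GRO parameters (gso_size, gso_type, segment count) for the skb.
 */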
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset == 0)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
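
/* Hand a completed aggregation to the stack: for FW GRO the TCP
 * pseudo-header checksum is recomputed and tcp_gro_complete() is called
 * before the skb is passed to napi_gro_receive().
 */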
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_ip_csum(bp, skb);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_ipv6_csum(bp, skb);
			break;
		default:
			BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
		tcp_gro_complete(skb);
	}
	napi_gro_receive(&fp->napi, skb);
}
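
/* Terminate a TPA aggregation: build an skb around the first buffer,
 * append the SGE pages via bnx2x_fill_frag_skb() and pass it up; on any
 * allocation failure the packet is dropped and the bin keeps its buffer.
 */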
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif
		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
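
/* Allocate and DMA-map a new RX data buffer for ring slot @index and
 * point the corresponding RX BD at it.
 */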
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				struct bnx2x_fastpath *fp,
				struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
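
/* Main RX poll routine: walks the completion queue up to @budget packets,
 * handling slow-path events, TPA start/stop CQEs and regular packets
 * (small-packet copy-break or zero-copy via build_skb()), then publishes
 * the new producers to the chip.
 */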
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
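
/* Per-queue MSI-X interrupt handler: acknowledge the status block with
 * further interrupts disabled and schedule this queue's NAPI context.
 */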
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}
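
/* Release the buffers of the first @last TPA bins, unmapping any buffer
 * that is still owned by an open aggregation.
 */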
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	int cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}
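
/* Request MSI-X vectors: one slowpath vector (PF only), one optional CNIC
 * vector and one per ETH queue.  Falls back to fewer queues or to a single
 * MSI-X vector when the full set cannot be allocated.
 */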
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
		/* how less vectors we will have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
			goto no_msix;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	} else if (rc > 0) {
		/* Get by with single vector */
		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
		if (rc) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}

	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}

/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:		Driver handle
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}

static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(bp, i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun attack.
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVREHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents of the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}

int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
			bool config_hash)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

	/* RSS configuration */
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
	if (rss_obj->udp_rss_v4)
		__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
	if (rss_obj->udp_rss_v6)
		__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		prandom_bytes(params.rss_key, sizeof(params.rss_key));
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	return bnx2x_config_rss(bp, &params);
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
}

#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
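
/* FW statistics DMA memory: a single coherent buffer holds the statistics
 * request (header + command groups) followed by the statistics data.
 */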
static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}

static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
			bp->fw_stats_data_sz + bp->fw_stats_req_sz);

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}
2175 /* send load request to mcp and analyze response */
2176 static int bnx2x_nic_load_request(struct bnx2x
*bp
, u32
*load_code
)
2180 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_mb_header
) &
2181 DRV_MSG_SEQ_NUMBER_MASK
);
2182 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
2184 /* Get current FW pulse sequence */
2185 bp
->fw_drv_pulse_wr_seq
=
2186 (SHMEM_RD(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_pulse_mb
) &
2187 DRV_PULSE_SEQ_MASK
);
2188 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp
->fw_drv_pulse_wr_seq
);
2191 (*load_code
) = bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
,
2192 DRV_MSG_CODE_LOAD_REQ_WITH_LFA
);
2194 /* if mcp fails to respond we must abort */
2195 if (!(*load_code
)) {
2196 BNX2X_ERR("MCP response failure, aborting\n");
2200 /* If mcp refused (e.g. other port is in diagnostic mode) we
2203 if ((*load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED
) {
2204 BNX2X_ERR("MCP refused load request, aborting\n");
/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
				  loaded_fw, my_fw);
			return -EBUSY;
		}
	}

	return 0;
}
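/* Illustration only: the version dword built above packs the four version
 * bytes lowest byte first, e.g. a hypothetical FW 7.8.17.0 would encode as
 * 7 + (8 << 8) + (17 << 16) + (0 << 24) = 0x00110807, and it must equal the
 * value the already-loaded firmware left in XSEM_REG_PRAM.
 */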
/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
	   path, load_count[path][0], load_count[path][1],
	   load_count[path][2]);
	load_count[path][0]++;
	load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
	   path, load_count[path][0], load_count[path][1],
	   load_count[path][2]);
	if (load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}
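/* load_count[path][0] counts all functions loaded on this path and
 * load_count[path][1 + port] counts the functions loaded on each port; the
 * first function on the path is told to do COMMON init, the first on a port
 * does PORT init, and everyone else only does FUNCTION init.
 */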
/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}
static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}
/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi is kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;

	/* bzero bnx2x_fastpath contents */
	if (fp->tpa_info)
		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
		       sizeof(struct bnx2x_agg_info));
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
				  (bp->flags & GRO_ENABLE_FLAG &&
				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
}
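/* The txdata array is laid out CoS-major: queue i of CoS c lives at
 * bp->bnx2x_txq[c * BNX2X_NUM_ETH_QUEUES(bp) + i]; for example, with 8 ETH
 * queues, CoS 1 of queue 2 maps to index 10. The FCoE L2 queue uses its own
 * fixed slot, FCOE_TXQ_IDX(bp).
 */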
int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	rc = bnx2x_alloc_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate bp memory for cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

	/* setup cnic queues */
	for_each_cnic_queue(bp, i) {
		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
		if (rc) {
			BNX2X_ERR("Queue setup failed\n");
			LOAD_ERROR_EXIT(bp, load_error_cnic2);
		}
	}

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc)
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
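/* The load_error_cnic* labels unwind in reverse order of setup: the timer
 * scan is disabled first, then the CNIC NAPI objects, then the real queue
 * count is restored to the ETH-only value before the CNIC fastpath and bp
 * memory is released.
 */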
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error0);

	/* need to be done after alloc mem, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_nic_load_analyze_req(bp, load_code);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Init per-function objects */
	if (IS_PF(bp)) {
		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* initialize FW coalescing state machines in RAM */
		bnx2x_update_coalesce(bp);

		/* setup the leading queue */
		rc = bnx2x_setup_leading(bp);
		if (rc) {
			BNX2X_ERR("Setup leading failed!\n");
			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* set up the rest of the queues */
		for_each_nondefault_eth_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* setup rss */
		rc = bnx2x_init_rss_pf(bp);
		if (rc) {
			BNX2X_ERR("PF RSS init failed\n");
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	} else { /* vf */
		for_each_eth_queue(bp, i) {
			rc = bnx2x_vfpf_setup_q(bp, i);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_set_mac(bp);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Initialize Rx filter. */
	netif_addr_lock_bh(bp->dev);
	bnx2x_set_rx_mode(bp->dev);
	netif_addr_unlock_bh(bp->dev);

	/* Start the Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	bnx2x_del_all_napi(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fp_mem(bp);
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
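/* Error handling above mirrors the setup order: load_error3 undoes the
 * queue/function setup, load_error2 reports UNLOAD to the MCP, load_error1
 * tears down NAPI and clears the pf_load mark, and load_error0 releases the
 * fastpath and FW-statistics memory.
 */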
static int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	/* mark driver is unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions to complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/* Nothing to do during unload if previous bnx2x_nic_load()
	 * have not completed successfully - all resources are released.
	 *
	 * we can get here only after unsuccessful ndo_* callback, during which
	 * dev->IFF_UP flag is still on.
	 */
	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
		return 0;

	/* It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* wait till consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(bp);

	/* if VF indicate to PF this function is going down (PF will delete sp
	 * elements and clear initializations
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip*/
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/*
		 * Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (CNIC_LOADED(bp))
		bnx2x_free_mem_cnic(bp);

	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}
/*
 * net_device service functions
 */
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif
		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_RX_STATUS,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fp_txdata *txdata,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
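/* Worked example (illustrative numbers): for a TSO skb whose linear part is
 * 1514 bytes with hlen == 114 bytes of headers, the start BD is trimmed to
 * nbytes == 114 and the new data BD points at mapping + 114 with
 * nbytes == 1400 (old_len - hlen); both BDs share the one DMA mapping of the
 * linear data, which is why the data BD is flagged BNX2X_TSO_SPLIT_BD.
 */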
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum,
				    s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}
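/* bnx2x_csum_fix() adjusts a checksum that was computed starting "fix" bytes
 * away from the transport header: a positive fix subtracts the checksum of
 * the extra leading bytes, a negative fix adds the checksum of the missing
 * bytes, and the result is folded and byte-swapped into the form the HW
 * expects.
 */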
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_is_gso_v6(skb))
		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
	else if (skb_is_gso(skb))
		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32  wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
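/* The window check above slides a window of (MAX_FETCH_BD - 3) fragments
 * over the fragment list and requires every window to carry at least
 * gso_size bytes; otherwise one MSS worth of data could be spread over more
 * BDs than the FW can fetch for a single segment, so the skb is linearized
 * instead.
 */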
static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
					u32 xmit_type)
{
	*parsing_data |= (skb_shinfo(skb)->gso_size <<
			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
			  ETH_TX_PARSE_BD_E2_LSO_MSS;
	if ((xmit_type & XMIT_GSO_V6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
				     struct eth_tx_parse_bd_e1x *pbd,
				     u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(skb);

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = bswab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0));
	} else
		pbd->tcp_pseudo_csum =
			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0));

	pbd->global_data |=
		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 */
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				       u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}
	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & XMIT_CSUM_V4)
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IP_CSUM;
	else
		tx_start_bd->bd_flags.as_bitfield |=
					ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}
/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
				    struct eth_tx_parse_bd_e1x *pbd,
				    u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		cpu_to_le16(hlen |
			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
			skb_shinfo(skb)->nr_frags +
			BDS_PER_TX_PKT +
			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x  protocol %x  protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
	   skb->len);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	SET_FLAG(tx_start_bd->general_data,
		 ETH_TX_START_BD_PARSE_NBDS,
		 0);

	/* header nbd */
	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (vlan_tx_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp))
#endif
			tx_start_bd->vlan_or_ethertype =
				cpu_to_le16(ntohs(eth->h_proto));
#ifndef BNX2X_STOP_ON_ERROR
		else
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
#endif
	}

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);

		if (IS_MF_SI(bp) || IS_VF(bp)) {
			/* fill in the MAC addresses in the PBD - for local
			 * switching
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
					      &pbd_e2->src_mac_addr_mid,
					      &pbd_e2->src_mac_addr_lo,
					      eth->h_source);
			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
					      &pbd_e2->dst_mac_addr_mid,
					      &pbd_e2->dst_mac_addr_lo,
					      eth->h_dest);
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p  addr (%x:%x)  nbd %d  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod, ++nbd);
		if (!CHIP_IS_E1x(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a none-LSO
	 * case, when we much more care about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}
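/* BD layout produced above, in ring order: the start BD (possibly split into
 * a header BD plus a data BD for TSO), then the parsing BD (E1x or E2
 * format), then one data BD per fragment. first_bd->nbd is only finalized
 * after all fragments are mapped, just before the doorbell is rung.
 */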
/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
	    !is_zero_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}
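/* Teardown above mirrors bnx2x_alloc_fp_mem_at(): the status block first,
 * then the Rx rings (buffer ring, descriptor and completion rings, SGE
 * pages), and finally the per-CoS Tx rings.
 */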
void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fo init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size &&
	    (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			BNX2X_ALLOC(txdata->tx_buf_ring,
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
				&txdata->tx_desc_mapping,
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
			/* release memory allocated for this queue */
			bnx2x_free_fp_mem_at(bp, index);
			return -ENOMEM;
	}
	return 0;
}
int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
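/* If an RSS queue allocation fails midway, the ETH queue count is shrunk to
 * the number that did allocate successfully and the CNIC (FCoE) fastpath is
 * moved down so that it stays adjacent to the last ETH queue.
 */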
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
#ifdef NETDEV_FCOE_WWNN
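/**
 * bnx2x_fcoe_get_wwn - Return the requested FCoE world wide name.
 *
 * @dev: net device
 * @wwn: output buffer
 * @type: WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
 */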
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
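/**
 * bnx2x_fix_features - Adjust the requested feature set to what the device
 * can actually support.
 *
 * @dev: net device
 * @features: features requested by the stack
 *
 * LRO/GRO (TPA) depend on Rx checksum offload, so both are cleared whenever
 * Rx CSUM is disabled or TPA is administratively off.
 */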
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
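/**
 * bnx2x_set_features - Apply a new feature set.
 *
 * @dev: net device
 * @features: features to apply
 *
 * Translates LRO/GRO into the TPA/GRO driver flags and NETIF_F_LOOPBACK into
 * the MAC loopback mode; the NIC is reloaded only if something changed and
 * no parity recovery is in progress.
 */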
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
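/**
 * bnx2x_tx_timeout - Transmit timeout handler.
 *
 * @dev: net device
 *
 * Defers the actual reset to the slowpath rtnl task so that the netdev can
 * be shut down gracefully before the chip is reset.
 */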
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
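/**
 * bnx2x_suspend - PCI suspend callback.
 *
 * @pdev: pci device
 * @state: target power state
 *
 * Detaches the netdev, unloads the NIC and moves the device into the power
 * state chosen by the PCI core.
 */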
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
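/**
 * bnx2x_resume - PCI resume callback.
 *
 * @pdev: pci device
 *
 * Restores PCI state, powers the device back up and reloads the NIC if the
 * interface was running. Refused while parity error recovery is in progress.
 */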
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
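/**
 * bnx2x_set_ctx_validation - Set the CDU validation values in an ETH context.
 *
 * @bp: driver handle
 * @cxt: context to update
 * @cid: connection ID
 */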
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
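/**
 * bnx2x_update_coalesce_sb_index - Update coalescing for one SB index.
 *
 * @bp: driver handle
 * @fw_sb_id: firmware status block ID
 * @sb_index: index within the status block
 * @disable: nonzero to disable interrupt coalescing for this index
 * @usec: coalescing timeout in microseconds
 *
 * The timeout is converted to BNX2X_BTR ticks; a timeout of 0 usec also
 * disables host coalescing for the given index.
 */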
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}