/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

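/* Illustrative example of the txdata index arithmetic above, using made-up
 * values (not taken from a real configuration): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8, max_cos == 3 and the FCoE fastpath being
 * moved from index 8 to index 6 (delta == 2), old_max_eth_txqs is
 * 8 * 3 == 24 and new_max_eth_txqs is (8 - 8 + 6) * 3 == 18, so the txdata
 * block moves down by max_cos * delta == 6 entries, as the comment in
 * bnx2x_move_fp() states.
 */
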
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

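/* Example of the bootcode part of the string, using a made-up value: with
 * bp->common.bc_ver == 0x070d04 the three extracted bytes are 0x07, 0x0d and
 * 0x04, so "bc 7.13.4" is appended after the firmware version already copied
 * into the buffer (followed by " phy <ver>" when an external PHY reports a
 * firmware version of its own).
 */
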
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

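/* A sketch of the interleaving that the re-check under __netif_tx_lock() in
 * bnx2x_tx_int() closes off (an illustration of the in-code comment above,
 * not an observed trace):
 *
 *   bnx2x_tx_int()                         bnx2x_start_xmit()
 *   --------------                         ------------------
 *   sees the queue stopped,
 *   finds enough room, is about to
 *   wake the queue (gets suspended)
 *                                          sees fresh tx_bd_cons, wakes the
 *                                          queue, sends packets until the
 *                                          ring is full again, stops it
 *   an unconditional wake here would
 *   re-enable a genuinely full queue;
 *   re-checking tx_avail under the
 *   tx lock prevents that.
 */
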
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

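/* Worked example of the gso_size approximation above, with illustrative
 * numbers only: for an IPv4 aggregation whose first frame has
 * len_on_bd == 1514 and carries TCP timestamps, hdrs_len is
 * ETH_HLEN (14) + sizeof(struct tcphdr) (20) + sizeof(struct iphdr) (20) +
 * TPA_TSTAMP_OPT_LEN (12) == 66, giving gso_size == 1514 - 66 == 1448,
 * i.e. the usual MSS of a 1500-byte-MTU connection using timestamps.
 */
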
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

9f6c9258 525static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
526 struct bnx2x_agg_info *tpa_info,
527 u16 pages,
528 struct sk_buff *skb,
619c5cb6
VZ
529 struct eth_end_agg_rx_cqe *cqe,
530 u16 cqe_idx)
9f6c9258
DK
531{
532 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
533 u32 i, frag_len, frag_size;
534 int err, j, frag_id = 0;
619c5cb6 535 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 536 u16 full_page = 0, gro_size = 0;
9f6c9258 537
619c5cb6 538 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
539
540 if (fp->mode == TPA_MODE_GRO) {
541 gro_size = tpa_info->gro_size;
542 full_page = tpa_info->full_page;
543 }
9f6c9258
DK
544
545 /* This is needed in order to enable forwarding support */
cbf1de72
YM
546 if (frag_size)
547 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
548 le16_to_cpu(cqe->pkt_len),
549 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 550
9f6c9258 551#ifdef BNX2X_STOP_ON_ERROR
924d75ab 552 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
553 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
554 pages, cqe_idx);
619c5cb6 555 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
556 bnx2x_panic();
557 return -EINVAL;
558 }
559#endif
560
561 /* Run through the SGL and compose the fragmented skb */
562 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 563 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
564
565 /* FW gives the indices of the SGE as if the ring is an array
566 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
567 if (fp->mode == TPA_MODE_GRO)
568 frag_len = min_t(u32, frag_size, (u32)full_page);
569 else /* LRO */
924d75ab 570 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 571
9f6c9258
DK
572 rx_pg = &fp->rx_page_ring[sge_idx];
573 old_rx_pg = *rx_pg;
574
575 /* If we fail to allocate a substitute page, we simply stop
576 where we are and drop the whole packet */
996dedba 577 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 578 if (unlikely(err)) {
15192a8c 579 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
580 return err;
581 }
582
16a5fd92 583 /* Unmap the page as we're going to pass it to the stack */
9f6c9258
DK
584 dma_unmap_page(&bp->pdev->dev,
585 dma_unmap_addr(&old_rx_pg, mapping),
924d75ab 586 SGE_PAGES, DMA_FROM_DEVICE);
9f6c9258 587 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
588 if (fp->mode == TPA_MODE_LRO)
589 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
590 else { /* GRO */
591 int rem;
592 int offset = 0;
593 for (rem = frag_len; rem > 0; rem -= gro_size) {
594 int len = rem > gro_size ? gro_size : rem;
595 skb_fill_page_desc(skb, frag_id++,
596 old_rx_pg.page, offset, len);
597 if (offset)
598 get_page(old_rx_pg.page);
599 offset += len;
600 }
601 }
9f6c9258
DK
602
603 skb->data_len += frag_len;
924d75ab 604 skb->truesize += SGE_PAGES;
9f6c9258
DK
605 skb->len += frag_len;
606
607 frag_size -= frag_len;
608 }
609
610 return 0;
611}
612
d46d132c
ED
613static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
614{
615 if (fp->rx_frag_size)
616 put_page(virt_to_head_page(data));
617 else
618 kfree(data);
619}
620
996dedba 621static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 622{
996dedba
MS
623 if (fp->rx_frag_size) {
624 /* GFP_KERNEL allocations are used only during initialization */
625 if (unlikely(gfp_mask & __GFP_WAIT))
626 return (void *)__get_free_page(gfp_mask);
627
d46d132c 628 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 629 }
d46d132c 630
996dedba 631 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
d46d132c
ED
632}
633
9969085e
YM
634#ifdef CONFIG_INET
635static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
636{
637 const struct iphdr *iph = ip_hdr(skb);
638 struct tcphdr *th;
639
640 skb_set_transport_header(skb, sizeof(struct iphdr));
641 th = tcp_hdr(skb);
642
643 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
644 iph->saddr, iph->daddr, 0);
645}
646
647static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
648{
649 struct ipv6hdr *iph = ipv6_hdr(skb);
650 struct tcphdr *th;
651
652 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
653 th = tcp_hdr(skb);
654
655 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
656 &iph->saddr, &iph->daddr, 0);
657}
2c2d06d5
YM
658
659static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
660 void (*gro_func)(struct bnx2x*, struct sk_buff*))
661{
662 skb_set_network_header(skb, 0);
663 gro_func(bp, skb);
664 tcp_gro_complete(skb);
665}
9969085e
YM
666#endif
667
668static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
669 struct sk_buff *skb)
670{
671#ifdef CONFIG_INET
cbf1de72 672 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
673 switch (be16_to_cpu(skb->protocol)) {
674 case ETH_P_IP:
2c2d06d5 675 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
676 break;
677 case ETH_P_IPV6:
2c2d06d5 678 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
679 break;
680 default:
2c2d06d5 681 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
682 be16_to_cpu(skb->protocol));
683 }
9969085e
YM
684 }
685#endif
60e66fee 686 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
687 napi_gro_receive(&fp->napi, skb);
688}
689
1191cb83
ED
690static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
691 struct bnx2x_agg_info *tpa_info,
692 u16 pages,
693 struct eth_end_agg_rx_cqe *cqe,
694 u16 cqe_idx)
9f6c9258 695{
619c5cb6 696 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 697 u8 pad = tpa_info->placement_offset;
619c5cb6 698 u16 len = tpa_info->len_on_bd;
e52fcb24 699 struct sk_buff *skb = NULL;
621b4d66 700 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
701 u8 old_tpa_state = tpa_info->tpa_state;
702
703 tpa_info->tpa_state = BNX2X_TPA_STOP;
704
705 /* If we there was an error during the handling of the TPA_START -
706 * drop this aggregation.
707 */
708 if (old_tpa_state == BNX2X_TPA_ERROR)
709 goto drop;
710
e52fcb24 711 /* Try to allocate the new data */
996dedba 712 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
713 /* Unmap skb in the pool anyway, as we are going to change
714 pool entry status to BNX2X_TPA_STOP even if new skb allocation
715 fails. */
716 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 717 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 718 if (likely(new_data))
d46d132c 719 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 720
e52fcb24 721 if (likely(skb)) {
9f6c9258 722#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 723 if (pad + len > fp->rx_buf_size) {
51c1a580 724 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 725 pad, len, fp->rx_buf_size);
9f6c9258
DK
726 bnx2x_panic();
727 return;
728 }
729#endif
730
e52fcb24 731 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 732 skb_put(skb, len);
5495ab75 733 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
9f6c9258
DK
734
735 skb->protocol = eth_type_trans(skb, bp->dev);
736 skb->ip_summed = CHECKSUM_UNNECESSARY;
737
621b4d66
DK
738 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
739 skb, cqe, cqe_idx)) {
619c5cb6 740 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 741 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 742 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 743 } else {
51c1a580
MS
744 DP(NETIF_MSG_RX_STATUS,
745 "Failed to allocate new pages - dropping packet!\n");
40955532 746 dev_kfree_skb_any(skb);
9f6c9258
DK
747 }
748
e52fcb24
ED
749 /* put new data in bin */
750 rx_buf->data = new_data;
9f6c9258 751
619c5cb6 752 return;
9f6c9258 753 }
d46d132c 754 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
755drop:
756 /* drop the packet and keep the buffer in the bin */
757 DP(NETIF_MSG_RX_STATUS,
758 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 759 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
760}
761
996dedba
MS
762static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
763 u16 index, gfp_t gfp_mask)
1191cb83
ED
764{
765 u8 *data;
766 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
767 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
768 dma_addr_t mapping;
769
996dedba 770 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
771 if (unlikely(data == NULL))
772 return -ENOMEM;
773
774 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
775 fp->rx_buf_size,
776 DMA_FROM_DEVICE);
777 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 778 bnx2x_frag_free(fp, data);
1191cb83
ED
779 BNX2X_ERR("Can't map rx data\n");
780 return -ENOMEM;
781 }
782
783 rx_buf->data = data;
784 dma_unmap_addr_set(rx_buf, mapping, mapping);
785
786 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
787 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
788
789 return 0;
790}
791
15192a8c
BW
792static
793void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
794 struct bnx2x_fastpath *fp,
795 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 796{
e488921f
MS
797 /* Do nothing if no L4 csum validation was done.
798 * We do not check whether IP csum was validated. For IPv4 we assume
799 * that if the card got as far as validating the L4 csum, it also
800 * validated the IP csum. IPv6 has no IP csum.
801 */
d6cb3e41 802 if (cqe->fast_path_cqe.status_flags &
e488921f 803 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
804 return;
805
e488921f 806 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
807
808 if (cqe->fast_path_cqe.type_error_flags &
809 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
810 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 811 qstats->hw_csum_err++;
d6cb3e41
ED
812 else
813 skb->ip_summed = CHECKSUM_UNNECESSARY;
814}
9f6c9258
DK
815
816int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
817{
818 struct bnx2x *bp = fp->bp;
819 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 820 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 821 int rx_pkt = 0;
75b29459
DK
822 union eth_rx_cqe *cqe;
823 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
824
825#ifdef BNX2X_STOP_ON_ERROR
826 if (unlikely(bp->panic))
827 return 0;
828#endif
829
9f6c9258
DK
830 bd_cons = fp->rx_bd_cons;
831 bd_prod = fp->rx_bd_prod;
832 bd_prod_fw = bd_prod;
833 sw_comp_cons = fp->rx_comp_cons;
834 sw_comp_prod = fp->rx_comp_prod;
835
75b29459
DK
836 comp_ring_cons = RCQ_BD(sw_comp_cons);
837 cqe = &fp->rx_comp_ring[comp_ring_cons];
838 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
839
840 DP(NETIF_MSG_RX_STATUS,
75b29459 841 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 842
75b29459 843 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
844 struct sw_rx_bd *rx_buf = NULL;
845 struct sk_buff *skb;
9f6c9258 846 u8 cqe_fp_flags;
619c5cb6 847 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 848 u16 len, pad, queue;
e52fcb24 849 u8 *data;
bd5cef03 850 u32 rxhash;
5495ab75 851 enum pkt_hash_types rxhash_type;
9f6c9258 852
619c5cb6
VZ
853#ifdef BNX2X_STOP_ON_ERROR
854 if (unlikely(bp->panic))
855 return 0;
856#endif
857
9f6c9258
DK
858 bd_prod = RX_BD(bd_prod);
859 bd_cons = RX_BD(bd_cons);
860
619c5cb6
VZ
861 cqe_fp_flags = cqe_fp->type_error_flags;
862 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 863
51c1a580
MS
864 DP(NETIF_MSG_RX_STATUS,
865 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
866 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
867 cqe_fp_flags, cqe_fp->status_flags,
868 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
869 le16_to_cpu(cqe_fp->vlan_tag),
870 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
871
872 /* is this a slowpath msg? */
619c5cb6 873 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
874 bnx2x_sp_event(fp, cqe);
875 goto next_cqe;
e52fcb24 876 }
621b4d66 877
e52fcb24
ED
878 rx_buf = &fp->rx_buf_ring[bd_cons];
879 data = rx_buf->data;
9f6c9258 880
e52fcb24 881 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
882 struct bnx2x_agg_info *tpa_info;
883 u16 frag_size, pages;
619c5cb6 884#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
885 /* sanity check */
886 if (fp->disable_tpa &&
887 (CQE_TYPE_START(cqe_fp_type) ||
888 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 889 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 890 CQE_TYPE(cqe_fp_type));
619c5cb6 891#endif
9f6c9258 892
e52fcb24
ED
893 if (CQE_TYPE_START(cqe_fp_type)) {
894 u16 queue = cqe_fp->queue_index;
895 DP(NETIF_MSG_RX_STATUS,
896 "calling tpa_start on queue %d\n",
897 queue);
9f6c9258 898
e52fcb24
ED
899 bnx2x_tpa_start(fp, queue,
900 bd_cons, bd_prod,
901 cqe_fp);
621b4d66 902
e52fcb24 903 goto next_rx;
621b4d66
DK
904 }
905 queue = cqe->end_agg_cqe.queue_index;
906 tpa_info = &fp->tpa_info[queue];
907 DP(NETIF_MSG_RX_STATUS,
908 "calling tpa_stop on queue %d\n",
909 queue);
910
911 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
912 tpa_info->len_on_bd;
913
914 if (fp->mode == TPA_MODE_GRO)
915 pages = (frag_size + tpa_info->full_page - 1) /
916 tpa_info->full_page;
917 else
918 pages = SGE_PAGE_ALIGN(frag_size) >>
919 SGE_PAGE_SHIFT;
920
921 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
922 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 923#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
924 if (bp->panic)
925 return 0;
9f6c9258
DK
926#endif
927
621b4d66
DK
928 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
929 goto next_cqe;
e52fcb24
ED
930 }
931 /* non TPA */
621b4d66 932 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
933 pad = cqe_fp->placement_offset;
934 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 935 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
936 pad + RX_COPY_THRESH,
937 DMA_FROM_DEVICE);
938 pad += NET_SKB_PAD;
939 prefetch(data + pad); /* speedup eth_type_trans() */
940 /* is this an error packet? */
941 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 942 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
943 "ERROR flags %x rx packet %u\n",
944 cqe_fp_flags, sw_comp_cons);
15192a8c 945 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
946 goto reuse_rx;
947 }
9f6c9258 948
e52fcb24
ED
949 /* Since we don't have a jumbo ring
950 * copy small packets if mtu > 1500
951 */
952 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
953 (len <= RX_COPY_THRESH)) {
954 skb = netdev_alloc_skb_ip_align(bp->dev, len);
955 if (skb == NULL) {
51c1a580 956 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 957 "ERROR packet dropped because of alloc failure\n");
15192a8c 958 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
959 goto reuse_rx;
960 }
e52fcb24
ED
961 memcpy(skb->data, data + pad, len);
962 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
963 } else {
996dedba
MS
964 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
965 GFP_ATOMIC) == 0)) {
9f6c9258 966 dma_unmap_single(&bp->pdev->dev,
e52fcb24 967 dma_unmap_addr(rx_buf, mapping),
a8c94b91 968 fp->rx_buf_size,
9f6c9258 969 DMA_FROM_DEVICE);
d46d132c 970 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 971 if (unlikely(!skb)) {
d46d132c 972 bnx2x_frag_free(fp, data);
15192a8c
BW
973 bnx2x_fp_qstats(bp, fp)->
974 rx_skb_alloc_failed++;
e52fcb24
ED
975 goto next_rx;
976 }
9f6c9258 977 skb_reserve(skb, pad);
9f6c9258 978 } else {
51c1a580
MS
979 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
980 "ERROR packet dropped because of alloc failure\n");
15192a8c 981 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 982reuse_rx:
e52fcb24 983 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
984 goto next_rx;
985 }
036d2df9 986 }
9f6c9258 987
036d2df9
DK
988 skb_put(skb, len);
989 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 990
036d2df9 991 /* Set Toeplitz hash for a none-LRO skb */
5495ab75
TH
992 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
993 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 994
036d2df9 995 skb_checksum_none_assert(skb);
f85582f8 996
d6cb3e41 997 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
998 bnx2x_csum_validate(skb, cqe, fp,
999 bnx2x_fp_qstats(bp, fp));
9f6c9258 1000
f233cafe 1001 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1002
619c5cb6
VZ
1003 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1004 PARSING_FLAGS_VLAN)
86a9bad3 1005 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1006 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1007
8b80cda5 1008 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1009
1010 if (bnx2x_fp_ll_polling(fp))
1011 netif_receive_skb(skb);
1012 else
1013 napi_gro_receive(&fp->napi, skb);
9f6c9258 1014next_rx:
e52fcb24 1015 rx_buf->data = NULL;
9f6c9258
DK
1016
1017 bd_cons = NEXT_RX_IDX(bd_cons);
1018 bd_prod = NEXT_RX_IDX(bd_prod);
1019 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1020 rx_pkt++;
1021next_cqe:
1022 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1023 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1024
75b29459
DK
1025 /* mark CQE as free */
1026 BNX2X_SEED_CQE(cqe_fp);
1027
9f6c9258
DK
1028 if (rx_pkt == budget)
1029 break;
75b29459
DK
1030
1031 comp_ring_cons = RCQ_BD(sw_comp_cons);
1032 cqe = &fp->rx_comp_ring[comp_ring_cons];
1033 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1034 } /* while */
1035
1036 fp->rx_bd_cons = bd_cons;
1037 fp->rx_bd_prod = bd_prod_fw;
1038 fp->rx_comp_cons = sw_comp_cons;
1039 fp->rx_comp_prod = sw_comp_prod;
1040
1041 /* Update producers */
1042 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1043 fp->rx_sge_prod);
1044
1045 fp->rx_pkt += rx_pkt;
1046 fp->rx_calls++;
1047
1048 return rx_pkt;
1049}
1050
1051static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1052{
1053 struct bnx2x_fastpath *fp = fp_cookie;
1054 struct bnx2x *bp = fp->bp;
6383c0b3 1055 u8 cos;
9f6c9258 1056
51c1a580
MS
1057 DP(NETIF_MSG_INTR,
1058 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1059 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1060
523224a3 1061 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1062
1063#ifdef BNX2X_STOP_ON_ERROR
1064 if (unlikely(bp->panic))
1065 return IRQ_HANDLED;
1066#endif
1067
1068 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1069 for_each_cos_in_tx_queue(fp, cos)
65565884 1070 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1071
523224a3 1072 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
1073 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1074
1075 return IRQ_HANDLED;
1076}
1077
9f6c9258
DK
1078/* HW Lock for shared dual port PHYs */
1079void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1080{
1081 mutex_lock(&bp->port.phy_mutex);
1082
8203c4b6 1083 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1084}
1085
1086void bnx2x_release_phy_lock(struct bnx2x *bp)
1087{
8203c4b6 1088 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1089
1090 mutex_unlock(&bp->port.phy_mutex);
1091}
1092
0793f83f
DK
1093/* calculates MF speed according to current linespeed and MF configuration */
1094u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1095{
1096 u16 line_speed = bp->link_vars.line_speed;
1097 if (IS_MF(bp)) {
faa6fcbb
DK
1098 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1099 bp->mf_config[BP_VN(bp)]);
1100
1101 /* Calculate the current MAX line speed limit for the MF
1102 * devices
0793f83f 1103 */
faa6fcbb
DK
1104 if (IS_MF_SI(bp))
1105 line_speed = (line_speed * maxCfg) / 100;
1106 else { /* SD mode */
0793f83f
DK
1107 u16 vn_max_rate = maxCfg * 100;
1108
1109 if (vn_max_rate < line_speed)
1110 line_speed = vn_max_rate;
faa6fcbb 1111 }
0793f83f
DK
1112 }
1113
1114 return line_speed;
1115}
1116
2ae17f66
VZ
1117/**
1118 * bnx2x_fill_report_data - fill link report data to report
1119 *
1120 * @bp: driver handle
1121 * @data: link state to update
1122 *
1123 * It uses a none-atomic bit operations because is called under the mutex.
1124 */
1191cb83
ED
1125static void bnx2x_fill_report_data(struct bnx2x *bp,
1126 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1127{
1128 u16 line_speed = bnx2x_get_mf_speed(bp);
1129
1130 memset(data, 0, sizeof(*data));
1131
16a5fd92 1132 /* Fill the report data: effective line speed */
2ae17f66
VZ
1133 data->line_speed = line_speed;
1134
1135 /* Link is down */
1136 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1137 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1138 &data->link_report_flags);
1139
1140 /* Full DUPLEX */
1141 if (bp->link_vars.duplex == DUPLEX_FULL)
1142 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1143
1144 /* Rx Flow Control is ON */
1145 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1146 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1147
1148 /* Tx Flow Control is ON */
1149 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1150 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1151}
1152
1153/**
1154 * bnx2x_link_report - report link status to OS.
1155 *
1156 * @bp: driver handle
1157 *
1158 * Calls the __bnx2x_link_report() under the same locking scheme
1159 * as a link/PHY state managing code to ensure a consistent link
1160 * reporting.
1161 */
1162
9f6c9258
DK
1163void bnx2x_link_report(struct bnx2x *bp)
1164{
2ae17f66
VZ
1165 bnx2x_acquire_phy_lock(bp);
1166 __bnx2x_link_report(bp);
1167 bnx2x_release_phy_lock(bp);
1168}
9f6c9258 1169
2ae17f66
VZ
1170/**
1171 * __bnx2x_link_report - report link status to OS.
1172 *
1173 * @bp: driver handle
1174 *
16a5fd92 1175 * None atomic implementation.
2ae17f66
VZ
1176 * Should be called under the phy_lock.
1177 */
1178void __bnx2x_link_report(struct bnx2x *bp)
1179{
1180 struct bnx2x_link_report_data cur_data;
9f6c9258 1181
2ae17f66 1182 /* reread mf_cfg */
ad5afc89 1183 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1184 bnx2x_read_mf_cfg(bp);
1185
1186 /* Read the current link report info */
1187 bnx2x_fill_report_data(bp, &cur_data);
1188
1189 /* Don't report link down or exactly the same link status twice */
1190 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1191 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1192 &bp->last_reported_link.link_report_flags) &&
1193 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1194 &cur_data.link_report_flags)))
1195 return;
1196
1197 bp->link_cnt++;
9f6c9258 1198
2ae17f66
VZ
1199 /* We are going to report a new link parameters now -
1200 * remember the current data for the next time.
1201 */
1202 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1203
2ae17f66
VZ
1204 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1205 &cur_data.link_report_flags)) {
1206 netif_carrier_off(bp->dev);
1207 netdev_err(bp->dev, "NIC Link is Down\n");
1208 return;
1209 } else {
94f05b0f
JP
1210 const char *duplex;
1211 const char *flow;
1212
2ae17f66 1213 netif_carrier_on(bp->dev);
9f6c9258 1214
2ae17f66
VZ
1215 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1216 &cur_data.link_report_flags))
94f05b0f 1217 duplex = "full";
9f6c9258 1218 else
94f05b0f 1219 duplex = "half";
9f6c9258 1220
2ae17f66
VZ
1221 /* Handle the FC at the end so that only these flags would be
1222 * possibly set. This way we may easily check if there is no FC
1223 * enabled.
1224 */
1225 if (cur_data.link_report_flags) {
1226 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1227 &cur_data.link_report_flags)) {
2ae17f66
VZ
1228 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1229 &cur_data.link_report_flags))
94f05b0f
JP
1230 flow = "ON - receive & transmit";
1231 else
1232 flow = "ON - receive";
9f6c9258 1233 } else {
94f05b0f 1234 flow = "ON - transmit";
9f6c9258 1235 }
94f05b0f
JP
1236 } else {
1237 flow = "none";
9f6c9258 1238 }
94f05b0f
JP
1239 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1240 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1241 }
1242}
1243
1191cb83
ED
1244static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1245{
1246 int i;
1247
1248 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1249 struct eth_rx_sge *sge;
1250
1251 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1252 sge->addr_hi =
1253 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1254 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1255
1256 sge->addr_lo =
1257 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1258 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1259 }
1260}
1261
1262static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1263 struct bnx2x_fastpath *fp, int last)
1264{
1265 int i;
1266
1267 for (i = 0; i < last; i++) {
1268 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1269 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1270 u8 *data = first_buf->data;
1271
1272 if (data == NULL) {
1273 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1274 continue;
1275 }
1276 if (tpa_info->tpa_state == BNX2X_TPA_START)
1277 dma_unmap_single(&bp->pdev->dev,
1278 dma_unmap_addr(first_buf, mapping),
1279 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1280 bnx2x_frag_free(fp, data);
1191cb83
ED
1281 first_buf->data = NULL;
1282 }
1283}
1284
55c11941
MS
1285void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1286{
1287 int j;
1288
1289 for_each_rx_queue_cnic(bp, j) {
1290 struct bnx2x_fastpath *fp = &bp->fp[j];
1291
1292 fp->rx_bd_cons = 0;
1293
1294 /* Activate BD ring */
1295 /* Warning!
1296 * this will generate an interrupt (to the TSTORM)
1297 * must only be done after chip is initialized
1298 */
1299 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1300 fp->rx_sge_prod);
1301 }
1302}
1303
9f6c9258
DK
1304void bnx2x_init_rx_rings(struct bnx2x *bp)
1305{
1306 int func = BP_FUNC(bp);
523224a3 1307 u16 ring_prod;
9f6c9258 1308 int i, j;
25141580 1309
b3b83c3f 1310 /* Allocate TPA resources */
55c11941 1311 for_each_eth_queue(bp, j) {
523224a3 1312 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1313
a8c94b91
VZ
1314 DP(NETIF_MSG_IFUP,
1315 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1316
523224a3 1317 if (!fp->disable_tpa) {
16a5fd92 1318 /* Fill the per-aggregation pool */
dfacf138 1319 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1320 struct bnx2x_agg_info *tpa_info =
1321 &fp->tpa_info[i];
1322 struct sw_rx_bd *first_buf =
1323 &tpa_info->first_buf;
1324
996dedba
MS
1325 first_buf->data =
1326 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1327 if (!first_buf->data) {
51c1a580
MS
1328 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1329 j);
9f6c9258
DK
1330 bnx2x_free_tpa_pool(bp, fp, i);
1331 fp->disable_tpa = 1;
1332 break;
1333 }
619c5cb6
VZ
1334 dma_unmap_addr_set(first_buf, mapping, 0);
1335 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1336 }
523224a3
DK
1337
1338 /* "next page" elements initialization */
1339 bnx2x_set_next_page_sgl(fp);
1340
1341 /* set SGEs bit mask */
1342 bnx2x_init_sge_ring_bit_mask(fp);
1343
1344 /* Allocate SGEs and initialize the ring elements */
1345 for (i = 0, ring_prod = 0;
1346 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1347
996dedba
MS
1348 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1349 GFP_KERNEL) < 0) {
51c1a580
MS
1350 BNX2X_ERR("was only able to allocate %d rx sges\n",
1351 i);
1352 BNX2X_ERR("disabling TPA for queue[%d]\n",
1353 j);
523224a3 1354 /* Cleanup already allocated elements */
619c5cb6
VZ
1355 bnx2x_free_rx_sge_range(bp, fp,
1356 ring_prod);
1357 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1358 MAX_AGG_QS(bp));
523224a3
DK
1359 fp->disable_tpa = 1;
1360 ring_prod = 0;
1361 break;
1362 }
1363 ring_prod = NEXT_SGE_IDX(ring_prod);
1364 }
1365
1366 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1367 }
1368 }
1369
55c11941 1370 for_each_eth_queue(bp, j) {
9f6c9258
DK
1371 struct bnx2x_fastpath *fp = &bp->fp[j];
1372
1373 fp->rx_bd_cons = 0;
9f6c9258 1374
b3b83c3f
DK
1375 /* Activate BD ring */
1376 /* Warning!
1377 * this will generate an interrupt (to the TSTORM)
1378 * must only be done after chip is initialized
1379 */
1380 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1381 fp->rx_sge_prod);
9f6c9258 1382
9f6c9258
DK
1383 if (j != 0)
1384 continue;
1385
619c5cb6 1386 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1387 REG_WR(bp, BAR_USTRORM_INTMEM +
1388 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1389 U64_LO(fp->rx_comp_mapping));
1390 REG_WR(bp, BAR_USTRORM_INTMEM +
1391 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1392 U64_HI(fp->rx_comp_mapping));
1393 }
9f6c9258
DK
1394 }
1395}
f85582f8 1396
55c11941 1397static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1398{
6383c0b3 1399 u8 cos;
55c11941 1400 struct bnx2x *bp = fp->bp;
9f6c9258 1401
55c11941
MS
1402 for_each_cos_in_tx_queue(fp, cos) {
1403 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1404 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1405
55c11941
MS
1406 u16 sw_prod = txdata->tx_pkt_prod;
1407 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1408
55c11941
MS
1409 while (sw_cons != sw_prod) {
1410 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1411 &pkts_compl, &bytes_compl);
1412 sw_cons++;
9f6c9258 1413 }
55c11941
MS
1414
1415 netdev_tx_reset_queue(
1416 netdev_get_tx_queue(bp->dev,
1417 txdata->txq_index));
1418 }
1419}
1420
1421static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1422{
1423 int i;
1424
1425 for_each_tx_queue_cnic(bp, i) {
1426 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1427 }
1428}
1429
1430static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1431{
1432 int i;
1433
1434 for_each_eth_queue(bp, i) {
1435 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1436 }
1437}
1438
b3b83c3f
DK
1439static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1440{
1441 struct bnx2x *bp = fp->bp;
1442 int i;
1443
1444 /* ring wasn't allocated */
1445 if (fp->rx_buf_ring == NULL)
1446 return;
1447
1448 for (i = 0; i < NUM_RX_BD; i++) {
1449 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1450 u8 *data = rx_buf->data;
b3b83c3f 1451
e52fcb24 1452 if (data == NULL)
b3b83c3f 1453 continue;
b3b83c3f
DK
1454 dma_unmap_single(&bp->pdev->dev,
1455 dma_unmap_addr(rx_buf, mapping),
1456 fp->rx_buf_size, DMA_FROM_DEVICE);
1457
e52fcb24 1458 rx_buf->data = NULL;
d46d132c 1459 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1460 }
1461}
1462
55c11941
MS
1463static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1464{
1465 int j;
1466
1467 for_each_rx_queue_cnic(bp, j) {
1468 bnx2x_free_rx_bds(&bp->fp[j]);
1469 }
1470}
1471
9f6c9258
DK
1472static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1473{
b3b83c3f 1474 int j;
9f6c9258 1475
55c11941 1476 for_each_eth_queue(bp, j) {
9f6c9258
DK
1477 struct bnx2x_fastpath *fp = &bp->fp[j];
1478
b3b83c3f 1479 bnx2x_free_rx_bds(fp);
9f6c9258 1480
9f6c9258 1481 if (!fp->disable_tpa)
dfacf138 1482 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1483 }
1484}
1485
55c11941
MS
1486void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1487{
1488 bnx2x_free_tx_skbs_cnic(bp);
1489 bnx2x_free_rx_skbs_cnic(bp);
1490}
1491
9f6c9258
DK
1492void bnx2x_free_skbs(struct bnx2x *bp)
1493{
1494 bnx2x_free_tx_skbs(bp);
1495 bnx2x_free_rx_skbs(bp);
1496}
1497
e3835b99
DK
1498void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1499{
1500 /* load old values */
1501 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1502
1503 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1504 /* leave all but MAX value */
1505 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1506
1507 /* set new MAX value */
1508 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1509 & FUNC_MF_CFG_MAX_BW_MASK;
1510
1511 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1512 }
1513}
1514
ca92429f
DK
1515/**
1516 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1517 *
1518 * @bp: driver handle
1519 * @nvecs: number of vectors to be released
1520 */
1521static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1522{
ca92429f 1523 int i, offset = 0;
9f6c9258 1524
ca92429f
DK
1525 if (nvecs == offset)
1526 return;
ad5afc89
AE
1527
1528 /* VFs don't have a default SB */
1529 if (IS_PF(bp)) {
1530 free_irq(bp->msix_table[offset].vector, bp->dev);
1531 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1532 bp->msix_table[offset].vector);
1533 offset++;
1534 }
55c11941
MS
1535
1536 if (CNIC_SUPPORT(bp)) {
1537 if (nvecs == offset)
1538 return;
1539 offset++;
1540 }
ca92429f 1541
ec6ba945 1542 for_each_eth_queue(bp, i) {
ca92429f
DK
1543 if (nvecs == offset)
1544 return;
51c1a580
MS
1545 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1546 i, bp->msix_table[offset].vector);
9f6c9258 1547
ca92429f 1548 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1549 }
1550}
1551
d6214d7a 1552void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1553{
30a5de77 1554 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1555 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1556 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1557
1558 /* vfs don't have a default status block */
1559 if (IS_PF(bp))
1560 nvecs++;
1561
1562 bnx2x_free_msix_irqs(bp, nvecs);
1563 } else {
30a5de77 1564 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1565 }
9f6c9258
DK
1566}
1567
0e8d2ec5 1568int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1569{
1ab4434c 1570 int msix_vec = 0, i, rc;
9f6c9258 1571
1ab4434c
AE
1572 /* VFs don't have a default status block */
1573 if (IS_PF(bp)) {
1574 bp->msix_table[msix_vec].entry = msix_vec;
1575 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1576 bp->msix_table[0].entry);
1577 msix_vec++;
1578 }
9f6c9258 1579
55c11941
MS
1580 /* Cnic requires an msix vector for itself */
1581 if (CNIC_SUPPORT(bp)) {
1582 bp->msix_table[msix_vec].entry = msix_vec;
1583 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1584 msix_vec, bp->msix_table[msix_vec].entry);
1585 msix_vec++;
1586 }
1587
6383c0b3 1588 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1589 for_each_eth_queue(bp, i) {
d6214d7a 1590 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1591 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1592 msix_vec, msix_vec, i);
d6214d7a 1593 msix_vec++;
9f6c9258
DK
1594 }
1595
1ab4434c
AE
1596 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1597 msix_vec);
d6214d7a 1598
1ab4434c 1599 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1600
1601 /*
1602 * reconfigure number of tx/rx queues according to available
1603 * MSI-X vectors
1604 */
55c11941 1605 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1606 /* how less vectors we will have? */
1ab4434c 1607 int diff = msix_vec - rc;
9f6c9258 1608
51c1a580 1609 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1610
1611 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1612
1613 if (rc) {
30a5de77
DK
1614 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1615 goto no_msix;
9f6c9258 1616 }
d6214d7a
DK
1617 /*
1618 * decrease number of queues by number of unallocated entries
1619 */
55c11941
MS
1620 bp->num_ethernet_queues -= diff;
1621 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1622
51c1a580 1623 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1624 bp->num_queues);
1625 } else if (rc > 0) {
1626 /* Get by with single vector */
1627 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1628 if (rc) {
1629 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1630 rc);
1631 goto no_msix;
1632 }
1633
1634 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1635 bp->flags |= USING_SINGLE_MSIX_FLAG;
1636
55c11941
MS
1637 BNX2X_DEV_INFO("set number of queues to 1\n");
1638 bp->num_ethernet_queues = 1;
1639 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1640 } else if (rc < 0) {
51c1a580 1641 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1642 goto no_msix;
9f6c9258
DK
1643 }
1644
1645 bp->flags |= USING_MSIX_FLAG;
1646
1647 return 0;
30a5de77
DK
1648
1649no_msix:
1650 /* fall to INTx if not enough memory */
1651 if (rc == -ENOMEM)
1652 bp->flags |= DISABLE_MSI_FLAG;
1653
1654 return rc;
9f6c9258
DK
1655}
1656
1657static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1658{
ca92429f 1659 int i, rc, offset = 0;
9f6c9258 1660
ad5afc89
AE
1661 /* no default status block for vf */
1662 if (IS_PF(bp)) {
1663 rc = request_irq(bp->msix_table[offset++].vector,
1664 bnx2x_msix_sp_int, 0,
1665 bp->dev->name, bp->dev);
1666 if (rc) {
1667 BNX2X_ERR("request sp irq failed\n");
1668 return -EBUSY;
1669 }
9f6c9258
DK
1670 }
1671
55c11941
MS
1672 if (CNIC_SUPPORT(bp))
1673 offset++;
1674
ec6ba945 1675 for_each_eth_queue(bp, i) {
9f6c9258
DK
1676 struct bnx2x_fastpath *fp = &bp->fp[i];
1677 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1678 bp->dev->name, i);
1679
d6214d7a 1680 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1681 bnx2x_msix_fp_int, 0, fp->name, fp);
1682 if (rc) {
ca92429f
DK
1683 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1684 bp->msix_table[offset].vector, rc);
1685 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1686 return -EBUSY;
1687 }
1688
d6214d7a 1689 offset++;
9f6c9258
DK
1690 }
1691
ec6ba945 1692 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1693 if (IS_PF(bp)) {
1694 offset = 1 + CNIC_SUPPORT(bp);
1695 netdev_info(bp->dev,
1696 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1697 bp->msix_table[0].vector,
1698 0, bp->msix_table[offset].vector,
1699 i - 1, bp->msix_table[offset + i - 1].vector);
1700 } else {
1701 offset = CNIC_SUPPORT(bp);
1702 netdev_info(bp->dev,
1703 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1704 0, bp->msix_table[offset].vector,
1705 i - 1, bp->msix_table[offset + i - 1].vector);
1706 }
9f6c9258
DK
1707 return 0;
1708}
1709
d6214d7a 1710int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1711{
1712 int rc;
1713
1714 rc = pci_enable_msi(bp->pdev);
1715 if (rc) {
51c1a580 1716 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1717 return -1;
1718 }
1719 bp->flags |= USING_MSI_FLAG;
1720
1721 return 0;
1722}
1723
1724static int bnx2x_req_irq(struct bnx2x *bp)
1725{
1726 unsigned long flags;
30a5de77 1727 unsigned int irq;
9f6c9258 1728
30a5de77 1729 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1730 flags = 0;
1731 else
1732 flags = IRQF_SHARED;
1733
30a5de77
DK
1734 if (bp->flags & USING_MSIX_FLAG)
1735 irq = bp->msix_table[0].vector;
1736 else
1737 irq = bp->pdev->irq;
1738
1739 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1740}
1741
c957d09f 1742static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1743{
1744 int rc = 0;
30a5de77
DK
1745 if (bp->flags & USING_MSIX_FLAG &&
1746 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1747 rc = bnx2x_req_msix_irqs(bp);
1748 if (rc)
1749 return rc;
1750 } else {
619c5cb6
VZ
1751 rc = bnx2x_req_irq(bp);
1752 if (rc) {
1753 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1754 return rc;
1755 }
1756 if (bp->flags & USING_MSI_FLAG) {
1757 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1758 netdev_info(bp->dev, "using MSI IRQ %d\n",
1759 bp->dev->irq);
1760 }
1761 if (bp->flags & USING_MSIX_FLAG) {
1762 bp->dev->irq = bp->msix_table[0].vector;
1763 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1764 bp->dev->irq);
619c5cb6
VZ
1765 }
1766 }
1767
1768 return 0;
1769}
1770
55c11941
MS
1771static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1772{
1773 int i;
1774
8f20aa57
DK
1775 for_each_rx_queue_cnic(bp, i) {
1776 bnx2x_fp_init_lock(&bp->fp[i]);
55c11941 1777 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1778 }
55c11941
MS
1779}
1780
1191cb83 1781static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1782{
1783 int i;
1784
8f20aa57
DK
1785 for_each_eth_queue(bp, i) {
1786 bnx2x_fp_init_lock(&bp->fp[i]);
9f6c9258 1787 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1788 }
9f6c9258
DK
1789}
1790
55c11941
MS
1791static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1792{
1793 int i;
1794
8f20aa57
DK
1795 local_bh_disable();
1796 for_each_rx_queue_cnic(bp, i) {
55c11941 1797 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1798 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1799 mdelay(1);
1800 }
1801 local_bh_enable();
55c11941
MS
1802}
1803
1191cb83 1804static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1805{
1806 int i;
1807
8f20aa57
DK
1808 local_bh_disable();
1809 for_each_eth_queue(bp, i) {
9f6c9258 1810 napi_disable(&bnx2x_fp(bp, i, napi));
8f20aa57
DK
1811 while (!bnx2x_fp_lock_napi(&bp->fp[i]))
1812 mdelay(1);
1813 }
1814 local_bh_enable();
9f6c9258
DK
1815}
1816
1817void bnx2x_netif_start(struct bnx2x *bp)
1818{
4b7ed897
DK
1819 if (netif_running(bp->dev)) {
1820 bnx2x_napi_enable(bp);
55c11941
MS
1821 if (CNIC_LOADED(bp))
1822 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1823 bnx2x_int_enable(bp);
1824 if (bp->state == BNX2X_STATE_OPEN)
1825 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1826 }
1827}
1828
1829void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1830{
1831 bnx2x_int_disable_sync(bp, disable_hw);
1832 bnx2x_napi_disable(bp);
55c11941
MS
1833 if (CNIC_LOADED(bp))
1834 bnx2x_napi_disable_cnic(bp);
9f6c9258 1835}
9f6c9258 1836
8307fa3e
VZ
1837u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1838{
8307fa3e 1839 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1840
55c11941 1841 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1842 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1843 u16 ether_type = ntohs(hdr->h_proto);
1844
1845 /* Skip VLAN tag if present */
1846 if (ether_type == ETH_P_8021Q) {
1847 struct vlan_ethhdr *vhdr =
1848 (struct vlan_ethhdr *)skb->data;
1849
1850 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1851 }
1852
1853 /* If ethertype is FCoE or FIP - use FCoE ring */
1854 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1855 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1856 }
55c11941 1857
cdb9d6ae 1858 /* select a non-FCoE queue */
ada7c19e 1859 return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1860}
1861
d6214d7a
DK
1862void bnx2x_set_num_queues(struct bnx2x *bp)
1863{
96305234 1864 /* RSS queues */
55c11941 1865 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1866
a3348722
BW
1867 /* override in STORAGE SD modes */
1868 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1869 bp->num_ethernet_queues = 1;
1870
ec6ba945 1871 /* Add special queues */
55c11941
MS
1872 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1873 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1874
1875 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1876}
1877
cdb9d6ae
VZ
1878/**
1879 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1880 *
1881 * @bp: Driver handle
1882 *
1883 * We currently support at most 16 Tx queues for each CoS, thus we will
1884 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1885 * bp->max_cos.
1886 *
1887 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1888 * index after all ETH L2 indices.
1889 *
1890 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1891 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1892 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1893 *
1894 * The proper configuration of skb->queue_mapping is handled by
1895 * bnx2x_select_queue() and __skb_tx_hash().
1896 *
1897 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1898 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1899 */
55c11941 1900static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1901{
6383c0b3 1902 int rc, tx, rx;
ec6ba945 1903
65565884 1904 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1905 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1906
6383c0b3 1907/* account for fcoe queue */
55c11941
MS
1908 if (include_cnic && !NO_FCOE(bp)) {
1909 rx++;
1910 tx++;
6383c0b3 1911 }
6383c0b3
AE
1912
1913 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1914 if (rc) {
1915 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1916 return rc;
1917 }
1918 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1919 if (rc) {
1920 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1921 return rc;
1922 }
1923
51c1a580 1924 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1925 tx, rx);
1926
ec6ba945
VZ
1927 return rc;
1928}
1929
1191cb83 1930static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1931{
1932 int i;
1933
1934 for_each_queue(bp, i) {
1935 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1936 u32 mtu;
a8c94b91
VZ
1937
1938 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1939 if (IS_FCOE_IDX(i))
1940 /*
1941 * Although there are no IP frames expected to arrive on
1942 * this ring, we still want to add an
1943 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1944 * overrun attack.
1945 */
e52fcb24 1946 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1947 else
e52fcb24
ED
1948 mtu = bp->dev->mtu;
1949 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1950 IP_HEADER_ALIGNMENT_PADDING +
1951 ETH_OVREHEAD +
1952 mtu +
1953 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1954 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1955 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1956 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1957 else
1958 fp->rx_frag_size = 0;
a8c94b91
VZ
1959 }
1960}
1961
60cad4e6 1962static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1963{
1964 int i;
619c5cb6
VZ
1965 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1966
16a5fd92 1967 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
1968 * enabled
1969 */
5d317c6a
MS
1970 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1971 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1972 bp->fp->cl_id +
1973 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1974
1975 /*
1976 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1977 * per-port, so if explicit configuration is needed, do it only
1978 * for a PMF.
1979 *
1980 * For 57712 and newer on the other hand it's a per-function
1981 * configuration.
1982 */
5d317c6a 1983 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1984}
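/* Illustrative note (not part of the original driver source):
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so with, say, 4 ETH
 * queues the indirection table built above cycles through
 *
 *	cl_id + 0, cl_id + 1, cl_id + 2, cl_id + 3, cl_id + 0, ...
 *
 * spreading the RSS hash buckets evenly across the ETH client IDs.
 */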
1985
60cad4e6
AE
1986int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1987 bool config_hash, bool enable)
619c5cb6 1988{
3b603066 1989 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1990
1991 /* Although RSS is meaningless when there is a single HW queue, we
1992 * still need it enabled in order to have HW Rx hash generated.
1993 *
1994 * if (!is_eth_multi(bp))
1995 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1996 */
1997
96305234 1998 params.rss_obj = rss_obj;
619c5cb6
VZ
1999
2000 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2001
60cad4e6
AE
2002 if (enable) {
2003 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2004
2005 /* RSS configuration */
2006 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2007 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2008 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2009 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2010 if (rss_obj->udp_rss_v4)
2011 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2012 if (rss_obj->udp_rss_v6)
2013 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2014 } else {
2015 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2016 }
619c5cb6 2017
96305234
DK
2018 /* Hash bits */
2019 params.rss_result_mask = MULTI_MASK;
619c5cb6 2020
5d317c6a 2021 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2022
96305234
DK
2023 if (config_hash) {
2024 /* RSS keys */
60cad4e6 2025 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2026 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2027 }
2028
60cad4e6
AE
2029 if (IS_PF(bp))
2030 return bnx2x_config_rss(bp, &params);
2031 else
2032 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2033}
2034
1191cb83 2035static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2036{
3b603066 2037 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2038
2039 /* Prepare parameters for function state transitions */
2040 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2041
2042 func_params.f_obj = &bp->func_obj;
2043 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2044
2045 func_params.params.hw_init.load_phase = load_code;
2046
2047 return bnx2x_func_state_change(bp, &func_params);
2048}
2049
2050/*
2051 * Cleans the objects that have internal lists without sending
16a5fd92 2052 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2053 */
7fa6f340 2054void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2055{
2056 int rc;
2057 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2058 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2059 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2060
2061 /***************** Cleanup MACs' object first *************************/
2062
2063 /* Wait for completion of requested */
2064 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2065 /* Perform a dry cleanup */
2066 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2067
2068 /* Clean ETH primary MAC */
2069 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2070 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2071 &ramrod_flags);
2072 if (rc != 0)
2073 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2074
2075 /* Cleanup UC list */
2076 vlan_mac_flags = 0;
2077 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2078 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2079 &ramrod_flags);
2080 if (rc != 0)
2081 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2082
2083 /***************** Now clean mcast object *****************************/
2084 rparam.mcast_obj = &bp->mcast_obj;
2085 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2086
8b09be5f
YM
2087 /* Add a DEL command... - Since we're doing a driver cleanup only,
2088 * we take a lock surrounding both the initial send and the CONTs,
2089 * as we don't want a true completion to disrupt us in the middle.
2090 */
2091 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2092 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2093 if (rc < 0)
51c1a580
MS
2094 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2095 rc);
619c5cb6
VZ
2096
2097 /* ...and wait until all pending commands are cleared */
2098 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2099 while (rc != 0) {
2100 if (rc < 0) {
2101 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2102 rc);
8b09be5f 2103 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2104 return;
2105 }
2106
2107 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2108 }
8b09be5f 2109 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2110}
2111
2112#ifndef BNX2X_STOP_ON_ERROR
2113#define LOAD_ERROR_EXIT(bp, label) \
2114 do { \
2115 (bp)->state = BNX2X_STATE_ERROR; \
2116 goto label; \
2117 } while (0)
55c11941
MS
2118
2119#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2120 do { \
2121 bp->cnic_loaded = false; \
2122 goto label; \
2123 } while (0)
2124#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2125#define LOAD_ERROR_EXIT(bp, label) \
2126 do { \
2127 (bp)->state = BNX2X_STATE_ERROR; \
2128 (bp)->panic = 1; \
2129 return -EBUSY; \
2130 } while (0)
55c11941
MS
2131#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2132 do { \
2133 bp->cnic_loaded = false; \
2134 (bp)->panic = 1; \
2135 return -EBUSY; \
2136 } while (0)
2137#endif /*BNX2X_STOP_ON_ERROR*/
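/* Illustrative note (not part of the original driver source): the two macro
 * variants above are used in the load paths further down, e.g. in
 * bnx2x_nic_load():
 *
 *	rc = bnx2x_alloc_fp_mem(bp);
 *	if (rc) {
 *		BNX2X_ERR("Unable to allocate memory for fps\n");
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *	}
 *
 * Without BNX2X_STOP_ON_ERROR this marks the state as BNX2X_STATE_ERROR and
 * jumps to the cleanup label; with BNX2X_STOP_ON_ERROR it sets bp->panic and
 * returns -EBUSY immediately instead.
 */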
619c5cb6 2138
ad5afc89
AE
2139static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2140{
2141 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2142 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2143 return;
2144}
2145
2146static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2147{
8db573ba 2148 int num_groups, vf_headroom = 0;
ad5afc89 2149 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2150
ad5afc89
AE
2151 /* number of queues for statistics is number of eth queues + FCoE */
2152 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2153
ad5afc89
AE
2154 /* Total number of FW statistics requests =
2155 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2156 * and fcoe l2 queue) stats + num of queues (which includes another 1
2157 * for fcoe l2 queue if applicable)
2158 */
2159 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2160
8db573ba
AE
2161 /* vf stats appear in the request list, but their data is allocated by
2162 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2163 * it is used to determine where to place the vf stats queries in the
2164 * request struct
2165 */
2166 if (IS_SRIOV(bp))
6411280a 2167 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2168
ad5afc89
AE
2169 /* Request is built from stats_query_header and an array of
2170 * stats_query_cmd_group each of which contains
2171 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2172 * configured in the stats_query_header.
2173 */
2174 num_groups =
8db573ba
AE
2175 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2176 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2177 1 : 0));
2178
8db573ba
AE
2179 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2180 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2181 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2182 num_groups * sizeof(struct stats_query_cmd_group);
2183
2184 /* Data for statistics requests + stats_counter
2185 * stats_counter holds per-STORM counters that are incremented
2186 * when STORM has finished with the current request.
2187 * memory for FCoE offloaded statistics is counted anyway,
2188 * even if they will not be sent.
2189 * VF stats are not accounted for here as the data of VF stats is stored
2190 * in memory allocated by the VF, not here.
2191 */
2192 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2193 sizeof(struct per_pf_stats) +
2194 sizeof(struct fcoe_statistics_params) +
2195 sizeof(struct per_queue_stats) * num_queue_stats +
2196 sizeof(struct stats_counter);
2197
2198 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2199 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2200
2201 /* Set shortcuts */
2202 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2203 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2204 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2205 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2206 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2207 bp->fw_stats_req_sz;
2208
6bf07b8e 2209 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2210 U64_HI(bp->fw_stats_req_mapping),
2211 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2212 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2213 U64_HI(bp->fw_stats_data_mapping),
2214 U64_LO(bp->fw_stats_data_mapping));
2215 return 0;
2216
2217alloc_mem_err:
2218 bnx2x_free_fw_stats_mem(bp);
2219 BNX2X_ERR("Can't allocate FW stats memory\n");
2220 return -ENOMEM;
2221}
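/* Illustrative note (not part of the original driver source): the num_groups
 * computation above is a plain ceiling division, equivalent to
 *
 *	num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
 *				  STATS_QUERY_CMD_COUNT);
 *
 * e.g. assuming STATS_QUERY_CMD_COUNT were 16 (its value is not shown in
 * this file), 18 pending requests would need 2 command groups.
 */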
2222
2223/* send load request to mcp and analyze response */
2224static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2225{
178135c1
DK
2226 u32 param;
2227
ad5afc89
AE
2228 /* init fw_seq */
2229 bp->fw_seq =
2230 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2231 DRV_MSG_SEQ_NUMBER_MASK);
2232 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2233
2234 /* Get current FW pulse sequence */
2235 bp->fw_drv_pulse_wr_seq =
2236 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2237 DRV_PULSE_SEQ_MASK);
2238 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2239
178135c1
DK
2240 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2241
2242 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2243 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2244
ad5afc89 2245 /* load request */
178135c1 2246 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2247
2248 /* if mcp fails to respond we must abort */
2249 if (!(*load_code)) {
2250 BNX2X_ERR("MCP response failure, aborting\n");
2251 return -EBUSY;
2252 }
2253
2254 /* If mcp refused (e.g. other port is in diagnostic mode) we
2255 * must abort
2256 */
2257 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2258 BNX2X_ERR("MCP refused load request, aborting\n");
2259 return -EBUSY;
2260 }
2261 return 0;
2262}
2263
2264/* check whether another PF has already loaded FW to chip. In
2265 * virtualized environments a pf from another VM may have already
2266 * initialized the device including loading FW
2267 */
2268int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2269{
2270 /* is another pf loaded on this engine? */
2271 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2272 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2273 /* build my FW version dword */
2274 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2275 (BCM_5710_FW_MINOR_VERSION << 8) +
2276 (BCM_5710_FW_REVISION_VERSION << 16) +
2277 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2278
2279 /* read loaded FW from chip */
2280 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2281
2282 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2283 loaded_fw, my_fw);
2284
2285 /* abort nic load if version mismatch */
2286 if (my_fw != loaded_fw) {
6bf07b8e 2287 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
452427b0 2288 loaded_fw, my_fw);
ad5afc89
AE
2289 return -EBUSY;
2290 }
2291 }
2292 return 0;
2293}
2294
2295/* returns the "mcp load_code" according to global load_count array */
2296static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2297{
2298 int path = BP_PATH(bp);
2299
2300 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2301 path, load_count[path][0], load_count[path][1],
2302 load_count[path][2]);
2303 load_count[path][0]++;
2304 load_count[path][1 + port]++;
2305 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2306 path, load_count[path][0], load_count[path][1],
2307 load_count[path][2]);
2308 if (load_count[path][0] == 1)
2309 return FW_MSG_CODE_DRV_LOAD_COMMON;
2310 else if (load_count[path][1 + port] == 1)
2311 return FW_MSG_CODE_DRV_LOAD_PORT;
2312 else
2313 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2314}
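/* Illustrative note (not part of the original driver source): a walk-through
 * of the no-MCP bookkeeping above, assuming two ports on one path and three
 * functions loading in order:
 *
 *	fn A (port 0): counts become [1, 1, 0] -> FW_MSG_CODE_DRV_LOAD_COMMON
 *	fn B (port 1): counts become [2, 1, 1] -> FW_MSG_CODE_DRV_LOAD_PORT
 *	fn C (port 0): counts become [3, 2, 1] -> FW_MSG_CODE_DRV_LOAD_FUNCTION
 *
 * i.e. only the very first function on the path does common init, the first
 * function on each port does port init, and every later function gets the
 * per-function load code.
 */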
2315
2316/* mark PMF if applicable */
2317static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2318{
2319 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2320 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2321 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2322 bp->port.pmf = 1;
2323 /* We need the barrier to ensure the ordering between the
2324 * writing to bp->port.pmf here and reading it from the
2325 * bnx2x_periodic_task().
2326 */
2327 smp_mb();
2328 } else {
2329 bp->port.pmf = 0;
452427b0
YM
2330 }
2331
ad5afc89
AE
2332 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2333}
2334
2335static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2336{
2337 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2338 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2339 (bp->common.shmem2_base)) {
2340 if (SHMEM2_HAS(bp, dcc_support))
2341 SHMEM2_WR(bp, dcc_support,
2342 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2343 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2344 if (SHMEM2_HAS(bp, afex_driver_support))
2345 SHMEM2_WR(bp, afex_driver_support,
2346 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2347 }
2348
2349 /* Set AFEX default VLAN tag to an invalid value */
2350 bp->afex_def_vlan_tag = -1;
452427b0
YM
2351}
2352
1191cb83
ED
2353/**
2354 * bnx2x_bz_fp - zero content of the fastpath structure.
2355 *
2356 * @bp: driver handle
2357 * @index: fastpath index to be zeroed
2358 *
2359 * Makes sure the contents of the bp->fp[index].napi are kept
2360 * intact.
2361 */
2362static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2363{
2364 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2365 int cos;
1191cb83 2366 struct napi_struct orig_napi = fp->napi;
15192a8c 2367 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2368
1191cb83 2369 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2370 if (fp->tpa_info)
2371 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2372 sizeof(struct bnx2x_agg_info));
2373 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2374
2375 /* Restore the NAPI object as it has been already initialized */
2376 fp->napi = orig_napi;
15192a8c 2377 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2378 fp->bp = bp;
2379 fp->index = index;
2380 if (IS_ETH_FP(fp))
2381 fp->max_cos = bp->max_cos;
2382 else
2383 /* Special queues support only one CoS */
2384 fp->max_cos = 1;
2385
65565884 2386 /* Init txdata pointers */
65565884
MS
2387 if (IS_FCOE_FP(fp))
2388 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2389 if (IS_ETH_FP(fp))
2390 for_each_cos_in_tx_queue(fp, cos)
2391 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2392 BNX2X_NUM_ETH_QUEUES(bp) + index];
2393
16a5fd92 2394 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2395 * minimal size so it must be set prior to queue memory allocation
2396 */
2397 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2398 (bp->flags & GRO_ENABLE_FLAG &&
2399 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2400 if (bp->flags & TPA_ENABLE_FLAG)
2401 fp->mode = TPA_MODE_LRO;
2402 else if (bp->flags & GRO_ENABLE_FLAG)
2403 fp->mode = TPA_MODE_GRO;
2404
1191cb83
ED
2405 /* We don't want TPA on an FCoE L2 ring */
2406 if (IS_FCOE_FP(fp))
2407 fp->disable_tpa = 1;
55c11941
MS
2408}
2409
2410int bnx2x_load_cnic(struct bnx2x *bp)
2411{
2412 int i, rc, port = BP_PORT(bp);
2413
2414 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2415
2416 mutex_init(&bp->cnic_mutex);
2417
ad5afc89
AE
2418 if (IS_PF(bp)) {
2419 rc = bnx2x_alloc_mem_cnic(bp);
2420 if (rc) {
2421 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2422 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2423 }
55c11941
MS
2424 }
2425
2426 rc = bnx2x_alloc_fp_mem_cnic(bp);
2427 if (rc) {
2428 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2429 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2430 }
2431
2432 /* Update the number of queues with the cnic queues */
2433 rc = bnx2x_set_real_num_queues(bp, 1);
2434 if (rc) {
2435 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2436 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2437 }
2438
2439 /* Add all CNIC NAPI objects */
2440 bnx2x_add_all_napi_cnic(bp);
2441 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2442 bnx2x_napi_enable_cnic(bp);
2443
2444 rc = bnx2x_init_hw_func_cnic(bp);
2445 if (rc)
2446 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2447
2448 bnx2x_nic_init_cnic(bp);
2449
ad5afc89
AE
2450 if (IS_PF(bp)) {
2451 /* Enable Timer scan */
2452 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2453
2454 /* setup cnic queues */
2455 for_each_cnic_queue(bp, i) {
2456 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2457 if (rc) {
2458 BNX2X_ERR("Queue setup failed\n");
2459 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2460 }
55c11941
MS
2461 }
2462 }
2463
2464 /* Initialize Rx filter. */
8b09be5f 2465 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2466
2467 /* re-read iscsi info */
2468 bnx2x_get_iscsi_info(bp);
2469 bnx2x_setup_cnic_irq_info(bp);
2470 bnx2x_setup_cnic_info(bp);
2471 bp->cnic_loaded = true;
2472 if (bp->state == BNX2X_STATE_OPEN)
2473 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2474
55c11941
MS
2475 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2476
2477 return 0;
2478
2479#ifndef BNX2X_STOP_ON_ERROR
2480load_error_cnic2:
2481 /* Disable Timer scan */
2482 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2483
2484load_error_cnic1:
2485 bnx2x_napi_disable_cnic(bp);
2486 /* Update the number of queues without the cnic queues */
d9d81862 2487 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2488 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2489load_error_cnic0:
2490 BNX2X_ERR("CNIC-related load failed\n");
2491 bnx2x_free_fp_mem_cnic(bp);
2492 bnx2x_free_mem_cnic(bp);
2493 return rc;
2494#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2495}
2496
9f6c9258
DK
2497/* must be called with rtnl_lock */
2498int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2499{
619c5cb6 2500 int port = BP_PORT(bp);
ad5afc89 2501 int i, rc = 0, load_code = 0;
9f6c9258 2502
55c11941
MS
2503 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2504 DP(NETIF_MSG_IFUP,
2505 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2506
9f6c9258 2507#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2508 if (unlikely(bp->panic)) {
2509 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2510 return -EPERM;
51c1a580 2511 }
9f6c9258
DK
2512#endif
2513
2514 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2515
16a5fd92 2516 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2517 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2518 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2519 &bp->last_reported_link.link_report_flags);
2ae17f66 2520
ad5afc89
AE
2521 if (IS_PF(bp))
2522 /* must be called before memory allocation and HW init */
2523 bnx2x_ilt_set_info(bp);
523224a3 2524
6383c0b3
AE
2525 /*
2526 * Zero fastpath structures preserving invariants like napi, which are
2527 * allocated only once, fp index, max_cos, bp pointer.
65565884 2528 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2529 */
51c1a580 2530 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2531 for_each_queue(bp, i)
2532 bnx2x_bz_fp(bp, i);
55c11941
MS
2533 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2534 bp->num_cnic_queues) *
2535 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2536
55c11941 2537 bp->fcoe_init = false;
6383c0b3 2538
a8c94b91
VZ
2539 /* Set the receive queues buffer size */
2540 bnx2x_set_rx_buf_size(bp);
2541
ad5afc89
AE
2542 if (IS_PF(bp)) {
2543 rc = bnx2x_alloc_mem(bp);
2544 if (rc) {
2545 BNX2X_ERR("Unable to allocate bp memory\n");
2546 return rc;
2547 }
2548 }
2549
ad5afc89
AE
2550 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2551 * of memory available for RSS queues
2552 */
2553 rc = bnx2x_alloc_fp_mem(bp);
2554 if (rc) {
2555 BNX2X_ERR("Unable to allocate memory for fps\n");
2556 LOAD_ERROR_EXIT(bp, load_error0);
2557 }
d6214d7a 2558
e3ed4eae
DK
2559 /* Allocate memory for FW statistics */
2560 if (bnx2x_alloc_fw_stats_mem(bp))
2561 LOAD_ERROR_EXIT(bp, load_error0);
2562
8d9ac297
AE
2563 /* request pf to initialize status blocks */
2564 if (IS_VF(bp)) {
2565 rc = bnx2x_vfpf_init(bp);
2566 if (rc)
2567 LOAD_ERROR_EXIT(bp, load_error0);
2568 }
2569
b3b83c3f
DK
2570 /* As long as bnx2x_alloc_mem() may possibly update
2571 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2572 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2573 */
55c11941 2574 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2575 if (rc) {
ec6ba945 2576 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2577 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2578 }
2579
6383c0b3 2580 /* configure multi cos mappings in kernel.
16a5fd92
YM
2581 * this configuration may be overridden by a multi class queue
2582 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2583 */
2584 bnx2x_setup_tc(bp->dev, bp->max_cos);
2585
26614ba5
MS
2586 /* Add all NAPI objects */
2587 bnx2x_add_all_napi(bp);
55c11941 2588 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2589 bnx2x_napi_enable(bp);
2590
ad5afc89
AE
2591 if (IS_PF(bp)) {
2592 /* set pf load just before approaching the MCP */
2593 bnx2x_set_pf_load(bp);
2594
2595 /* if mcp exists send load request and analyze response */
2596 if (!BP_NOMCP(bp)) {
2597 /* attempt to load pf */
2598 rc = bnx2x_nic_load_request(bp, &load_code);
2599 if (rc)
2600 LOAD_ERROR_EXIT(bp, load_error1);
2601
2602 /* what did mcp say? */
2603 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2604 if (rc) {
2605 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2606 LOAD_ERROR_EXIT(bp, load_error2);
2607 }
ad5afc89
AE
2608 } else {
2609 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2610 }
9f6c9258 2611
ad5afc89
AE
2612 /* mark pmf if applicable */
2613 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2614
ad5afc89
AE
2615 /* Init Function state controlling object */
2616 bnx2x__init_func_obj(bp);
6383c0b3 2617
ad5afc89
AE
2618 /* Initialize HW */
2619 rc = bnx2x_init_hw(bp, load_code);
2620 if (rc) {
2621 BNX2X_ERR("HW init failed, aborting\n");
2622 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2623 LOAD_ERROR_EXIT(bp, load_error2);
2624 }
9f6c9258
DK
2625 }
2626
ecf01c22
YM
2627 bnx2x_pre_irq_nic_init(bp);
2628
d6214d7a
DK
2629 /* Connect to IRQs */
2630 rc = bnx2x_setup_irqs(bp);
523224a3 2631 if (rc) {
ad5afc89
AE
2632 BNX2X_ERR("setup irqs failed\n");
2633 if (IS_PF(bp))
2634 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2635 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2636 }
2637
619c5cb6 2638 /* Init per-function objects */
ad5afc89 2639 if (IS_PF(bp)) {
ecf01c22
YM
2640 /* Setup NIC internals and enable interrupts */
2641 bnx2x_post_irq_nic_init(bp, load_code);
2642
ad5afc89 2643 bnx2x_init_bp_objs(bp);
b56e9670 2644 bnx2x_iov_nic_init(bp);
a3348722 2645
ad5afc89
AE
2646 /* Set AFEX default VLAN tag to an invalid value */
2647 bp->afex_def_vlan_tag = -1;
2648 bnx2x_nic_load_afex_dcc(bp, load_code);
2649 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2650 rc = bnx2x_func_start(bp);
2651 if (rc) {
2652 BNX2X_ERR("Function start failed!\n");
2653 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2654
619c5cb6 2655 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2656 }
9f6c9258 2657
ad5afc89
AE
2658 /* Send LOAD_DONE command to MCP */
2659 if (!BP_NOMCP(bp)) {
2660 load_code = bnx2x_fw_command(bp,
2661 DRV_MSG_CODE_LOAD_DONE, 0);
2662 if (!load_code) {
2663 BNX2X_ERR("MCP response failure, aborting\n");
2664 rc = -EBUSY;
2665 LOAD_ERROR_EXIT(bp, load_error3);
2666 }
2667 }
9f6c9258 2668
0c14e5ce
AE
2669 /* initialize FW coalescing state machines in RAM */
2670 bnx2x_update_coalesce(bp);
60cad4e6 2671 }
0c14e5ce 2672
60cad4e6
AE
2673 /* setup the leading queue */
2674 rc = bnx2x_setup_leading(bp);
2675 if (rc) {
2676 BNX2X_ERR("Setup leading failed!\n");
2677 LOAD_ERROR_EXIT(bp, load_error3);
2678 }
ad5afc89 2679
60cad4e6
AE
2680 /* set up the rest of the queues */
2681 for_each_nondefault_eth_queue(bp, i) {
2682 if (IS_PF(bp))
2683 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2684 else /* VF */
2685 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2686 if (rc) {
60cad4e6 2687 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2688 LOAD_ERROR_EXIT(bp, load_error3);
2689 }
60cad4e6 2690 }
8d9ac297 2691
60cad4e6
AE
2692 /* setup rss */
2693 rc = bnx2x_init_rss(bp);
2694 if (rc) {
2695 BNX2X_ERR("PF RSS init failed\n");
2696 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2697 }
619c5cb6 2698
523224a3
DK
2699 /* Now when Clients are configured we are ready to work */
2700 bp->state = BNX2X_STATE_OPEN;
2701
619c5cb6 2702 /* Configure a ucast MAC */
ad5afc89
AE
2703 if (IS_PF(bp))
2704 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2705 else /* vf */
f8f4f61a
DK
2706 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2707 true);
51c1a580
MS
2708 if (rc) {
2709 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2710 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2711 }
6e30dd4e 2712
ad5afc89 2713 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2714 bnx2x_update_max_mf_config(bp, bp->pending_max);
2715 bp->pending_max = 0;
2716 }
2717
ad5afc89
AE
2718 if (bp->port.pmf) {
2719 rc = bnx2x_initial_phy_init(bp, load_mode);
2720 if (rc)
2721 LOAD_ERROR_EXIT(bp, load_error3);
2722 }
c63da990 2723 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2724
619c5cb6
VZ
2725 /* Start fast path */
2726
2727 /* Initialize Rx filter. */
8b09be5f 2728 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2729
619c5cb6 2730 /* Start the Tx */
9f6c9258
DK
2731 switch (load_mode) {
2732 case LOAD_NORMAL:
16a5fd92 2733 /* Tx queue should be only re-enabled */
523224a3 2734 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2735 break;
2736
2737 case LOAD_OPEN:
2738 netif_tx_start_all_queues(bp->dev);
523224a3 2739 smp_mb__after_clear_bit();
9f6c9258
DK
2740 break;
2741
2742 case LOAD_DIAG:
8970b2e4 2743 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2744 bp->state = BNX2X_STATE_DIAG;
2745 break;
2746
2747 default:
2748 break;
2749 }
2750
00253a8c 2751 if (bp->port.pmf)
4c704899 2752 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2753 else
9f6c9258
DK
2754 bnx2x__link_status_update(bp);
2755
2756 /* start the timer */
2757 mod_timer(&bp->timer, jiffies + bp->current_interval);
2758
55c11941
MS
2759 if (CNIC_ENABLED(bp))
2760 bnx2x_load_cnic(bp);
9f6c9258 2761
ad5afc89
AE
2762 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2763 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2764 u32 val;
2765 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2766 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2767 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2768 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2769 }
2770
619c5cb6 2771 /* Wait for all pending SP commands to complete */
ad5afc89 2772 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2773 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2774 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2775 return -EBUSY;
2776 }
6891dd25 2777
9876879f
BW
2778 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2779 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2780 bnx2x_dcbx_init(bp, false);
2781
55c11941
MS
2782 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2783
9f6c9258
DK
2784 return 0;
2785
619c5cb6 2786#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2787load_error3:
ad5afc89
AE
2788 if (IS_PF(bp)) {
2789 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2790
ad5afc89
AE
2791 /* Clean queueable objects */
2792 bnx2x_squeeze_objects(bp);
2793 }
619c5cb6 2794
9f6c9258
DK
2795 /* Free SKBs, SGEs, TPA pool and driver internals */
2796 bnx2x_free_skbs(bp);
ec6ba945 2797 for_each_rx_queue(bp, i)
9f6c9258 2798 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2799
9f6c9258 2800 /* Release IRQs */
d6214d7a
DK
2801 bnx2x_free_irq(bp);
2802load_error2:
ad5afc89 2803 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2804 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2805 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2806 }
2807
2808 bp->port.pmf = 0;
9f6c9258
DK
2809load_error1:
2810 bnx2x_napi_disable(bp);
722c6f58 2811 bnx2x_del_all_napi(bp);
ad5afc89 2812
889b9af3 2813 /* clear pf_load status, as it was already set */
ad5afc89
AE
2814 if (IS_PF(bp))
2815 bnx2x_clear_pf_load(bp);
d6214d7a 2816load_error0:
ad5afc89 2817 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2818 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2819 bnx2x_free_mem(bp);
2820
2821 return rc;
619c5cb6 2822#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2823}
2824
7fa6f340 2825int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2826{
2827 u8 rc = 0, cos, i;
2828
2829 /* Wait until tx fastpath tasks complete */
2830 for_each_tx_queue(bp, i) {
2831 struct bnx2x_fastpath *fp = &bp->fp[i];
2832
2833 for_each_cos_in_tx_queue(fp, cos)
2834 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2835 if (rc)
2836 return rc;
2837 }
2838 return 0;
2839}
2840
9f6c9258 2841/* must be called with rtnl_lock */
5d07d868 2842int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2843{
2844 int i;
c9ee9206
VZ
2845 bool global = false;
2846
55c11941
MS
2847 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2848
9ce392d4 2849 /* mark driver is unloaded in shmem2 */
ad5afc89 2850 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2851 u32 val;
2852 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2853 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2854 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2855 }
2856
80bfe5cc 2857 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2858 (bp->state == BNX2X_STATE_CLOSED ||
2859 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2860 /* We can get here if the driver has been unloaded
2861 * during parity error recovery and is either waiting for a
2862 * leader to complete or for other functions to unload and
2863 * then ifdown has been issued. In this case we want to
2864 * unload and let other functions to complete a recovery
2865 * process.
2866 */
9f6c9258
DK
2867 bp->recovery_state = BNX2X_RECOVERY_DONE;
2868 bp->is_leader = 0;
c9ee9206
VZ
2869 bnx2x_release_leader_lock(bp);
2870 smp_mb();
2871
51c1a580
MS
2872 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2873 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2874 return -EINVAL;
2875 }
2876
80bfe5cc 2877 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2878 * has not completed successfully - all resources are released.
80bfe5cc
YM
2879 *
2880 * we can get here only after an unsuccessful ndo_* callback, during which
2881 * dev->IFF_UP flag is still on.
2882 */
2883 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2884 return 0;
2885
2886 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2887 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2888 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2889 */
2890 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2891 smp_mb();
2892
78c3bcc5
AE
2893 /* indicate to VFs that the PF is going down */
2894 bnx2x_iov_channel_down(bp);
2895
55c11941
MS
2896 if (CNIC_LOADED(bp))
2897 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2898
9505ee37
VZ
2899 /* Stop Tx */
2900 bnx2x_tx_disable(bp);
65565884 2901 netdev_reset_tc(bp->dev);
9505ee37 2902
9f6c9258 2903 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2904
9f6c9258 2905 del_timer_sync(&bp->timer);
f85582f8 2906
ad5afc89
AE
2907 if (IS_PF(bp)) {
2908 /* Set ALWAYS_ALIVE bit in shmem */
2909 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2910 bnx2x_drv_pulse(bp);
2911 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2912 bnx2x_save_statistics(bp);
2913 }
9f6c9258 2914
ad5afc89
AE
2915 /* wait till consumers catch up with producers in all queues */
2916 bnx2x_drain_tx_queues(bp);
9f6c9258 2917
9b176b6b
AE
2918 /* if VF, indicate to PF that this function is going down (PF will delete sp
2919 * elements and clear initializations)
2920 */
2921 if (IS_VF(bp))
2922 bnx2x_vfpf_close_vf(bp);
2923 else if (unload_mode != UNLOAD_RECOVERY)
2924 /* if this is a normal/close unload, we need to clean up the chip */
5d07d868 2925 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2926 else {
c9ee9206
VZ
2927 /* Send the UNLOAD_REQUEST to the MCP */
2928 bnx2x_send_unload_req(bp, unload_mode);
2929
16a5fd92 2930 /* Prevent transactions to host from the functions on the
c9ee9206 2931 * engine that doesn't reset global blocks in case of global
16a5fd92 2932 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2933 * (the engine whose leader will perform the recovery
2934 * last).
2935 */
2936 if (!CHIP_IS_E1x(bp))
2937 bnx2x_pf_disable(bp);
2938
2939 /* Disable HW interrupts, NAPI */
523224a3 2940 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2941 /* Delete all NAPI objects */
2942 bnx2x_del_all_napi(bp);
55c11941
MS
2943 if (CNIC_LOADED(bp))
2944 bnx2x_del_all_napi_cnic(bp);
523224a3 2945 /* Release IRQs */
d6214d7a 2946 bnx2x_free_irq(bp);
c9ee9206
VZ
2947
2948 /* Report UNLOAD_DONE to MCP */
5d07d868 2949 bnx2x_send_unload_done(bp, false);
523224a3 2950 }
9f6c9258 2951
619c5cb6 2952 /*
16a5fd92 2953 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2954 * the queueable objects here in case they failed to get cleaned so far.
2955 */
ad5afc89
AE
2956 if (IS_PF(bp))
2957 bnx2x_squeeze_objects(bp);
619c5cb6 2958
79616895
VZ
2959 /* There should be no more pending SP commands at this stage */
2960 bp->sp_state = 0;
2961
9f6c9258
DK
2962 bp->port.pmf = 0;
2963
a0d307b2
DK
2964 /* clear pending work in rtnl task */
2965 bp->sp_rtnl_state = 0;
2966 smp_mb();
2967
9f6c9258
DK
2968 /* Free SKBs, SGEs, TPA pool and driver internals */
2969 bnx2x_free_skbs(bp);
55c11941
MS
2970 if (CNIC_LOADED(bp))
2971 bnx2x_free_skbs_cnic(bp);
ec6ba945 2972 for_each_rx_queue(bp, i)
9f6c9258 2973 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2974
ad5afc89
AE
2975 bnx2x_free_fp_mem(bp);
2976 if (CNIC_LOADED(bp))
55c11941 2977 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2978
ad5afc89 2979 if (IS_PF(bp)) {
ad5afc89
AE
2980 if (CNIC_LOADED(bp))
2981 bnx2x_free_mem_cnic(bp);
2982 }
b4cddbd6
AE
2983 bnx2x_free_mem(bp);
2984
9f6c9258 2985 bp->state = BNX2X_STATE_CLOSED;
55c11941 2986 bp->cnic_loaded = false;
9f6c9258 2987
c9ee9206
VZ
2988 /* Check if there are pending parity attentions. If there are - set
2989 * RECOVERY_IN_PROGRESS.
2990 */
ad5afc89 2991 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2992 bnx2x_set_reset_in_progress(bp);
2993
2994 /* Set RESET_IS_GLOBAL if needed */
2995 if (global)
2996 bnx2x_set_reset_global(bp);
2997 }
2998
9f6c9258
DK
2999 /* The last driver must disable a "close the gate" if there is no
3000 * parity attention or "process kill" pending.
3001 */
ad5afc89
AE
3002 if (IS_PF(bp) &&
3003 !bnx2x_clear_pf_load(bp) &&
3004 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3005 bnx2x_disable_close_the_gate(bp);
3006
55c11941
MS
3007 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3008
9f6c9258
DK
3009 return 0;
3010}
f85582f8 3011
9f6c9258
DK
3012int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3013{
3014 u16 pmcsr;
3015
adf5f6a1 3016 /* If there is no power capability, silently succeed */
29ed74c3 3017 if (!bp->pdev->pm_cap) {
51c1a580 3018 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3019 return 0;
3020 }
3021
29ed74c3 3022 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3023
3024 switch (state) {
3025 case PCI_D0:
29ed74c3 3026 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3027 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3028 PCI_PM_CTRL_PME_STATUS));
3029
3030 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3031 /* delay required during transition out of D3hot */
3032 msleep(20);
3033 break;
3034
3035 case PCI_D3hot:
3036 /* If there are other clients above, don't
3037 shut down the power */
3038 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3039 return 0;
3040 /* Don't shut down the power for emulation and FPGA */
3041 if (CHIP_REV_IS_SLOW(bp))
3042 return 0;
3043
3044 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3045 pmcsr |= 3;
3046
3047 if (bp->wol)
3048 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3049
29ed74c3 3050 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3051 pmcsr);
3052
3053 /* No more memory access after this point until
3054 * device is brought back to D0.
3055 */
3056 break;
3057
3058 default:
51c1a580 3059 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3060 return -EINVAL;
3061 }
3062 return 0;
3063}
3064
9f6c9258
DK
3065/*
3066 * net_device service functions
3067 */
d6214d7a 3068int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3069{
3070 int work_done = 0;
6383c0b3 3071 u8 cos;
9f6c9258
DK
3072 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3073 napi);
3074 struct bnx2x *bp = fp->bp;
3075
3076 while (1) {
3077#ifdef BNX2X_STOP_ON_ERROR
3078 if (unlikely(bp->panic)) {
3079 napi_complete(napi);
3080 return 0;
3081 }
3082#endif
8f20aa57
DK
3083 if (!bnx2x_fp_lock_napi(fp))
3084 return work_done;
9f6c9258 3085
6383c0b3 3086 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3087 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3088 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3089
9f6c9258
DK
3090 if (bnx2x_has_rx_work(fp)) {
3091 work_done += bnx2x_rx_int(fp, budget - work_done);
3092
3093 /* must not complete if we consumed full budget */
8f20aa57
DK
3094 if (work_done >= budget) {
3095 bnx2x_fp_unlock_napi(fp);
9f6c9258 3096 break;
8f20aa57 3097 }
9f6c9258
DK
3098 }
3099
3100 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3101 if (!bnx2x_fp_unlock_napi(fp) &&
3102 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3103
ec6ba945
VZ
3104 /* No need to update SB for FCoE L2 ring as long as
3105 * it's connected to the default SB and the SB
3106 * has been updated when NAPI was scheduled.
3107 */
3108 if (IS_FCOE_FP(fp)) {
3109 napi_complete(napi);
3110 break;
3111 }
9f6c9258 3112 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3113 /* bnx2x_has_rx_work() reads the status block,
3114 * thus we need to ensure that status block indices
3115 * have been actually read (bnx2x_update_fpsb_idx)
3116 * prior to this check (bnx2x_has_rx_work) so that
3117 * we won't write the "newer" value of the status block
3118 * to IGU (if there was a DMA right after
3119 * bnx2x_has_rx_work and if there is no rmb, the memory
3120 * reading (bnx2x_update_fpsb_idx) may be postponed
3121 * to right before bnx2x_ack_sb). In this case there
3122 * will never be another interrupt until there is
3123 * another update of the status block, while there
3124 * is still unhandled work.
3125 */
9f6c9258
DK
3126 rmb();
3127
3128 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3129 napi_complete(napi);
3130 /* Re-enable interrupts */
51c1a580 3131 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3132 "Update index to %d\n", fp->fp_hc_idx);
3133 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3134 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3135 IGU_INT_ENABLE, 1);
3136 break;
3137 }
3138 }
3139 }
3140
3141 return work_done;
3142}
3143
e0d1095a 3144#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3145/* must be called with local_bh_disable()d */
3146int bnx2x_low_latency_recv(struct napi_struct *napi)
3147{
3148 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3149 napi);
3150 struct bnx2x *bp = fp->bp;
3151 int found = 0;
3152
3153 if ((bp->state == BNX2X_STATE_CLOSED) ||
3154 (bp->state == BNX2X_STATE_ERROR) ||
3155 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3156 return LL_FLUSH_FAILED;
3157
3158 if (!bnx2x_fp_lock_poll(fp))
3159 return LL_FLUSH_BUSY;
3160
75b29459 3161 if (bnx2x_has_rx_work(fp))
8f20aa57 3162 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3163
3164 bnx2x_fp_unlock_poll(fp);
3165
3166 return found;
3167}
3168#endif
3169
9f6c9258
DK
3170/* we split the first BD into headers and data BDs
3171 * to ease the pain of our fellow microcode engineers
3172 * we use one mapping for both BDs
9f6c9258 3173 */
91226790
DK
3174static u16 bnx2x_tx_split(struct bnx2x *bp,
3175 struct bnx2x_fp_txdata *txdata,
3176 struct sw_tx_bd *tx_buf,
3177 struct eth_tx_start_bd **tx_bd, u16 hlen,
3178 u16 bd_prod)
9f6c9258
DK
3179{
3180 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3181 struct eth_tx_bd *d_tx_bd;
3182 dma_addr_t mapping;
3183 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3184
3185 /* first fix first BD */
9f6c9258
DK
3186 h_tx_bd->nbytes = cpu_to_le16(hlen);
3187
91226790
DK
3188 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3189 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3190
3191 /* now get a new data BD
3192 * (after the pbd) and fill it */
3193 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3194 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3195
3196 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3197 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3198
3199 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3200 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3201 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3202
3203 /* this marks the BD as one that has no individual mapping */
3204 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3205
3206 DP(NETIF_MSG_TX_QUEUED,
3207 "TSO split data size is %d (%x:%x)\n",
3208 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3209
3210 /* update tx_bd */
3211 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3212
3213 return bd_prod;
3214}
3215
86564c3f
YM
3216#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3217#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3218static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3219{
86564c3f
YM
3220 __sum16 tsum = (__force __sum16) csum;
3221
9f6c9258 3222 if (fix > 0)
86564c3f
YM
3223 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3224 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3225
3226 else if (fix < 0)
86564c3f
YM
3227 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3228 csum_partial(t_header, -fix, 0)));
9f6c9258 3229
e2593fcd 3230 return bswab16(tsum);
9f6c9258
DK
3231}
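/* Illustrative note (not part of the original driver source): the fix-up
 * above adjusts a checksum that was computed over a slightly shifted range.
 * For fix > 0 the partial checksum of the `fix' bytes immediately before
 * t_header is subtracted out; for fix < 0 the partial checksum of the |fix|
 * bytes starting at t_header is added in. The result is folded and
 * byte-swapped with bswab16() for the parse BD.
 */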
3232
91226790 3233static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3234{
3235 u32 rc;
a848ade4
DK
3236 __u8 prot = 0;
3237 __be16 protocol;
9f6c9258
DK
3238
3239 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3240 return XMIT_PLAIN;
9f6c9258 3241
a848ade4
DK
3242 protocol = vlan_get_protocol(skb);
3243 if (protocol == htons(ETH_P_IPV6)) {
3244 rc = XMIT_CSUM_V6;
3245 prot = ipv6_hdr(skb)->nexthdr;
3246 } else {
3247 rc = XMIT_CSUM_V4;
3248 prot = ip_hdr(skb)->protocol;
3249 }
9f6c9258 3250
a848ade4
DK
3251 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3252 if (inner_ip_hdr(skb)->version == 6) {
3253 rc |= XMIT_CSUM_ENC_V6;
3254 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3255 rc |= XMIT_CSUM_TCP;
9f6c9258 3256 } else {
a848ade4
DK
3257 rc |= XMIT_CSUM_ENC_V4;
3258 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3259 rc |= XMIT_CSUM_TCP;
3260 }
3261 }
a848ade4
DK
3262 if (prot == IPPROTO_TCP)
3263 rc |= XMIT_CSUM_TCP;
9f6c9258 3264
36a8f39e
ED
3265 if (skb_is_gso(skb)) {
3266 if (skb_is_gso_v6(skb)) {
3267 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3268 if (rc & XMIT_CSUM_ENC)
3269 rc |= XMIT_GSO_ENC_V6;
3270 } else {
3271 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3272 if (rc & XMIT_CSUM_ENC)
3273 rc |= XMIT_GSO_ENC_V4;
3274 }
a848ade4 3275 }
9f6c9258
DK
3276
3277 return rc;
3278}
3279
3280#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3281/* check if packet requires linearization (packet is too fragmented)
3282 no need to check fragmentation if page size > 8K (there will be no
3283 violation of FW restrictions) */
3284static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3285 u32 xmit_type)
3286{
3287 int to_copy = 0;
3288 int hlen = 0;
3289 int first_bd_sz = 0;
3290
3291 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3292 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3293
3294 if (xmit_type & XMIT_GSO) {
3295 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3296 /* Check if LSO packet needs to be copied:
3297 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3298 int wnd_size = MAX_FETCH_BD - 3;
3299 /* Number of windows to check */
3300 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3301 int wnd_idx = 0;
3302 int frag_idx = 0;
3303 u32 wnd_sum = 0;
3304
3305 /* Headers length */
3306 hlen = (int)(skb_transport_header(skb) - skb->data) +
3307 tcp_hdrlen(skb);
3308
3309 /* Amount of data (w/o headers) on linear part of SKB*/
3310 first_bd_sz = skb_headlen(skb) - hlen;
3311
3312 wnd_sum = first_bd_sz;
3313
3314 /* Calculate the first sum - it's special */
3315 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3316 wnd_sum +=
9e903e08 3317 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3318
3319 /* If there was data in the linear part of the skb - check it */
3320 if (first_bd_sz > 0) {
3321 if (unlikely(wnd_sum < lso_mss)) {
3322 to_copy = 1;
3323 goto exit_lbl;
3324 }
3325
3326 wnd_sum -= first_bd_sz;
3327 }
3328
3329 /* Others are easier: run through the frag list and
3330 check all windows */
3331 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3332 wnd_sum +=
9e903e08 3333 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3334
3335 if (unlikely(wnd_sum < lso_mss)) {
3336 to_copy = 1;
3337 break;
3338 }
3339 wnd_sum -=
9e903e08 3340 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3341 }
3342 } else {
3343 /* in the non-LSO case, a too fragmented packet should always
3344 be linearized */
3345 to_copy = 1;
3346 }
3347 }
3348
3349exit_lbl:
3350 if (unlikely(to_copy))
3351 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3352 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3353 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3354 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3355
3356 return to_copy;
3357}
3358#endif
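/* Illustrative note (not part of the original driver source): the check above
 * slides a window of (MAX_FETCH_BD - 3) consecutive frags (the linear data
 * plus the first frags for the initial window) over the skb and requires
 * every window to carry at least gso_size bytes; otherwise the packet is
 * flagged for linearization. As a hypothetical example, with a window size
 * of 10 and gso_size of 1400, a run of ten consecutive 100-byte frags
 * (1000 bytes total) would trip the check and force a copy.
 */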
3359
91226790
DK
3360static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3361 u32 xmit_type)
f2e0899f 3362{
a848ade4
DK
3363 struct ipv6hdr *ipv6;
3364
2297a2da
VZ
3365 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3366 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3367 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3368
3369 if (xmit_type & XMIT_GSO_ENC_V6)
3370 ipv6 = inner_ipv6_hdr(skb);
3371 else if (xmit_type & XMIT_GSO_V6)
3372 ipv6 = ipv6_hdr(skb);
3373 else
3374 ipv6 = NULL;
3375
3376 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3377 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3378}
3379
3380/**
e8920674 3381 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3382 *
e8920674
DK
3383 * @skb: packet skb
3384 * @pbd: parse BD
3385 * @xmit_type: xmit flags
f2e0899f 3386 */
91226790
DK
3387static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3388 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3389 struct eth_tx_start_bd *tx_start_bd,
91226790 3390 u32 xmit_type)
f2e0899f
DK
3391{
3392 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3393 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3394 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3395
3396 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3397 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3398 pbd->tcp_pseudo_csum =
86564c3f
YM
3399 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3400 ip_hdr(skb)->daddr,
3401 0, IPPROTO_TCP, 0));
f2e0899f 3402
057cf65e
YM
3403 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3404 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3405 } else {
f2e0899f 3406 pbd->tcp_pseudo_csum =
86564c3f
YM
3407 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3408 &ipv6_hdr(skb)->daddr,
3409 0, IPPROTO_TCP, 0));
057cf65e 3410 }
f2e0899f 3411
86564c3f
YM
3412 pbd->global_data |=
3413 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3414}
f85582f8 3415
a848ade4
DK
3416/**
3417 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3418 *
3419 * @bp: driver handle
3420 * @skb: packet skb
3421 * @parsing_data: data to be updated
3422 * @xmit_type: xmit flags
3423 *
3424 * 57712/578xx related, when skb has encapsulation
3425 */
3426static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3427 u32 *parsing_data, u32 xmit_type)
3428{
3429 *parsing_data |=
3430 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3431 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3432 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3433
3434 if (xmit_type & XMIT_CSUM_TCP) {
3435 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3436 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3437 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3438
3439 return skb_inner_transport_header(skb) +
3440 inner_tcp_hdrlen(skb) - skb->data;
3441 }
3442
3443 /* We support checksum offload for TCP and UDP only.
3444 * No need to pass the UDP header length - it's a constant.
3445 */
3446 return skb_inner_transport_header(skb) +
3447 sizeof(struct udphdr) - skb->data;
3448}
3449
f2e0899f 3450/**
e8920674 3451 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3452 *
e8920674
DK
3453 * @bp: driver handle
3454 * @skb: packet skb
3455 * @parsing_data: data to be updated
3456 * @xmit_type: xmit flags
f2e0899f 3457 *
91226790 3458 * 57712/578xx related
f2e0899f 3459 */
91226790
DK
3460static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3461 u32 *parsing_data, u32 xmit_type)
f2e0899f 3462{
e39aece7 3463 *parsing_data |=
2de67439 3464 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3465 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3466 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3467
e39aece7
VZ
3468 if (xmit_type & XMIT_CSUM_TCP) {
3469 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3470 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3471 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3472
e39aece7 3473 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3474 }
3475 /* We support checksum offload for TCP and UDP only.
3476 * No need to pass the UDP header length - it's a constant.
3477 */
3478 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3479}
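The E2 parsing BD expresses the L4 header start in 16-bit words and the TCP header length in 32-bit dwords, while the function returns the total header length in bytes. A tiny sketch of that unit bookkeeping, with illustrative names only:

#include <stdint.h>

struct l4_offsets {
	uint16_t l4_start_w;	/* offset of the L4 header, in 16-bit words */
	uint16_t tcp_hdr_dw;	/* TCP header length, in 32-bit dwords */
	uint16_t hlen_bytes;	/* total header length handed back to the caller */
};

static struct l4_offsets calc_l4_offsets(uint16_t eth_plus_ip_bytes, uint16_t tcp_hdr_bytes)
{
	struct l4_offsets o;

	o.l4_start_w = eth_plus_ip_bytes >> 1;	/* bytes -> words */
	o.tcp_hdr_dw = tcp_hdr_bytes / 4;	/* bytes -> dwords */
	o.hlen_bytes = eth_plus_ip_bytes + tcp_hdr_bytes;
	return o;
}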
3480
a848ade4 3481/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3482static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3483 struct eth_tx_start_bd *tx_start_bd,
3484 u32 xmit_type)
93ef5c02 3485{
93ef5c02
DK
3486 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3487
a848ade4 3488 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3489 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3490
3491 if (!(xmit_type & XMIT_CSUM_TCP))
3492 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3493}
3494
f2e0899f 3495/**
e8920674 3496 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3497 *
e8920674
DK
3498 * @bp: driver handle
3499 * @skb: packet skb
3500 * @pbd: parse BD to be updated
3501 * @xmit_type: xmit flags
f2e0899f 3502 */
91226790
DK
3503static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3504 struct eth_tx_parse_bd_e1x *pbd,
3505 u32 xmit_type)
f2e0899f 3506{
e39aece7 3507 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3508
3509 /* for now NS flag is not used in Linux */
3510 pbd->global_data =
86564c3f
YM
3511 cpu_to_le16(hlen |
3512 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3513 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3514
3515 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3516 skb_network_header(skb)) >> 1;
f2e0899f 3517
e39aece7
VZ
3518 hlen += pbd->ip_hlen_w;
3519
3520 /* We support checksum offload for TCP and UDP only */
3521 if (xmit_type & XMIT_CSUM_TCP)
3522 hlen += tcp_hdrlen(skb) / 2;
3523 else
3524 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3525
3526 pbd->total_hlen_w = cpu_to_le16(hlen);
3527 hlen = hlen*2;
3528
3529 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3530 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3531
3532 } else {
3533 s8 fix = SKB_CS_OFF(skb); /* signed! */
3534
3535 DP(NETIF_MSG_TX_QUEUED,
3536 "hlen %d fix %d csum before fix %x\n",
3537 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3538
3539 /* HW bug: fixup the CSUM */
3540 pbd->tcp_pseudo_csum =
3541 bnx2x_csum_fix(skb_transport_header(skb),
3542 SKB_CS(skb), fix);
3543
3544 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3545 pbd->tcp_pseudo_csum);
3546 }
3547
3548 return hlen;
3549}
f85582f8 3550
a848ade4
DK
3551static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3552 struct eth_tx_parse_bd_e2 *pbd_e2,
3553 struct eth_tx_parse_2nd_bd *pbd2,
3554 u16 *global_data,
3555 u32 xmit_type)
3556{
e287a75c 3557 u16 hlen_w = 0;
a848ade4 3558 u8 outerip_off, outerip_len = 0;
e768fb29 3559
e287a75c
DK
3560 /* from outer IP to transport */
3561 hlen_w = (skb_inner_transport_header(skb) -
3562 skb_network_header(skb)) >> 1;
a848ade4
DK
3563
3564 /* transport len */
e768fb29 3565 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3566
e287a75c 3567 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3568
e768fb29
DK
3569 /* outer IP header info */
3570 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3571 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3572 u32 csum = (__force u32)(~iph->check) -
3573 (__force u32)iph->tot_len -
3574 (__force u32)iph->frag_off;
c957d09f 3575
a848ade4 3576 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3577 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3578 } else {
3579 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3580 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3581 }
3582
3583 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3584
3585 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3586
3587 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3588 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3589
3590 pbd_e2->data.tunnel_data.pseudo_csum =
3591 bswab16(~csum_tcpudp_magic(
3592 inner_ip_hdr(skb)->saddr,
3593 inner_ip_hdr(skb)->daddr,
3594 0, IPPROTO_TCP, 0));
3595
3596 outerip_len = ip_hdr(skb)->ihl << 1;
3597 } else {
3598 pbd_e2->data.tunnel_data.pseudo_csum =
3599 bswab16(~csum_ipv6_magic(
3600 &inner_ipv6_hdr(skb)->saddr,
3601 &inner_ipv6_hdr(skb)->daddr,
3602 0, IPPROTO_TCP, 0));
3603 }
3604
3605 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3606
3607 *global_data |=
3608 outerip_off |
3609 (!!(xmit_type & XMIT_CSUM_V6) <<
3610 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3611 (outerip_len <<
3612 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3613 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3614 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3615
3616 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3617 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3618 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3619 }
a848ade4
DK
3620}
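For tunneled GSO the outer IPv4 checksum is programmed without the tot_len and frag_off contributions, since FW rewrites those per segment. The driver derives it incrementally from iph->check; the sketch below, assuming a plain byte view of the header, recomputes the same value directly (byte-swapping for the BD is omitted):

#include <stdint.h>
#include <stddef.h>

static uint16_t ip_csum_wo_len_frag(const uint8_t *iph, size_t ihl_bytes)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < ihl_bytes; i += 2) {
		/* skip tot_len (offset 2), frag_off (offset 6) and the
		 * checksum field itself (offset 10)
		 */
		if (i == 2 || i == 6 || i == 10)
			continue;
		sum += (iph[i] << 8) | iph[i + 1];
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;	/* header checksum as if len/frag were zero */
}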
3621
9f6c9258
DK
3622/* called with netif_tx_lock
3623 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3624 * netif_wake_queue()
3625 */
3626netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3627{
3628 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3629
9f6c9258 3630 struct netdev_queue *txq;
6383c0b3 3631 struct bnx2x_fp_txdata *txdata;
9f6c9258 3632 struct sw_tx_bd *tx_buf;
619c5cb6 3633 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3634 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3635 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3636 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3637 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3638 u32 pbd_e2_parsing_data = 0;
9f6c9258 3639 u16 pkt_prod, bd_prod;
65565884 3640 int nbd, txq_index;
9f6c9258
DK
3641 dma_addr_t mapping;
3642 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3643 int i;
3644 u8 hlen = 0;
3645 __le16 pkt_size = 0;
3646 struct ethhdr *eth;
3647 u8 mac_type = UNICAST_ADDRESS;
3648
3649#ifdef BNX2X_STOP_ON_ERROR
3650 if (unlikely(bp->panic))
3651 return NETDEV_TX_BUSY;
3652#endif
3653
6383c0b3
AE
3654 txq_index = skb_get_queue_mapping(skb);
3655 txq = netdev_get_tx_queue(dev, txq_index);
3656
55c11941 3657 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3658
65565884 3659 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3660
3661 /* enable this debug print to view the transmission queue being used
51c1a580 3662 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3663 txq_index, fp_index, txdata_index); */
9f6c9258 3664
16a5fd92 3665 /* enable this debug print to view the transmission details
51c1a580
MS
3666 DP(NETIF_MSG_TX_QUEUED,
3667 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3668 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3669
6383c0b3 3670 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3671 skb_shinfo(skb)->nr_frags +
3672 BDS_PER_TX_PKT +
3673 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3674 /* Handle special storage cases separately */
c96bdc0c
DK
3675 if (txdata->tx_ring_size == 0) {
3676 struct bnx2x_eth_q_stats *q_stats =
3677 bnx2x_fp_qstats(bp, txdata->parent_fp);
3678 q_stats->driver_filtered_tx_pkt++;
3679 dev_kfree_skb(skb);
3680 return NETDEV_TX_OK;
3681 }
2de67439
YM
3682 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3683 netif_tx_stop_queue(txq);
c96bdc0c 3684 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3685
9f6c9258
DK
3686 return NETDEV_TX_BUSY;
3687 }
3688
51c1a580 3689 DP(NETIF_MSG_TX_QUEUED,
04c46736 3690 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3691 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3692 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3693 skb->len);
9f6c9258
DK
3694
3695 eth = (struct ethhdr *)skb->data;
3696
3697 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3698 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3699 if (is_broadcast_ether_addr(eth->h_dest))
3700 mac_type = BROADCAST_ADDRESS;
3701 else
3702 mac_type = MULTICAST_ADDRESS;
3703 }
3704
91226790 3705#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3706 /* First, check if we need to linearize the skb (due to FW
3707 restrictions). No need to check fragmentation if page size > 8K
 3708 (there will be no violation of FW restrictions) */
3709 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3710 /* Statistics of linearization */
3711 bp->lin_cnt++;
3712 if (skb_linearize(skb) != 0) {
51c1a580
MS
3713 DP(NETIF_MSG_TX_QUEUED,
3714 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3715 dev_kfree_skb_any(skb);
3716 return NETDEV_TX_OK;
3717 }
3718 }
3719#endif
619c5cb6
VZ
3720 /* Map skb linear data for DMA */
3721 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3722 skb_headlen(skb), DMA_TO_DEVICE);
3723 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3724 DP(NETIF_MSG_TX_QUEUED,
3725 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3726 dev_kfree_skb_any(skb);
3727 return NETDEV_TX_OK;
3728 }
9f6c9258
DK
3729 /*
3730 Please read carefully. First we use one BD which we mark as start,
3731 then we have a parsing info BD (used for TSO or xsum),
3732 and only then we have the rest of the TSO BDs.
3733 (don't forget to mark the last one as last,
3734 and to unmap only AFTER you write to the BD ...)
 3735 And above all, all pbd sizes are in words - NOT DWORDS!
3736 */
3737
619c5cb6
VZ
3738 /* get current pkt produced now - advance it just before sending packet
3739 * since mapping of pages may fail and cause packet to be dropped
3740 */
6383c0b3
AE
3741 pkt_prod = txdata->tx_pkt_prod;
3742 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3743
619c5cb6
VZ
3744 /* get a tx_buf and first BD
3745 * tx_start_bd may be changed during SPLIT,
3746 * but first_bd will always stay first
3747 */
6383c0b3
AE
3748 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3749 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3750 first_bd = tx_start_bd;
9f6c9258
DK
3751
3752 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3753
91226790
DK
3754 /* header nbd: indirectly zero other flags! */
3755 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3756
3757 /* remember the first BD of the packet */
6383c0b3 3758 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3759 tx_buf->skb = skb;
3760 tx_buf->flags = 0;
3761
3762 DP(NETIF_MSG_TX_QUEUED,
3763 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3764 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3765
eab6d18d 3766 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3767 tx_start_bd->vlan_or_ethertype =
3768 cpu_to_le16(vlan_tx_tag_get(skb));
3769 tx_start_bd->bd_flags.as_bitfield |=
3770 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3771 } else {
3772 /* when transmitting in a vf, start bd must hold the ethertype
3773 * for fw to enforce it
3774 */
91226790 3775 if (IS_VF(bp))
dc1ba591
AE
3776 tx_start_bd->vlan_or_ethertype =
3777 cpu_to_le16(ntohs(eth->h_proto));
91226790 3778 else
dc1ba591
AE
3779 /* used by FW for packet accounting */
3780 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3781 }
9f6c9258 3782
91226790
DK
3783 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3784
9f6c9258
DK
3785 /* turn on parsing and get a BD */
3786 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3787
93ef5c02
DK
3788 if (xmit_type & XMIT_CSUM)
3789 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3790
619c5cb6 3791 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3792 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3793 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3794
3795 if (xmit_type & XMIT_CSUM_ENC) {
3796 u16 global_data = 0;
3797
3798 /* Set PBD in enc checksum offload case */
3799 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3800 &pbd_e2_parsing_data,
3801 xmit_type);
3802
3803 /* turn on 2nd parsing and get a BD */
3804 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3805
3806 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3807
3808 memset(pbd2, 0, sizeof(*pbd2));
3809
3810 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3811 (skb_inner_network_header(skb) -
3812 skb->data) >> 1;
3813
3814 if (xmit_type & XMIT_GSO_ENC)
3815 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3816 &global_data,
3817 xmit_type);
3818
3819 pbd2->global_data = cpu_to_le16(global_data);
3820
 3821 /* add additional parse BD indication to start BD */
3822 SET_FLAG(tx_start_bd->general_data,
3823 ETH_TX_START_BD_PARSE_NBDS, 1);
3824 /* set encapsulation flag in start BD */
3825 SET_FLAG(tx_start_bd->general_data,
3826 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3827 nbd++;
3828 } else if (xmit_type & XMIT_CSUM) {
91226790 3829 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3830 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3831 &pbd_e2_parsing_data,
3832 xmit_type);
a848ade4 3833 }
dc1ba591 3834
91226790
DK
 3835 /* Add the MACs to the parsing BD - this is a VF */
3836 if (IS_VF(bp)) {
3837 /* override GRE parameters in BD */
3838 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3839 &pbd_e2->data.mac_addr.src_mid,
3840 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3841 eth->h_source);
91226790
DK
3842
3843 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3844 &pbd_e2->data.mac_addr.dst_mid,
3845 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3846 eth->h_dest);
3847 }
96bed4b9
YM
3848
3849 SET_FLAG(pbd_e2_parsing_data,
3850 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3851 } else {
96bed4b9 3852 u16 global_data = 0;
6383c0b3 3853 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3854 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3855 /* Set PBD in checksum offload case */
3856 if (xmit_type & XMIT_CSUM)
3857 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3858
96bed4b9
YM
3859 SET_FLAG(global_data,
3860 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3861 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3862 }
3863
f85582f8 3864 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3865 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3866 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3867 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3868 pkt_size = tx_start_bd->nbytes;
3869
51c1a580 3870 DP(NETIF_MSG_TX_QUEUED,
91226790 3871 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3872 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3873 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3874 tx_start_bd->bd_flags.as_bitfield,
3875 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3876
3877 if (xmit_type & XMIT_GSO) {
3878
3879 DP(NETIF_MSG_TX_QUEUED,
3880 "TSO packet len %d hlen %d total len %d tso size %d\n",
3881 skb->len, hlen, skb_headlen(skb),
3882 skb_shinfo(skb)->gso_size);
3883
3884 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3885
91226790
DK
3886 if (unlikely(skb_headlen(skb) > hlen)) {
3887 nbd++;
6383c0b3
AE
3888 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3889 &tx_start_bd, hlen,
91226790
DK
3890 bd_prod);
3891 }
619c5cb6 3892 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3893 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3894 xmit_type);
f2e0899f 3895 else
44dbc78e 3896 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3897 }
2297a2da
VZ
3898
3899 /* Set the PBD's parsing_data field if not zero
3900 * (for the chips newer than 57711).
3901 */
3902 if (pbd_e2_parsing_data)
3903 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3904
9f6c9258
DK
3905 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3906
f85582f8 3907 /* Handle fragmented skb */
9f6c9258
DK
3908 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3909 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3910
9e903e08
ED
3911 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3912 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3913 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3914 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3915
51c1a580
MS
3916 DP(NETIF_MSG_TX_QUEUED,
3917 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3918
 3919 /* we need to unmap all buffers already mapped
 3920 * for this SKB;
 3921 * first_bd->nbd needs to be properly updated
 3922 * before the call to bnx2x_free_tx_pkt
3923 */
3924 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3925 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3926 TX_BD(txdata->tx_pkt_prod),
3927 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3928 return NETDEV_TX_OK;
3929 }
3930
9f6c9258 3931 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3932 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3933 if (total_pkt_bd == NULL)
6383c0b3 3934 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3935
9f6c9258
DK
3936 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3937 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3938 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3939 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3940 nbd++;
9f6c9258
DK
3941
3942 DP(NETIF_MSG_TX_QUEUED,
3943 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3944 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3945 le16_to_cpu(tx_data_bd->nbytes));
3946 }
3947
3948 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3949
619c5cb6
VZ
3950 /* update with actual num BDs */
3951 first_bd->nbd = cpu_to_le16(nbd);
3952
9f6c9258
DK
3953 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3954
3955 /* now send a tx doorbell, counting the next BD
3956 * if the packet contains or ends with it
3957 */
3958 if (TX_BD_POFF(bd_prod) < nbd)
3959 nbd++;
3960
619c5cb6
VZ
3961 /* total_pkt_bytes should be set on the first data BD if
3962 * it's not an LSO packet and there is more than one
3963 * data BD. In this case pkt_size is limited by an MTU value.
3964 * However we prefer to set it for an LSO packet (while we don't
 3965 * have to) in order to save some CPU cycles in a non-LSO
 3966 * case, where we care much more about them.
3967 */
9f6c9258
DK
3968 if (total_pkt_bd != NULL)
3969 total_pkt_bd->total_pkt_bytes = pkt_size;
3970
523224a3 3971 if (pbd_e1x)
9f6c9258 3972 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3973 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3974 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3975 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3976 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3977 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3978 if (pbd_e2)
3979 DP(NETIF_MSG_TX_QUEUED,
3980 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
3981 pbd_e2,
3982 pbd_e2->data.mac_addr.dst_hi,
3983 pbd_e2->data.mac_addr.dst_mid,
3984 pbd_e2->data.mac_addr.dst_lo,
3985 pbd_e2->data.mac_addr.src_hi,
3986 pbd_e2->data.mac_addr.src_mid,
3987 pbd_e2->data.mac_addr.src_lo,
f2e0899f 3988 pbd_e2->parsing_data);
9f6c9258
DK
3989 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3990
2df1a70a
TH
3991 netdev_tx_sent_queue(txq, skb->len);
3992
8373c57d
WB
3993 skb_tx_timestamp(skb);
3994
6383c0b3 3995 txdata->tx_pkt_prod++;
9f6c9258
DK
3996 /*
3997 * Make sure that the BD data is updated before updating the producer
3998 * since FW might read the BD right after the producer is updated.
3999 * This is only applicable for weak-ordered memory model archs such
4000 * as IA-64. The following barrier is also mandatory since FW will
4001 * assumes packets must have BDs.
4002 */
4003 wmb();
4004
6383c0b3 4005 txdata->tx_db.data.prod += nbd;
9f6c9258 4006 barrier();
f85582f8 4007
6383c0b3 4008 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4009
4010 mmiowb();
4011
6383c0b3 4012 txdata->tx_bd_prod += nbd;
9f6c9258 4013
7df2dc6b 4014 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4015 netif_tx_stop_queue(txq);
4016
4017 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4018 * ordering of set_bit() in netif_tx_stop_queue() and read of
4019 * fp->bd_tx_cons */
4020 smp_mb();
4021
15192a8c 4022 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4023 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4024 netif_tx_wake_queue(txq);
4025 }
6383c0b3 4026 txdata->tx_pkt++;
9f6c9258
DK
4027
4028 return NETDEV_TX_OK;
4029}
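bnx2x_start_xmit() above walks a paged BD ring: the last descriptor of every page is a next-page link, so the producer skips it when advancing, and the link is counted in the packet's nbd when a packet crosses a page (the TX_BD_POFF(bd_prod) < nbd test). A toy sketch of that producer arithmetic, with BDS_PER_PAGE as an illustrative constant rather than the driver's:

#include <stdint.h>

#define BDS_PER_PAGE	256	/* illustrative page size; last BD of each page is a link */

static uint16_t next_tx_bd(uint16_t prod)
{
	/* on the last usable BD of a page, jump over the link BD as well */
	if ((prod % BDS_PER_PAGE) == BDS_PER_PAGE - 2)
		return prod + 2;
	return prod + 1;
}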
f85582f8 4030
6383c0b3
AE
4031/**
4032 * bnx2x_setup_tc - routine to configure net_device for multi tc
4033 *
4034 * @netdev: net device to configure
4035 * @tc: number of traffic classes to enable
4036 *
4037 * callback connected to the ndo_setup_tc function pointer
4038 */
4039int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4040{
4041 int cos, prio, count, offset;
4042 struct bnx2x *bp = netdev_priv(dev);
4043
4044 /* setup tc must be called under rtnl lock */
4045 ASSERT_RTNL();
4046
16a5fd92 4047 /* no traffic classes requested. Aborting */
6383c0b3
AE
4048 if (!num_tc) {
4049 netdev_reset_tc(dev);
4050 return 0;
4051 }
4052
4053 /* requested to support too many traffic classes */
4054 if (num_tc > bp->max_cos) {
6bf07b8e 4055 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4056 num_tc, bp->max_cos);
6383c0b3
AE
4057 return -EINVAL;
4058 }
4059
4060 /* declare amount of supported traffic classes */
4061 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4062 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4063 return -EINVAL;
4064 }
4065
4066 /* configure priority to traffic class mapping */
4067 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4068 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4069 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4070 "mapping priority %d to tc %d\n",
6383c0b3
AE
4071 prio, bp->prio_to_cos[prio]);
4072 }
4073
16a5fd92 4074 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
 4075 This can be used for ets or pfc, and saves the effort of setting
 4076 up a multi-class queue disc or negotiating DCBX with a switch
4077 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4078 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4079 for (prio = 1; prio < 16; prio++) {
4080 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4081 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4082 } */
4083
4084 /* configure traffic class to transmission queue mapping */
4085 for (cos = 0; cos < bp->max_cos; cos++) {
4086 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4087 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4088 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4089 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4090 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4091 cos, offset, count);
4092 }
4093
4094 return 0;
4095}
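The loop above hands each traffic class a contiguous block of the ethernet tx queues, so class 'cos' starts at cos * num_eth_queues. A runnable sketch of that mapping with made-up queue counts:

#include <stdio.h>

int main(void)
{
	int num_eth_queues = 4, max_cos = 3, cos;

	for (cos = 0; cos < max_cos; cos++)
		printf("tc %d -> queues [%d..%d]\n", cos,
		       cos * num_eth_queues,
		       cos * num_eth_queues + num_eth_queues - 1);
	return 0;
}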
4096
9f6c9258
DK
4097/* called with rtnl_lock */
4098int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4099{
4100 struct sockaddr *addr = p;
4101 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4102 int rc = 0;
9f6c9258 4103
51c1a580
MS
4104 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4105 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4106 return -EINVAL;
51c1a580 4107 }
614c76df 4108
a3348722
BW
4109 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4110 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4111 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4112 return -EINVAL;
51c1a580 4113 }
9f6c9258 4114
619c5cb6
VZ
4115 if (netif_running(dev)) {
4116 rc = bnx2x_set_eth_mac(bp, false);
4117 if (rc)
4118 return rc;
4119 }
4120
9f6c9258 4121 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4122
523224a3 4123 if (netif_running(dev))
619c5cb6 4124 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4125
619c5cb6 4126 return rc;
9f6c9258
DK
4127}
4128
b3b83c3f
DK
4129static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4130{
4131 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4132 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4133 u8 cos;
b3b83c3f
DK
4134
4135 /* Common */
55c11941 4136
b3b83c3f
DK
4137 if (IS_FCOE_IDX(fp_index)) {
4138 memset(sb, 0, sizeof(union host_hc_status_block));
4139 fp->status_blk_mapping = 0;
b3b83c3f 4140 } else {
b3b83c3f 4141 /* status blocks */
619c5cb6 4142 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4143 BNX2X_PCI_FREE(sb->e2_sb,
4144 bnx2x_fp(bp, fp_index,
4145 status_blk_mapping),
4146 sizeof(struct host_hc_status_block_e2));
4147 else
4148 BNX2X_PCI_FREE(sb->e1x_sb,
4149 bnx2x_fp(bp, fp_index,
4150 status_blk_mapping),
4151 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4152 }
55c11941 4153
b3b83c3f
DK
4154 /* Rx */
4155 if (!skip_rx_queue(bp, fp_index)) {
4156 bnx2x_free_rx_bds(fp);
4157
4158 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4159 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4160 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4161 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4162 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4163
4164 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4165 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4166 sizeof(struct eth_fast_path_rx_cqe) *
4167 NUM_RCQ_BD);
4168
4169 /* SGE ring */
4170 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4171 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4172 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4173 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4174 }
4175
4176 /* Tx */
4177 if (!skip_tx_queue(bp, fp_index)) {
4178 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4179 for_each_cos_in_tx_queue(fp, cos) {
65565884 4180 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4181
51c1a580 4182 DP(NETIF_MSG_IFDOWN,
94f05b0f 4183 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4184 fp_index, cos, txdata->cid);
4185
4186 BNX2X_FREE(txdata->tx_buf_ring);
4187 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4188 txdata->tx_desc_mapping,
4189 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4190 }
b3b83c3f
DK
4191 }
4192 /* end of fastpath */
4193}
4194
55c11941
MS
4195void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4196{
4197 int i;
4198 for_each_cnic_queue(bp, i)
4199 bnx2x_free_fp_mem_at(bp, i);
4200}
4201
b3b83c3f
DK
4202void bnx2x_free_fp_mem(struct bnx2x *bp)
4203{
4204 int i;
55c11941 4205 for_each_eth_queue(bp, i)
b3b83c3f
DK
4206 bnx2x_free_fp_mem_at(bp, i);
4207}
4208
1191cb83 4209static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4210{
4211 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4212 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4213 bnx2x_fp(bp, index, sb_index_values) =
4214 (__le16 *)status_blk.e2_sb->sb.index_values;
4215 bnx2x_fp(bp, index, sb_running_index) =
4216 (__le16 *)status_blk.e2_sb->sb.running_index;
4217 } else {
4218 bnx2x_fp(bp, index, sb_index_values) =
4219 (__le16 *)status_blk.e1x_sb->sb.index_values;
4220 bnx2x_fp(bp, index, sb_running_index) =
4221 (__le16 *)status_blk.e1x_sb->sb.running_index;
4222 }
4223}
4224
1191cb83
ED
4225/* Returns the number of actually allocated BDs */
4226static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4227 int rx_ring_size)
4228{
4229 struct bnx2x *bp = fp->bp;
4230 u16 ring_prod, cqe_ring_prod;
4231 int i, failure_cnt = 0;
4232
4233 fp->rx_comp_cons = 0;
4234 cqe_ring_prod = ring_prod = 0;
4235
 4236 /* This routine is called only during init, so
4237 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4238 */
4239 for (i = 0; i < rx_ring_size; i++) {
996dedba 4240 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4241 failure_cnt++;
4242 continue;
4243 }
4244 ring_prod = NEXT_RX_IDX(ring_prod);
4245 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4246 WARN_ON(ring_prod <= (i - failure_cnt));
4247 }
4248
4249 if (failure_cnt)
4250 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4251 i - failure_cnt, fp->index);
4252
4253 fp->rx_bd_prod = ring_prod;
4254 /* Limit the CQE producer by the CQE ring size */
4255 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4256 cqe_ring_prod);
4257 fp->rx_pkt = fp->rx_calls = 0;
4258
15192a8c 4259 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4260
4261 return i - failure_cnt;
4262}
4263
4264static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4265{
4266 int i;
4267
4268 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4269 struct eth_rx_cqe_next_page *nextpg;
4270
4271 nextpg = (struct eth_rx_cqe_next_page *)
4272 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4273 nextpg->addr_hi =
4274 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4275 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4276 nextpg->addr_lo =
4277 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4278 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4279 }
4280}
4281
b3b83c3f
DK
4282static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4283{
4284 union host_hc_status_block *sb;
4285 struct bnx2x_fastpath *fp = &bp->fp[index];
4286 int ring_size = 0;
6383c0b3 4287 u8 cos;
c2188952 4288 int rx_ring_size = 0;
b3b83c3f 4289
a3348722
BW
4290 if (!bp->rx_ring_size &&
4291 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4292 rx_ring_size = MIN_RX_SIZE_NONTPA;
4293 bp->rx_ring_size = rx_ring_size;
55c11941 4294 } else if (!bp->rx_ring_size) {
c2188952
VZ
4295 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4296
065f8b92
YM
4297 if (CHIP_IS_E3(bp)) {
4298 u32 cfg = SHMEM_RD(bp,
4299 dev_info.port_hw_config[BP_PORT(bp)].
4300 default_cfg);
4301
4302 /* Decrease ring size for 1G functions */
4303 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4304 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4305 rx_ring_size /= 10;
4306 }
d760fc37 4307
c2188952
VZ
4308 /* allocate at least number of buffers required by FW */
4309 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4310 MIN_RX_SIZE_TPA, rx_ring_size);
4311
4312 bp->rx_ring_size = rx_ring_size;
614c76df 4313 } else /* if rx_ring_size specified - use it */
c2188952 4314 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4315
04c46736
YM
4316 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4317
b3b83c3f
DK
4318 /* Common */
4319 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4320
b3b83c3f 4321 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4322 /* status blocks */
619c5cb6 4323 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4324 BNX2X_PCI_ALLOC(sb->e2_sb,
4325 &bnx2x_fp(bp, index, status_blk_mapping),
4326 sizeof(struct host_hc_status_block_e2));
4327 else
4328 BNX2X_PCI_ALLOC(sb->e1x_sb,
4329 &bnx2x_fp(bp, index, status_blk_mapping),
4330 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4331 }
8eef2af1
DK
4332
4333 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4334 * set shortcuts for it.
4335 */
4336 if (!IS_FCOE_IDX(index))
4337 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4338
4339 /* Tx */
4340 if (!skip_tx_queue(bp, index)) {
4341 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4342 for_each_cos_in_tx_queue(fp, cos) {
65565884 4343 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4344
51c1a580
MS
4345 DP(NETIF_MSG_IFUP,
4346 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4347 index, cos);
4348
4349 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4350 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4351 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4352 &txdata->tx_desc_mapping,
b3b83c3f 4353 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4354 }
b3b83c3f
DK
4355 }
4356
4357 /* Rx */
4358 if (!skip_rx_queue(bp, index)) {
4359 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4360 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4361 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4362 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4363 &bnx2x_fp(bp, index, rx_desc_mapping),
4364 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4365
75b29459
DK
4366 /* Seed all CQEs by 1s */
4367 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4368 &bnx2x_fp(bp, index, rx_comp_mapping),
4369 sizeof(struct eth_fast_path_rx_cqe) *
4370 NUM_RCQ_BD);
b3b83c3f
DK
4371
4372 /* SGE ring */
4373 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4374 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4375 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4376 &bnx2x_fp(bp, index, rx_sge_mapping),
4377 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4378 /* RX BD ring */
4379 bnx2x_set_next_page_rx_bd(fp);
4380
4381 /* CQ ring */
4382 bnx2x_set_next_page_rx_cq(fp);
4383
4384 /* BDs */
4385 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4386 if (ring_size < rx_ring_size)
4387 goto alloc_mem_err;
4388 }
4389
4390 return 0;
4391
4392/* handles low memory cases */
4393alloc_mem_err:
4394 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4395 index, ring_size);
4396 /* FW will drop all packets if queue is not big enough,
4397 * In these cases we disable the queue
6383c0b3 4398 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4399 */
4400 if (ring_size < (fp->disable_tpa ?
eb722d7a 4401 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4402 /* release memory allocated for this queue */
4403 bnx2x_free_fp_mem_at(bp, index);
4404 return -ENOMEM;
4405 }
4406 return 0;
4407}
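The sizing logic above splits the available rx buffers across the queues, cuts the result by 10x for 1G SerDes ports and never drops below the firmware minimum (otherwise FW would drop packets). A compact sketch of that policy, parameter names being illustrative:

#include <stdbool.h>

static int calc_rx_ring_size(int max_rx_avail, int num_rx_queues,
			     int fw_min, bool is_1g_serdes)
{
	int size = max_rx_avail / num_rx_queues;

	if (is_1g_serdes)
		size /= 10;	/* 1G functions need far fewer buffers */
	if (size < fw_min)
		size = fw_min;	/* below this FW drops packets */
	return size;
}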
4408
55c11941
MS
4409int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4410{
4411 if (!NO_FCOE(bp))
4412 /* FCoE */
4413 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
 4414 /* we will fail the load process instead of marking
 4415 * NO_FCOE_FLAG
4416 */
4417 return -ENOMEM;
4418
4419 return 0;
4420}
4421
b3b83c3f
DK
4422int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4423{
4424 int i;
4425
55c11941
MS
4426 /* 1. Allocate FP for leading - fatal if error
4427 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4428 */
4429
4430 /* leading */
4431 if (bnx2x_alloc_fp_mem_at(bp, 0))
4432 return -ENOMEM;
6383c0b3 4433
b3b83c3f
DK
4434 /* RSS */
4435 for_each_nondefault_eth_queue(bp, i)
4436 if (bnx2x_alloc_fp_mem_at(bp, i))
4437 break;
4438
4439 /* handle memory failures */
4440 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4441 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4442
4443 WARN_ON(delta < 0);
4864a16a 4444 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4445 if (CNIC_SUPPORT(bp))
4446 /* move non eth FPs next to last eth FP
4447 * must be done in that order
4448 * FCOE_IDX < FWD_IDX < OOO_IDX
4449 */
b3b83c3f 4450
55c11941
MS
4451 /* move FCoE fp even NO_FCOE_FLAG is on */
4452 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4453 bp->num_ethernet_queues -= delta;
4454 bp->num_queues = bp->num_ethernet_queues +
4455 bp->num_cnic_queues;
b3b83c3f
DK
4456 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4457 bp->num_queues + delta, bp->num_queues);
4458 }
4459
4460 return 0;
4461}
d6214d7a 4462
523224a3
DK
4463void bnx2x_free_mem_bp(struct bnx2x *bp)
4464{
c3146eb6
DK
4465 int i;
4466
4467 for (i = 0; i < bp->fp_array_size; i++)
4468 kfree(bp->fp[i].tpa_info);
523224a3 4469 kfree(bp->fp);
15192a8c
BW
4470 kfree(bp->sp_objs);
4471 kfree(bp->fp_stats);
65565884 4472 kfree(bp->bnx2x_txq);
523224a3
DK
4473 kfree(bp->msix_table);
4474 kfree(bp->ilt);
4475}
4476
0329aba1 4477int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4478{
4479 struct bnx2x_fastpath *fp;
4480 struct msix_entry *tbl;
4481 struct bnx2x_ilt *ilt;
6383c0b3 4482 int msix_table_size = 0;
55c11941 4483 int fp_array_size, txq_array_size;
15192a8c 4484 int i;
6383c0b3
AE
4485
4486 /*
4487 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4488 * path IGU SBs plus default SB (for PF only).
6383c0b3 4489 */
1ab4434c
AE
4490 msix_table_size = bp->igu_sb_cnt;
4491 if (IS_PF(bp))
4492 msix_table_size++;
4493 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4494
6383c0b3 4495 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4496 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4497 bp->fp_array_size = fp_array_size;
4498 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4499
c3146eb6 4500 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4501 if (!fp)
4502 goto alloc_err;
c3146eb6 4503 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4504 fp[i].tpa_info =
4505 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4506 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4507 if (!(fp[i].tpa_info))
4508 goto alloc_err;
4509 }
4510
523224a3
DK
4511 bp->fp = fp;
4512
15192a8c 4513 /* allocate sp objs */
c3146eb6 4514 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4515 GFP_KERNEL);
4516 if (!bp->sp_objs)
4517 goto alloc_err;
4518
4519 /* allocate fp_stats */
c3146eb6 4520 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4521 GFP_KERNEL);
4522 if (!bp->fp_stats)
4523 goto alloc_err;
4524
65565884 4525 /* Allocate memory for the transmission queues array */
55c11941
MS
4526 txq_array_size =
4527 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4528 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4529
4530 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4531 GFP_KERNEL);
65565884
MS
4532 if (!bp->bnx2x_txq)
4533 goto alloc_err;
4534
523224a3 4535 /* msix table */
01e23742 4536 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4537 if (!tbl)
4538 goto alloc_err;
4539 bp->msix_table = tbl;
4540
4541 /* ilt */
4542 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4543 if (!ilt)
4544 goto alloc_err;
4545 bp->ilt = ilt;
4546
4547 return 0;
4548alloc_err:
4549 bnx2x_free_mem_bp(bp);
4550 return -ENOMEM;
523224a3
DK
4551}
4552
a9fccec7 4553int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4554{
4555 struct bnx2x *bp = netdev_priv(dev);
4556
4557 if (unlikely(!netif_running(dev)))
4558 return 0;
4559
5d07d868 4560 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4561 return bnx2x_nic_load(bp, LOAD_NORMAL);
4562}
4563
1ac9e428
YR
4564int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4565{
4566 u32 sel_phy_idx = 0;
4567 if (bp->link_params.num_phys <= 1)
4568 return INT_PHY;
4569
4570 if (bp->link_vars.link_up) {
4571 sel_phy_idx = EXT_PHY1;
4572 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4573 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4574 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4575 sel_phy_idx = EXT_PHY2;
4576 } else {
4577
4578 switch (bnx2x_phy_selection(&bp->link_params)) {
4579 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4580 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4581 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4582 sel_phy_idx = EXT_PHY1;
4583 break;
4584 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4585 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4586 sel_phy_idx = EXT_PHY2;
4587 break;
4588 }
4589 }
4590
4591 return sel_phy_idx;
1ac9e428
YR
4592}
4593int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4594{
4595 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4596 /*
2de67439 4597 * The selected active PHY index is always the one after swapping (in case PHY
1ac9e428
YR
4598 * swapping is enabled). So when swapping is enabled, we need to reverse
4599 * the configuration
4600 */
4601
4602 if (bp->link_params.multi_phy_config &
4603 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4604 if (sel_phy_idx == EXT_PHY1)
4605 sel_phy_idx = EXT_PHY2;
4606 else if (sel_phy_idx == EXT_PHY2)
4607 sel_phy_idx = EXT_PHY1;
4608 }
4609 return LINK_CONFIG_IDX(sel_phy_idx);
4610}
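When the board wires the two external PHYs swapped, the link configuration index has to be mirrored, as the function above does. A tiny sketch, with SK_EXT_PHY1/SK_EXT_PHY2 as local stand-ins for the driver's constants:

enum { SK_EXT_PHY1 = 1, SK_EXT_PHY2 = 2 };	/* stand-ins, not the driver's values */

static int apply_phy_swap(int sel_phy_idx, int swapped)
{
	if (!swapped)
		return sel_phy_idx;
	if (sel_phy_idx == SK_EXT_PHY1)
		return SK_EXT_PHY2;
	if (sel_phy_idx == SK_EXT_PHY2)
		return SK_EXT_PHY1;
	return sel_phy_idx;
}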
4611
55c11941 4612#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4613int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4614{
4615 struct bnx2x *bp = netdev_priv(dev);
4616 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4617
4618 switch (type) {
4619 case NETDEV_FCOE_WWNN:
4620 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4621 cp->fcoe_wwn_node_name_lo);
4622 break;
4623 case NETDEV_FCOE_WWPN:
4624 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4625 cp->fcoe_wwn_port_name_lo);
4626 break;
4627 default:
51c1a580 4628 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4629 return -EINVAL;
4630 }
4631
4632 return 0;
4633}
4634#endif
4635
9f6c9258
DK
4636/* called with rtnl_lock */
4637int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4638{
4639 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4640
4641 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4642 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4643 return -EAGAIN;
4644 }
4645
4646 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4647 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4648 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4649 return -EINVAL;
51c1a580 4650 }
9f6c9258
DK
4651
4652 /* This does not race with packet allocation
4653 * because the actual alloc size is
4654 * only updated as part of load
4655 */
4656 dev->mtu = new_mtu;
4657
66371c44
MM
4658 return bnx2x_reload_if_running(dev);
4659}
4660
c8f44aff 4661netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4662 netdev_features_t features)
66371c44
MM
4663{
4664 struct bnx2x *bp = netdev_priv(dev);
4665
4666 /* TPA requires Rx CSUM offloading */
621b4d66 4667 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4668 features &= ~NETIF_F_LRO;
621b4d66
DK
4669 features &= ~NETIF_F_GRO;
4670 }
66371c44
MM
4671
4672 return features;
4673}
4674
c8f44aff 4675int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4676{
4677 struct bnx2x *bp = netdev_priv(dev);
4678 u32 flags = bp->flags;
8802f579 4679 u32 changes;
538dd2e3 4680 bool bnx2x_reload = false;
66371c44
MM
4681
4682 if (features & NETIF_F_LRO)
4683 flags |= TPA_ENABLE_FLAG;
4684 else
4685 flags &= ~TPA_ENABLE_FLAG;
4686
621b4d66
DK
4687 if (features & NETIF_F_GRO)
4688 flags |= GRO_ENABLE_FLAG;
4689 else
4690 flags &= ~GRO_ENABLE_FLAG;
4691
538dd2e3
MB
4692 if (features & NETIF_F_LOOPBACK) {
4693 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4694 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4695 bnx2x_reload = true;
4696 }
4697 } else {
4698 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4699 bp->link_params.loopback_mode = LOOPBACK_NONE;
4700 bnx2x_reload = true;
4701 }
4702 }
4703
8802f579
ED
4704 changes = flags ^ bp->flags;
4705
16a5fd92 4706 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4707 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4708 changes &= ~GRO_ENABLE_FLAG;
4709
4710 if (changes)
538dd2e3 4711 bnx2x_reload = true;
8802f579
ED
4712
4713 bp->flags = flags;
66371c44 4714
538dd2e3 4715 if (bnx2x_reload) {
66371c44
MM
4716 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4717 return bnx2x_reload_if_running(dev);
4718 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4719 }
4720
66371c44 4721 return 0;
9f6c9258
DK
4722}
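bnx2x_set_features() detects what changed by XOR-ing the old and new flag words and reloads only if a meaningful bit differs; a GRO toggle is ignored while TPA (LRO) stays on. A minimal sketch of that pattern, with F_TPA/F_GRO as illustrative bits:

#include <stdbool.h>
#include <stdint.h>

#define F_TPA 0x1u	/* illustrative flag bits */
#define F_GRO 0x2u

static bool needs_reload(uint32_t old_flags, uint32_t new_flags)
{
	uint32_t changes = old_flags ^ new_flags;

	/* toggling GRO while TPA (LRO) is enabled does not force a reload */
	if ((changes & F_GRO) && (new_flags & F_TPA))
		changes &= ~F_GRO;

	return changes != 0;
}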
4723
4724void bnx2x_tx_timeout(struct net_device *dev)
4725{
4726 struct bnx2x *bp = netdev_priv(dev);
4727
4728#ifdef BNX2X_STOP_ON_ERROR
4729 if (!bp->panic)
4730 bnx2x_panic();
4731#endif
7be08a72
AE
4732
4733 smp_mb__before_clear_bit();
4734 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4735 smp_mb__after_clear_bit();
4736
9f6c9258 4737 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4738 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4739}
4740
9f6c9258
DK
4741int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4742{
4743 struct net_device *dev = pci_get_drvdata(pdev);
4744 struct bnx2x *bp;
4745
4746 if (!dev) {
4747 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4748 return -ENODEV;
4749 }
4750 bp = netdev_priv(dev);
4751
4752 rtnl_lock();
4753
4754 pci_save_state(pdev);
4755
4756 if (!netif_running(dev)) {
4757 rtnl_unlock();
4758 return 0;
4759 }
4760
4761 netif_device_detach(dev);
4762
5d07d868 4763 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4764
4765 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4766
4767 rtnl_unlock();
4768
4769 return 0;
4770}
4771
4772int bnx2x_resume(struct pci_dev *pdev)
4773{
4774 struct net_device *dev = pci_get_drvdata(pdev);
4775 struct bnx2x *bp;
4776 int rc;
4777
4778 if (!dev) {
4779 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4780 return -ENODEV;
4781 }
4782 bp = netdev_priv(dev);
4783
4784 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4785 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4786 return -EAGAIN;
4787 }
4788
4789 rtnl_lock();
4790
4791 pci_restore_state(pdev);
4792
4793 if (!netif_running(dev)) {
4794 rtnl_unlock();
4795 return 0;
4796 }
4797
4798 bnx2x_set_power_state(bp, PCI_D0);
4799 netif_device_attach(dev);
4800
4801 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4802
4803 rtnl_unlock();
4804
4805 return rc;
4806}
619c5cb6 4807
619c5cb6
VZ
4808void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4809 u32 cid)
4810{
b9871bcf
AE
4811 if (!cxt) {
4812 BNX2X_ERR("bad context pointer %p\n", cxt);
4813 return;
4814 }
4815
619c5cb6
VZ
4816 /* ustorm cxt validation */
4817 cxt->ustorm_ag_context.cdu_usage =
4818 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4819 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4820 /* xcontext validation */
4821 cxt->xstorm_ag_context.cdu_reserved =
4822 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4823 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4824}
4825
1191cb83
ED
4826static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4827 u8 fw_sb_id, u8 sb_index,
4828 u8 ticks)
619c5cb6 4829{
619c5cb6
VZ
4830 u32 addr = BAR_CSTRORM_INTMEM +
4831 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4832 REG_WR8(bp, addr, ticks);
51c1a580
MS
4833 DP(NETIF_MSG_IFUP,
4834 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4835 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4836}
4837
1191cb83
ED
4838static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4839 u16 fw_sb_id, u8 sb_index,
4840 u8 disable)
619c5cb6
VZ
4841{
4842 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4843 u32 addr = BAR_CSTRORM_INTMEM +
4844 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4845 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4846 /* clear and set */
4847 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4848 flags |= enable_flag;
0c14e5ce 4849 REG_WR8(bp, addr, flags);
51c1a580
MS
4850 DP(NETIF_MSG_IFUP,
4851 "port %x fw_sb_id %d sb_index %d disable %d\n",
4852 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4853}
4854
4855void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4856 u8 sb_index, u8 disable, u16 usec)
4857{
4858 int port = BP_PORT(bp);
4859 u8 ticks = usec / BNX2X_BTR;
4860
4861 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4862
4863 disable = disable ? 1 : (usec ? 0 : 1);
4864 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4865}
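The coalescing helpers above program the timeout in BTR ticks and treat a zero microsecond value as an implicit disable. A short sketch of that conversion, with BTR_USEC standing in for BNX2X_BTR:

#include <stdint.h>
#include <stdbool.h>

#define BTR_USEC 4	/* illustrative tick granularity, in microseconds */

static void usec_to_hc_params(uint16_t usec, bool disable_req,
			      uint8_t *ticks, bool *disable)
{
	*ticks = usec / BTR_USEC;		/* timeout in ticks */
	*disable = disable_req || usec == 0;	/* 0 usec => index disabled */
}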